1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Channel table initializers: build one struct ieee80211_channel from a
 * (hardware channel index, center frequency in MHz) pair, with a 30 dBm
 * maximum power cap per entry.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* 2.4 GHz channel template (channels 1-14); copied into writable memory
 * per device by mt76_init_sband().
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channel template, grouped by UNII sub-band (36-48, 52-64,
 * 100-144, 149-177).
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channel template (UNII-5 through UNII-8). */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* LED throughput blink table for the tpt trigger: higher throughput
 * (in Kbps, hence the * 1024) maps to a shorter blink interval (ms).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Default legacy bitrate table: 4 CCK entries followed by 8 OFDM
 * entries (rate values per mac80211 convention are in 100 kbps units).
 * Callers pass &mt76_rates[4] for bands without CCK support.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges (MHz) advertised for SAR power limit configuration;
 * indices match the per-range entries allocated in phy->frp.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};
189 
/* SAR capability descriptor installed into wiphy->sar_capa. */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
mt76_led_init(struct mt76_phy * phy)196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
mt76_led_cleanup(struct mt76_phy * phy)243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Update the HT (and, if @vht, VHT) stream-dependent capabilities of
 * @sband to match the number of enabled chains in phy->antenna_mask:
 * TX STBC flags and the per-stream MCS maps.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires at least two spatial streams */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one rx_mask byte (8 MCS rates) per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* 2 bits per stream: MCS 0-9 on the first nstream streams,
	 * "not supported" on the rest
	 */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
/* Refresh the stream capabilities of every band the phy supports,
 * e.g. after the antenna mask changed. 2.4 GHz never advertises VHT;
 * 5 and 6 GHz honor @vht.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	const struct {
		bool enabled;
		struct ieee80211_supported_band *sband;
		bool vht;
	} bands[] = {
		{ phy->cap.has_2ghz, &phy->sband_2g.sband, false },
		{ phy->cap.has_5ghz, &phy->sband_5g.sband, vht },
		{ phy->cap.has_6ghz, &phy->sband_6g.sband, vht },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(bands); i++) {
		if (bands[i].enabled)
			mt76_init_stream_cap(phy, bands[i].sband,
					     bands[i].vht);
	}
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Common supported-band setup: duplicate the const channel template into
 * writable devm-managed memory (mac80211/regulatory may modify channel
 * flags), allocate per-channel survey state, hook up bitrates, and fill
 * in baseline HT/VHT capabilities when requested.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* per-channel state (survey counters etc.), one entry per channel */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* stream-count dependent bits (MCS maps, TX STBC) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 					NL80211_CHAN_HT20);
416 		phy->chan_state = &msband->chan[0];
417 		phy->dev->band_phys[band] = phy;
418 		return;
419 	}
420 
421 	sband->n_channels = 0;
422 	if (phy->hw->wiphy->bands[band] == sband)
423 		phy->hw->wiphy->bands[band] = NULL;
424 }
425 
/* Common phy initialization shared by the primary device and extra
 * phys: per-phy lists/locks, and - only for the phy that owns @hw
 * (phy == hw->priv) - wiphy features, SAR ranges and mac80211 hw flags.
 *
 * Returns 0 on success, -ENOMEM if the SAR range array cannot be
 * allocated.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* radio phys share the primary hw; only its owner configures it */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* per-range SAR power limits, one entry per advertised range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* software A-MSDU aggregation, unless the driver offloads it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
489 
490 struct mt76_phy *
mt76_alloc_radio_phy(struct mt76_dev * dev,unsigned int size,u8 band_idx)491 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
492 		     u8 band_idx)
493 {
494 	struct ieee80211_hw *hw = dev->phy.hw;
495 	unsigned int phy_size;
496 	struct mt76_phy *phy;
497 
498 	phy_size = ALIGN(sizeof(*phy), 8);
499 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
500 	if (!phy)
501 		return NULL;
502 
503 	phy->dev = dev;
504 	phy->hw = hw;
505 	phy->priv = (void *)phy + phy_size;
506 	phy->band_idx = band_idx;
507 
508 	return phy;
509 }
510 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
511 
/* Allocate a secondary phy with its own ieee80211_hw (and @size bytes
 * of driver-private data behind the aligned mt76_phy in hw->priv), and
 * set default interface modes and wiphy flags.
 *
 * Returns the new phy or NULL on allocation failure. The hw must be
 * released with ieee80211_free_hw() if registration never happens.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* driver private data follows the aligned phy struct */
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
544 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
545 
/* Register a (secondary) phy: initialize it, populate its supported
 * bands, set up LEDs, apply DT frequency limits, and - when the phy
 * owns its hw - register the hw with mac80211.
 *
 * @rates must start with the 4 CCK entries; 5/6 GHz bands are given
 * rates + 4 / n_rates - 4 to skip CCK.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip the 4 CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* apply DT limits, then drop bands left without usable channels */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* only the hw-owning phy registers with mac80211 */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
595 EXPORT_SYMBOL_GPL(mt76_register_phy);
596 
/* Tear down a phy registered with mt76_register_phy(): clean up LEDs,
 * flush pending TX status, unregister the hw and drop the phys[] slot.
 * No-op if the phy was never registered.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* flush=true: force completion of all pending tx status entries */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
610 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
611 
/* Create a page pool for @q. Pools are only created for rx queues and
 * the WED tx-free queue; other queues get none and return 0.
 *
 * Returns 0 on success (or when no pool is needed), a negative error
 * from page_pool_create() otherwise; q->page_pool stays NULL on error.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* index into dev->q_rx for rx queues, -1 otherwise */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	/* main data queues get a larger pool than control/mcu queues */
	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
659 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
660 
/* Allocate the mt76_dev together with its primary phy and hw, and
 * initialize all core state (locks, lists, queues, workqueue, default
 * interface modes).
 *
 * Returns the new device or NULL on failure; release with
 * mt76_free_device().
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* may be overridden later by bus code (e.g. for USB/SDIO) */
	dev->dma_dev = pdev;

	/* the primary phy is embedded in the device, band index 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
737 EXPORT_SYMBOL_GPL(mt76_alloc_device);
738 
/* Register the primary phy/hw of @dev with mac80211: init the phy and
 * its bands, apply DT frequency limits, set up LEDs, register the hw
 * and start the tx worker.
 *
 * @rates must start with the 4 CCK entries; 5/6 GHz bands are given
 * rates + 4 / n_rates - 4 to skip CCK.
 *
 * Returns 0 on success or a negative error code.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the 4 CCK rates on 5 GHz */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		/* skip the 4 CCK rates on 6 GHz */
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* apply DT limits, then drop bands left without usable channels */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* lower tx worker latency without starving normal tasks */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
791 EXPORT_SYMBOL_GPL(mt76_register_device);
792 
/* Unregister the primary phy/hw: clean up LEDs, flush pending tx
 * status, release the global wcid and unregister from mac80211.
 * No-op if the device was never registered.
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* flush=true: force completion of all pending tx status entries */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
806 EXPORT_SYMBOL_GPL(mt76_unregister_device);
807 
mt76_free_device(struct mt76_dev * dev)808 void mt76_free_device(struct mt76_dev *dev)
809 {
810 	mt76_worker_teardown(&dev->tx_worker);
811 	if (dev->wq) {
812 		destroy_workqueue(dev->wq);
813 		dev->wq = NULL;
814 	}
815 	ieee80211_free_hw(dev->hw);
816 }
817 EXPORT_SYMBOL_GPL(mt76_free_device);
818 
/* Resolve the mt76_phy serving @vif. Without multi-radio support the hw
 * private area is the phy itself; otherwise the phy is looked up through
 * the vif's assigned channel context. Returns NULL if the vif has no
 * channel context yet.
 */
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_chanctx *ctx;

	if (!hw->wiphy->n_radio)
		return hw->priv;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
	return ctx->phy;
}
EXPORT_SYMBOL_GPL(mt76_vif_phy);
834 EXPORT_SYMBOL_GPL(mt76_vif_phy);
835 
/* Release the currently accumulated A-MSDU for rx queue @q: validate
 * its first subframe and either drop it or push it onto the per-queue
 * rx skb list for later processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	/* reset accumulator state before any early return */
	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted with IV still present: skip IV (8 bytes) */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be -> spoofed A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
871 
/* Accumulate rx frames into per-queue A-MSDU bursts: subframes that
 * share a sequence number are chained onto the head skb's frag_list;
 * a completed (or interrupted) burst is handed to
 * mt76_rx_release_amsdu().
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush a pending burst that @skb does not continue */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst with @skb as head */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append subframe to the current burst */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single MSDU or last subframe: burst is complete */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
894 
/* Entry point for received frames from the driver: drop frames for a
 * stopped phy, update testmode rx counters, then feed the frame into
 * the A-MSDU burst accumulator for queue @q.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
915 EXPORT_SYMBOL_GPL(mt76_rx);
916 
mt76_has_tx_pending(struct mt76_phy * phy)917 bool mt76_has_tx_pending(struct mt76_phy *phy)
918 {
919 	struct mt76_queue *q;
920 	int i;
921 
922 	for (i = 0; i < __MT_TXQ_MAX; i++) {
923 		q = phy->q_tx[i];
924 		if (q && q->queued)
925 			return true;
926 	}
927 
928 	return false;
929 }
930 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
931 
932 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)933 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
934 {
935 	struct mt76_sband *msband;
936 	int idx;
937 
938 	if (c->band == NL80211_BAND_2GHZ)
939 		msband = &phy->sband_2g;
940 	else if (c->band == NL80211_BAND_6GHZ)
941 		msband = &phy->sband_6g;
942 	else
943 		msband = &phy->sband_5g;
944 
945 	idx = c - &msband->sband.channels[0];
946 	return &msband->chan[idx];
947 }
948 
/* Credit the time elapsed since the last survey update (in us) to the
 * current channel's active counter and restart the measurement window
 * at @time.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	ktime_t elapsed = ktime_sub(time, phy->survey_time);

	phy->chan_state->cc_active += ktime_to_us(elapsed);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
957 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
958 
/* Refresh survey statistics for the current channel: let the driver
 * update its hardware counters, account active time, and fold in the
 * software-tracked BSS rx airtime when the driver uses it.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the cur_cc_bss_rx accumulator */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
979 EXPORT_SYMBOL_GPL(mt76_update_survey);
980 
/* Switch @phy to @chandef. Caller must hold dev->mutex (see
 * mt76_set_channel() for the locked wrapper).
 *
 * Quiesces tx (with a 200 ms timeout for pending frames), snapshots the
 * survey, updates channel/DFS state and invokes the driver's
 * set_channel callback, then re-enables the tx worker.
 *
 * Returns the driver's set_channel() result.
 */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	set_bit(MT76_RESET, &phy->state);

	/* stop the tx worker and give pending frames a chance to drain */
	mt76_worker_disable(&dev->tx_worker);
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* a different channel or width invalidates the CAC/DFS state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* leaving the main channel: restart its survey accounting */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1016 
/* Locked wrapper around __mt76_set_channel(): stop the periodic mac
 * work first, then perform the switch under dev->mutex.
 */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *mdev = phy->dev;
	int err;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&mdev->mutex);
	err = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&mdev->mutex);

	return err;
}
1031 
mt76_update_channel(struct mt76_phy * phy)1032 int mt76_update_channel(struct mt76_phy *phy)
1033 {
1034 	struct ieee80211_hw *hw = phy->hw;
1035 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1036 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1037 
1038 	phy->radar_enabled = hw->conf.radar_enabled;
1039 
1040 	return mt76_set_channel(phy, chandef, offchannel);
1041 }
1042 EXPORT_SYMBOL_GPL(mt76_update_channel);
1043 
1044 static struct mt76_sband *
mt76_get_survey_sband(struct mt76_phy * phy,int * idx)1045 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1046 {
1047 	if (*idx < phy->sband_2g.sband.n_channels)
1048 		return &phy->sband_2g;
1049 
1050 	*idx -= phy->sband_2g.sband.n_channels;
1051 	if (*idx < phy->sband_5g.sband.n_channels)
1052 		return &phy->sband_5g;
1053 
1054 	*idx -= phy->sband_5g.sband.n_channels;
1055 	if (*idx < phy->sband_6g.sband.n_channels)
1056 		return &phy->sband_6g;
1057 
1058 	*idx -= phy->sband_6g.sband.n_channels;
1059 	return NULL;
1060 }
1061 
/* ieee80211_ops::get_survey implementation.
 *
 * @idx enumerates the concatenated 2/5/6 GHz channel lists of every phy
 * that belongs to @hw; mt76_get_survey_sband() rebases @idx per band.
 * Returns -ENOENT once @idx is past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* Find the phy (and band) that owns channel @idx on this hw */
	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		sband = mt76_get_survey_sband(phy, &idx);

		/* Refresh hardware counters once, at the start of a dump */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		/* BSS RX time is only tracked when it is accounted in SW */
		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* channel-state counters are kept in microseconds; report ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_bss_rx/cc_tx are updated from the RX/TX paths under cc_lock */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1128 
/* Prime the per-TID RX replay counters of @wcid for a newly installed key.
 *
 * PN replay checking is only enabled for CCMP here; for any other cipher
 * (or when the key is removed, @key == NULL) rx_check_pn stays false.
 * Note: rx_key_pn is expected to have IEEE80211_NUM_TIDS + 1 slots; the
 * extra slot (indexed by i == IEEE80211_NUM_TIDS after the loop) holds
 * the counter for robust management frames.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1157 
/* Combine per-chain RSSI readings into a single signal value.
 *
 * Only chains present in @chain_mask with non-positive (valid dBm)
 * readings contribute. Each additional chain adds a bonus to the running
 * maximum that shrinks with the gap between the two values (+3 dB for an
 * equal chain, +2 dB within 2 dB, +1 dB within 6 dB).
 * Returns -128 when no chain contributed.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int combined = -128;
	u8 mask;

	for (mask = chain_mask; mask; mask >>= 1, chain_signal++) {
		int val = *chain_signal;
		int gap;

		/* skip unused chains and invalid (positive) readings */
		if (!(mask & 1) || val > 0)
			continue;

		/* keep the stronger value in 'combined' */
		if (val > combined) {
			int tmp = combined;

			combined = val;
			val = tmp;
		}

		gap = combined - val;
		if (gap == 0)
			combined += 3;
		else if (gap <= 2)
			combined += 2;
		else if (gap <= 6)
			combined += 1;
	}

	return combined;
}
EXPORT_SYMBOL(mt76_rx_signal);
1186 
1187 static void
mt76_rx_convert(struct mt76_dev * dev,struct sk_buff * skb,struct ieee80211_hw ** hw,struct ieee80211_sta ** sta)1188 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1189 		struct ieee80211_hw **hw,
1190 		struct ieee80211_sta **sta)
1191 {
1192 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1193 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1194 	struct mt76_rx_status mstat;
1195 
1196 	mstat = *((struct mt76_rx_status *)skb->cb);
1197 	memset(status, 0, sizeof(*status));
1198 
1199 	status->flag = mstat.flag;
1200 	status->freq = mstat.freq;
1201 	status->enc_flags = mstat.enc_flags;
1202 	status->encoding = mstat.encoding;
1203 	status->bw = mstat.bw;
1204 	if (status->encoding == RX_ENC_EHT) {
1205 		status->eht.ru = mstat.eht.ru;
1206 		status->eht.gi = mstat.eht.gi;
1207 	} else {
1208 		status->he_ru = mstat.he_ru;
1209 		status->he_gi = mstat.he_gi;
1210 		status->he_dcm = mstat.he_dcm;
1211 	}
1212 	status->rate_idx = mstat.rate_idx;
1213 	status->nss = mstat.nss;
1214 	status->band = mstat.band;
1215 	status->signal = mstat.signal;
1216 	status->chains = mstat.chains;
1217 	status->ampdu_reference = mstat.ampdu_ref;
1218 	status->device_timestamp = mstat.timestamp;
1219 	status->mactime = mstat.timestamp;
1220 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1221 	if (status->signal <= -128)
1222 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1223 
1224 	if (ieee80211_is_beacon(hdr->frame_control) ||
1225 	    ieee80211_is_probe_resp(hdr->frame_control))
1226 		status->boottime_ns = ktime_get_boottime_ns();
1227 
1228 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1229 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1230 		     sizeof(mstat.chain_signal));
1231 	memcpy(status->chain_signal, mstat.chain_signal,
1232 	       sizeof(mstat.chain_signal));
1233 
1234 	if (mstat.wcid) {
1235 		status->link_valid = mstat.wcid->link_valid;
1236 		status->link_id = mstat.wcid->link_id;
1237 	}
1238 
1239 	*sta = wcid_to_sta(mstat.wcid);
1240 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1241 }
1242 
/* CCMP packet-number replay detection for decrypted RX frames.
 *
 * A frame whose PN is not strictly greater than the last accepted PN for
 * its TID is flagged RX_FLAG_ONLY_MONITOR so mac80211 drops it from the
 * data path. Only active when the key was registered via
 * mt76_wcid_key_setup() (wcid->rx_check_pn).
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* one replay counter per TID */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* big-endian PN compare: <= 0 means replayed or duplicated */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1300 
/* Account the estimated RX airtime of one frame (or one aggregate).
 *
 * The airtime is always added to the per-device BSS RX counter (under
 * cc_lock); it is additionally reported to mac80211's per-station
 * airtime accounting when the frame belongs to a known station.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* minimal rx-status copy with just the fields the airtime
	 * calculation consumes
	 */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1329 
/* Report the airtime of the A-MPDU batch accumulated so far and reset
 * the batch state. The wcid is re-resolved from its index here, since
 * the station may have been removed while the batch was open.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
1351 
/* Software RX airtime accounting entry point, called per RX frame.
 *
 * Frames of the same A-MPDU (matching ampdu_ref) are batched: their
 * lengths are summed and reported once via mt76_airtime_flush_ampdu()
 * when the next aggregate (or a non-aggregated frame) arrives.
 * Only active when the driver sets MT_DRV_SW_RX_AIRTIME.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* only account unassociated frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new aggregate (or a plain frame) closes the previous batch */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			/* store the index, not the pointer; the wcid may
			 * go away before the batch is flushed
			 */
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1391 
/* Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-Poll frames, feed airtime accounting, update RSSI/inactivity, and
 * drive mac80211 power-save state transitions when the driver opted in
 * via MT_WCID_FLAG_CHECK_PS.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll frames may arrive without a resolved wcid; look the
	 * station up by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* rssi EWMA stores the magnitude of the (non-positive) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete, mgmt/data frames carry a trustworthy PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no PS state change */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* flag update straddles the driver callback: set before entering
	 * PS, clear after leaving it
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1459 
/* Hand a batch of fully processed RX frames to mac80211.
 *
 * Each skb may carry additional A-MSDU subframes chained on its
 * frag_list; these are detached and delivered individually. Frames are
 * collected into a list under rx_lock and then pushed either through
 * GRO (@napi set) or the plain netif receive path.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN check only on the head frame; it is detached from
		 * its A-MSDU chain before conversion
		 */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1499 
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1500 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1501 			   struct napi_struct *napi)
1502 {
1503 	struct sk_buff_head frames;
1504 	struct sk_buff *skb;
1505 
1506 	__skb_queue_head_init(&frames);
1507 
1508 	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1509 		mt76_check_sta(dev, skb);
1510 		if (mtk_wed_device_active(&dev->mmio.wed))
1511 			__skb_queue_tail(&frames, skb);
1512 		else
1513 			mt76_rx_aggr_reorder(skb, &frames);
1514 	}
1515 
1516 	mt76_rx_complete(dev, &frames, napi);
1517 }
1518 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1519 
/* Add a station: let the driver allocate its state, wire the per-AC
 * txqs to the assigned wcid index, then publish the wcid via RCU.
 * Called under no lock; takes dev->mutex itself.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point every txq of the station at its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	/* make the wcid visible to the RX path */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1555 
/* Tear down a station's driver state. Caller must hold dev->mutex
 * (see mt76_sta_remove()). Stops RX aggregation sessions, invokes the
 * driver hook, flushes pending tx status/queues via mt76_wcid_cleanup()
 * and releases the wcid index.
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1575 
/* Locked wrapper around __mt76_sta_remove(). */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1586 
/* ieee80211_ops::sta_state implementation.
 *
 * NOTEXIST->NONE allocates the station, NONE->NOTEXIST removes it;
 * the remaining transitions of interest are mapped onto the driver's
 * optional sta_event() hook (assoc / authorize / disassoc).
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* operate on the phy the vif is actually bound to */
	phy = mt76_vif_phy(hw, vif);
	if (!phy)
		return -EINVAL;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(phy, vif, sta);

	if (!dev->drv->sta_event)
		return 0;

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
1626 
/* ieee80211_ops::sta_pre_rcu_remove implementation: unpublish the wcid
 * before mac80211 frees the station. Taking status_lock around the RCU
 * assignment keeps the tx-status path from picking up a wcid that is
 * about to disappear.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1641 
/* Initialize the generic fields of a wcid: no hardware key yet
 * (hw_key_idx = -1), owning band, and empty tx/poll bookkeeping.
 */
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
{
	wcid->hw_key_idx = -1;
	wcid->phy_idx = band_idx;

	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);

	INIT_LIST_HEAD(&wcid->poll_list);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
1657 
/* Release everything a wcid still holds on the TX side: pending
 * tx-status skbs, the pktid idr, its slot on the phy tx list and any
 * queued-but-unsent frames. All collected skbs are freed back to
 * mac80211 at the end.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect outstanding tx-status frames (-1 = all pktids) */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* splice pending frames onto the free list under the queue lock */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1688 
/* Queue @wcid for stats polling unless it is already queued.
 * Skipped entirely while the MCU is being reset.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1700 
mt76_get_power_bound(struct mt76_phy * phy,s8 txpower)1701 s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
1702 {
1703 	int n_chains = hweight16(phy->chainmask);
1704 
1705 	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
1706 	txpower -= mt76_tx_power_nss_delta(n_chains);
1707 
1708 	return txpower;
1709 }
1710 EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1711 
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,unsigned int link_id,int * dbm)1712 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1713 		     unsigned int link_id, int *dbm)
1714 {
1715 	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
1716 	int n_chains, delta;
1717 
1718 	if (!phy)
1719 		return -EINVAL;
1720 
1721 	n_chains = hweight16(phy->chainmask);
1722 	delta = mt76_tx_power_nss_delta(n_chains);
1723 	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1724 
1725 	return 0;
1726 }
1727 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1728 
/* Store user-supplied SAR power limits into the per-phy frp[] table.
 *
 * Returns -EINVAL unless the specs are of type NL80211_SAR_TYPE_POWER
 * and non-empty. NOTE(review): freq_range_index is used to index both
 * frp[] and capa->freq_ranges[] without a local bounds check — assumed
 * to be validated by cfg80211 before reaching the driver; confirm.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitaton in 0.25dbm */
		s32 power = sar->sub_specs[i].power >> 1;

		/* clamp out-of-range values to "no limit" (127) */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1754 
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1755 int mt76_get_sar_power(struct mt76_phy *phy,
1756 		       struct ieee80211_channel *chan,
1757 		       int power)
1758 {
1759 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1760 	int freq, i;
1761 
1762 	if (!capa || !phy->frp)
1763 		return power;
1764 
1765 	if (power > 127 || power < -127)
1766 		power = 127;
1767 
1768 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1769 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1770 		if (phy->frp[i].range &&
1771 		    freq >= phy->frp[i].range->start_freq &&
1772 		    freq < phy->frp[i].range->end_freq) {
1773 			power = min_t(int, phy->frp[i].power, power);
1774 			break;
1775 		}
1776 	}
1777 
1778 	return power;
1779 }
1780 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1781 
1782 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1783 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1784 {
1785 	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
1786 		ieee80211_csa_finish(vif, 0);
1787 }
1788 
/* Complete a pending channel-switch announcement on all active
 * interfaces; no-op unless mt76_csa_check() flagged completion.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1801 
1802 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1803 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1804 {
1805 	struct mt76_dev *dev = priv;
1806 
1807 	if (!vif->bss_conf.csa_active)
1808 		return;
1809 
1810 	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
1811 }
1812 
/* Poll all active interfaces for a finished CSA countdown; the result
 * is latched in dev->csa_complete for mt76_csa_finish().
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1820 
/* ieee80211_ops::set_tim stub: mt76 rebuilds the TIM element when the
 * beacon is fetched, so nothing needs to happen here.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1827 
/* Re-insert the CCMP IV (stripped by hardware) after the 802.11 header,
 * rebuilding it from the PN saved in the rx status, and clear
 * RX_FLAG_IV_STRIPPED so mac80211 parses the header accordingly.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *iv, *pn = status->iv;

	/* open an 8-byte gap right after the 802.11 header */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	iv = skb->data + hdr_len;

	iv[0] = pn[5];
	iv[1] = pn[4];
	iv[2] = 0;
	iv[3] = 0x20 | (key_id << 6);	/* Ext IV bit + key index */
	iv[4] = pn[3];
	iv[5] = pn[2];
	iv[6] = pn[1];
	iv[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1850 
/* Translate a hardware rate index into the matching position in
 * @sband->bitrates. CCK rates only exist on 2 GHz; OFDM lookups on
 * 2 GHz skip the four leading CCK entries. Returns 0 when no entry
 * matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	bool is_2g = sband->band == NL80211_BAND_2GHZ;
	int start = 0;
	int i;

	if (cck) {
		if (!is_2g)
			return 0;

		idx &= ~BIT(2); /* strip the short-preamble flag */
	} else if (is_2g) {
		start = 4; /* skip the CCK entries */
	}

	for (i = start; i < sband->n_bitrates; i++)
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1875 
/* ieee80211_ops::sw_scan_start implementation: only flags the phy as
 * scanning (checked e.g. by mt76_phy_dfs_state()).
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1884 
/* ieee80211_ops::sw_scan_complete implementation: clear the scanning
 * flag set by mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1892 
/* ieee80211_ops::get_antenna implementation: report the union of the
 * chain masks of all phys bound to @hw; RX equals TX.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	u32 mask = 0;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *cur = dev->phys[i];

		if (cur && cur->hw == hw)
			mask |= cur->chainmask;
	}
	mutex_unlock(&dev->mutex);

	*tx_ant = mask;
	*rx_ant = mask;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1910 
/* Allocate and initialize one hardware queue.
 *
 * The queue structure is devm-managed; the ring itself is set up by the
 * bus-specific queue_ops->alloc(). Returns ERR_PTR() on failure.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;
	hwq->wed = wed;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
1932 
/* Accumulate one station's TX statistics into the ethtool data array.
 *
 * The emission order here must match the driver's corresponding stat
 * string table: per-PHY-mode counters (EHT modes only when @eht), then
 * bandwidth, MCS (12 or 14 entries) and NSS buckets. The number of
 * slots written is recorded in wi->worker_stat_count.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* the widest bandwidth bucket only exists for EHT */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1968 
/* Append aggregated page-pool statistics of all RX queues to the
 * ethtool data array and advance *index by the number of slots used.
 * Compiles to a no-op when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1983 
mt76_phy_dfs_state(struct mt76_phy * phy)1984 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1985 {
1986 	struct ieee80211_hw *hw = phy->hw;
1987 	struct mt76_dev *dev = phy->dev;
1988 
1989 	if (dev->region == NL80211_DFS_UNSET ||
1990 	    test_bit(MT76_SCANNING, &phy->state))
1991 		return MT_DFS_STATE_DISABLED;
1992 
1993 	if (!phy->radar_enabled) {
1994 		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1995 		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1996 			return MT_DFS_STATE_ACTIVE;
1997 
1998 		return MT_DFS_STATE_DISABLED;
1999 	}
2000 
2001 	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
2002 		return MT_DFS_STATE_CAC;
2003 
2004 	return MT_DFS_STATE_ACTIVE;
2005 }
2006 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2007 
/* Tear down per-vif state on interface removal: unpublish the primary
 * link, abort any ongoing scan, and abort remain-on-channel if one is
 * active on some phy.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2019