1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024-2025 Intel Corporation
4 */
5
6 #include <linux/ieee80211.h>
7 #include <kunit/static_stub.h>
8
9 #include "sta.h"
10 #include "hcmd.h"
11 #include "iface.h"
12 #include "mlo.h"
13 #include "key.h"
14 #include "agg.h"
15 #include "tlc.h"
16 #include "fw/api/sta.h"
17 #include "fw/api/mac.h"
18 #include "fw/api/rx.h"
19
20 int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
21 struct ieee80211_link_sta *link_sta)
22 {
23 struct iwl_mld_link_sta *mld_link_sta;
24
25 /* This function should only be used with the wiphy lock held.
26 * In other cases, it is not guaranteed that the link_sta will exist
27 * in the driver too, and that is checked here.
28 */
29 lockdep_assert_wiphy(mld->wiphy);
30
31 /* This is not meant to be called with a NULL pointer */
32 if (WARN_ON(!link_sta))
33 return -ENOENT;
34
35 mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
36 if (!mld_link_sta) {
37 WARN_ON(!iwl_mld_error_before_recovery(mld));
38 return -ENOENT;
39 }
40
41 return mld_link_sta->fw_id;
42 }
43
44 static void
45 iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta,
46 struct ieee80211_bss_conf *link,
47 __le32 *tx_ampdu_max_size,
48 __le32 *tx_ampdu_spacing)
49 {
50 u32 agg_size = 0, mpdu_dens = 0;
51
52 if (WARN_ON(!link_sta || !link))
53 return;
54
55 /* Note that we always use only legacy & highest supported PPDUs, so
56 * out of Draft P802.11be D3.0 Table 10-12a (Fields used for calculating
57 * the maximum A-MPDU size of various PPDU types in different bands),
58 * we only need to worry about the highest supported PPDU type here.
59 */
60
61 if (link_sta->ht_cap.ht_supported) {
62 agg_size = link_sta->ht_cap.ampdu_factor;
63 mpdu_dens = link_sta->ht_cap.ampdu_density;
64 }
65
66 if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
67 /* overwrite HT values on 6 GHz */
68 mpdu_dens =
69 le16_get_bits(link_sta->he_6ghz_capa.capa,
70 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
71 agg_size =
72 le16_get_bits(link_sta->he_6ghz_capa.capa,
73 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
74 } else if (link_sta->vht_cap.vht_supported) {
75 /* if VHT supported overwrite HT value */
76 agg_size =
77 u32_get_bits(link_sta->vht_cap.cap,
78 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
79 }
80
81 /* D6.0 10.12.2 A-MPDU length limit rules
82 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
83 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
84 * Exponent field in its HT Capabilities, VHT Capabilities,
85 * and HE 6 GHz Band Capabilities elements (if present) and the
86 * Maximum A-MPDU Length Exponent Extension field in its HE
87 * Capabilities element.
88 */
89 if (link_sta->he_cap.has_he)
90 agg_size +=
91 u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
92 IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
93
94 if (link_sta->eht_cap.has_eht)
95 agg_size +=
96 u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
97 IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
98
99 /* Limit to max A-MPDU supported by FW */
100 agg_size = min_t(u32, agg_size,
101 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
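/* Illustrative note (not from the FW API docs): the accumulated value is
 * an exponent - the maximum A-MPDU length works out to roughly
 * 2^(13 + agg_size) - 1 octets, e.g. a VHT exponent of 7 plus an HE
 * extension of 1 gives 2^21 - 1 (~2 MB); the cap above matches the FW's
 * "4M" limit.
 */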
102
103 *tx_ampdu_max_size = cpu_to_le32(agg_size);
104 *tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
105 }
106
107 static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta)
108 {
109 u8 uapsd_acs = 0;
110
111 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
112 uapsd_acs |= BIT(AC_BK);
113 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
114 uapsd_acs |= BIT(AC_BE);
115 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
116 uapsd_acs |= BIT(AC_VI);
117 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
118 uapsd_acs |= BIT(AC_VO);
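/* Duplicate the bitmap into both nibbles - the assumption here is that
 * the FW field carries the trigger-enabled and delivery-enabled AC sets
 * in separate nibbles, and for U-APSD they are identical.
 */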
119
120 return uapsd_acs | uapsd_acs << 4;
121 }
122
123 static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
124 {
125 u8 byte_num = ppe_pos_bit / 8;
126 u8 bit_num = ppe_pos_bit % 8;
127 u8 residue_bits;
128 u8 res;
129
130 if (bit_num <= 5)
131 return (ppe[byte_num] >> bit_num) &
132 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
133
134 /* If bit_num > 5, we have to combine bits with the next byte.
135 * Calculate how many bits we need to take from the current byte (called
136 * here "residue_bits"), and add them to bits from the next byte.
137 */
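/* Worked example (PPET values are IEEE80211_PPE_THRES_INFO_PPET_SIZE ==
 * 3 bits wide): for bit_num == 6, residue_bits == 2, so the result is
 * bits 6-7 of ppe[byte_num] as the low bits plus bit 0 of
 * ppe[byte_num + 1] as the high bit.
 */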
138
139 residue_bits = 8 - bit_num;
140
141 res = (ppe[byte_num + 1] &
142 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
143 residue_bits;
144 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
145
146 return res;
147 }
148
149 static void iwl_mld_parse_ppe(struct iwl_mld *mld,
150 struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
151 u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
152 bool inheritance)
153 {
154 /* FW currently supports only nss == MAX_HE_SUPP_NSS
155 *
156 * If nss > MAX: we can ignore values we don't support
157 * If nss < MAX: we can set zeros in other streams
158 */
159 if (nss > MAX_HE_SUPP_NSS) {
160 IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss,
161 MAX_HE_SUPP_NSS);
162 nss = MAX_HE_SUPP_NSS;
163 }
164
165 for (int i = 0; i < nss; i++) {
166 u8 ru_index_tmp = ru_index_bitmap << 1;
167 u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
168
169 for (u8 bw = 0;
170 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
171 bw++) {
172 ru_index_tmp >>= 1;
173
174 /* According to the 11be spec, if the PPE Thresholds for a specific BW
175 * aren't present, they should be inherited from the last BW for which
176 * we had PPE Thresholds. In 11ax though, there is no such
177 * inheritance - just continue in this case.
178 */
179 if (!(ru_index_tmp & 1)) {
180 if (inheritance)
181 goto set_thresholds;
182 else
183 continue;
184 }
185
186 high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
187 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
188 low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
189 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
190
191 set_thresholds:
192 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
193 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
194 }
195 }
196 }
197
198 static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld,
199 struct ieee80211_link_sta *link_sta,
200 struct iwl_he_pkt_ext_v2 *pkt_ext,
201 bool inheritance)
202 {
203 u8 nss = (link_sta->he_cap.ppe_thres[0] &
204 IEEE80211_PPE_THRES_NSS_MASK) + 1;
205 u8 *ppe = &link_sta->he_cap.ppe_thres[0];
206 u8 ru_index_bitmap =
207 u8_get_bits(*ppe,
208 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
209 /* Starting after PPE header */
210 u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
211
212 iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
213 inheritance);
214 }
215
216 static int
217 iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
218 u8 nominal_padding)
219 {
220 int low_th = -1;
221 int high_th = -1;
222
223 /* all the macros are the same for EHT and HE */
224 switch (nominal_padding) {
225 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
226 low_th = IWL_HE_PKT_EXT_NONE;
227 high_th = IWL_HE_PKT_EXT_NONE;
228 break;
229 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
230 low_th = IWL_HE_PKT_EXT_BPSK;
231 high_th = IWL_HE_PKT_EXT_NONE;
232 break;
233 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
234 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
235 low_th = IWL_HE_PKT_EXT_NONE;
236 high_th = IWL_HE_PKT_EXT_BPSK;
237 break;
238 }
239
240 if (low_th < 0 || high_th < 0)
241 return -EINVAL;
242
243 /* Set the PPE thresholds accordingly */
244 for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
245 for (u8 bw = 0;
246 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
247 bw++) {
248 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
249 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
250 }
251 }
252
253 return 0;
254 }
255
256 static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
257 u8 nominal_padding)
258 {
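/* As the caller notes for MCS 12-13, the maximum of the HE PPE Threshold
 * and the Common Nominal Packet Padding must be used, so entries that are
 * still "none" are bumped to the 4096-QAM threshold here.
 */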
259 for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
260 for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
261 bw++) {
262 u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
263
264 if (nominal_padding >
265 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
266 qam_th[1] == IWL_HE_PKT_EXT_NONE)
267 qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
268 else if (nominal_padding ==
269 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
270 qam_th[0] == IWL_HE_PKT_EXT_NONE &&
271 qam_th[1] == IWL_HE_PKT_EXT_NONE)
272 qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
273 }
274 }
275 }
276
277 static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
278 struct ieee80211_link_sta *link_sta,
279 struct iwl_he_pkt_ext_v2 *pkt_ext)
280 {
281 if (WARN_ON(!link_sta))
282 return;
283
284 /* Initialize the PPE thresholds to "None" (7), as described in Table
285 * 9-262ac of 802.11ax/D3.0.
286 */
287 memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));
288
289 if (link_sta->eht_cap.has_eht) {
290 u8 nominal_padding =
291 u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
292 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
293
294 /* If PPE Thresholds exists, parse them into a FW-familiar
295 * format.
296 */
297 if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
298 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
299 u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
300 IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
301 u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
302 u8 ru_index_bitmap =
303 u16_get_bits(*ppe,
304 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
305 /* Starting after PPE header */
306 u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
307
308 iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
309 ppe, ppe_pos_bit, true);
310 /* EHT PPE Thresholds don't exist - set the API according to
311 * the HE PPE Thresholds
312 */
313 } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
314 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
315 /* Even though the HE Capabilities IE doesn't contain PPE
316 * Thresholds for BW 320 MHz, thresholds for this BW will
317 * be filled in with the same values as for 160 MHz, due to
318 * the inheritance, as required.
319 */
320 iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
321 true);
322
323 /* According to the requirements, for MCSs 12-13 the
324 * maximum value between HE PPE Threshold and Common
325 * Nominal Packet Padding needs to be taken
326 */
327 iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);
328
329 /* If PPE Thresholds aren't present in either the EHT IE or the HE IE,
330 * take the Thresholds from the Common Nominal Packet Padding field.
331 */
332 } else {
333 iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
334 nominal_padding);
335 }
336 } else if (link_sta->he_cap.has_he) {
337 /* If PPE Thresholds exist, parse them into a FW-familiar format. */
338 if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
339 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
340 iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
341 false);
342 /* PPE Thresholds don't exist - set the API PPE values
343 * according to the Common Nominal Packet Padding field.
344 */
345 } else {
346 u8 nominal_padding =
347 u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
348 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
349 if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
350 iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
351 nominal_padding);
352 }
353 }
354
355 for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
356 for (int bw = 0;
357 bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
358 bw++) {
359 u8 *qam_th =
360 &pkt_ext->pkt_ext_qam_th[i][bw][0];
361
362 IWL_DEBUG_HT(mld,
363 "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
364 i, bw, qam_th[0], qam_th[1]);
365 }
366 }
367 }
368
369 static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
370 {
371 u8 *mac_cap_info =
372 &link_sta->he_cap.he_cap_elem.mac_cap_info[0];
373 u32 htc_flags = 0;
374
375 if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
376 htc_flags |= IWL_HE_HTC_SUPPORT;
377 if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
378 (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
379 u8 link_adap =
380 ((mac_cap_info[2] &
381 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
382 (mac_cap_info[1] &
383 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
384
385 if (link_adap == 2)
386 htc_flags |=
387 IWL_HE_HTC_LINK_ADAP_UNSOLICITED;
388 else if (link_adap == 3)
389 htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH;
390 }
391 if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
392 htc_flags |= IWL_HE_HTC_BSR_SUPP;
393 if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
394 htc_flags |= IWL_HE_HTC_OMI_SUPP;
395 if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
396 htc_flags |= IWL_HE_HTC_BQR_SUPP;
397
398 return htc_flags;
399 }
400
401 static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
402 const struct iwl_sta_cfg_cmd *cmd)
403 {
404 u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
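/* Older FW advertises version 1 of STA_CONFIG_CMD and expects the
 * shorter v1 layout, so only send as many bytes as that version defines.
 */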
405 int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
406 sizeof(*cmd) :
407 sizeof(struct iwl_sta_cfg_cmd_v1);
408 int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
409 if (ret)
410 IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
411 return ret;
412 }
413
414 static int
415 iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
416 struct ieee80211_link_sta *link_sta)
417 {
418 struct ieee80211_sta *sta = link_sta->sta;
419 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
420 struct ieee80211_bss_conf *link;
421 struct iwl_mld_link *mld_link;
422 struct iwl_sta_cfg_cmd cmd = {};
423 int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
424
425 lockdep_assert_wiphy(mld->wiphy);
426
427 link = link_conf_dereference_protected(mld_sta->vif,
428 link_sta->link_id);
429
430 mld_link = iwl_mld_link_from_mac80211(link);
431
432 if (WARN_ON(!link || !mld_link) || fw_id < 0)
433 return -EINVAL;
434
435 cmd.sta_id = cpu_to_le32(fw_id);
436 cmd.station_type = cpu_to_le32(mld_sta->sta_type);
437 cmd.link_id = cpu_to_le32(mld_link->fw_id);
438
439 memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
440 memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
441
442 if (mld_sta->sta_state >= IEEE80211_STA_ASSOC)
443 cmd.assoc_id = cpu_to_le32(sta->aid);
444
445 if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
446 cmd.mfp = cpu_to_le32(1);
447
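/* A single RX spatial stream is reported to the FW as SISO, anything
 * above that as MIMO (see also the SMPS handling below).
 */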
448 switch (link_sta->rx_nss) {
449 case 1:
450 cmd.mimo = cpu_to_le32(0);
451 break;
452 case 2 ... 8:
453 cmd.mimo = cpu_to_le32(1);
454 break;
455 }
456
457 switch (link_sta->smps_mode) {
458 case IEEE80211_SMPS_AUTOMATIC:
459 case IEEE80211_SMPS_NUM_MODES:
460 WARN_ON(1);
461 break;
462 case IEEE80211_SMPS_STATIC:
463 /* override NSS */
464 cmd.mimo = cpu_to_le32(0);
465 break;
466 case IEEE80211_SMPS_DYNAMIC:
467 cmd.mimo_protection = cpu_to_le32(1);
468 break;
469 case IEEE80211_SMPS_OFF:
470 /* nothing */
471 break;
472 }
473
474 iwl_mld_fill_ampdu_size_and_dens(link_sta, link,
475 &cmd.tx_ampdu_max_size,
476 &cmd.tx_ampdu_spacing);
477
478 if (sta->wme) {
479 cmd.sp_length =
480 cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
481 cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta));
482 }
483
484 if (link_sta->he_cap.has_he) {
485 cmd.trig_rnd_alloc =
486 cpu_to_le32(link->uora_exists ? 1 : 0);
487
488 /* PPE Thresholds */
489 iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext);
490
491 /* HTC flags */
492 cmd.htc_flags =
493 cpu_to_le32(iwl_mld_get_htc_flags(link_sta));
494
495 if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
496 IEEE80211_HE_MAC_CAP2_ACK_EN)
497 cmd.ack_enabled = cpu_to_le32(1);
498 }
499
500 return iwl_mld_send_sta_cmd(mld, &cmd);
501 }
502
503 IWL_MLD_ALLOC_FN(link_sta, link_sta)
504
505 static int
506 iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
507 {
508 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
509 struct iwl_mld_link_sta *mld_link_sta;
510 int ret;
511 u8 fw_id;
512
513 lockdep_assert_wiphy(mld->wiphy);
514
515 /* We will fail to add it to the FW anyway */
516 if (iwl_mld_error_before_recovery(mld))
517 return -ENODEV;
518
519 mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
520
521 /* We need to preserve the fw sta ids during a restart, since the fw
522 * will recover SN/PN for them; this is why the mld_link_sta exists.
523 */
524 if (mld_link_sta) {
525 /* But if we are not restarting, this is not OK */
526 WARN_ON(!mld->fw_status.in_hw_restart);
527
528 /* Avoid adding a STA that is already in FW to avoid an assert */
529 if (WARN_ON(mld_link_sta->in_fw))
530 return -EINVAL;
531
532 fw_id = mld_link_sta->fw_id;
533 goto add_to_fw;
534 }
535
536 /* Allocate a fw id and map it to the link_sta */
537 ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
538 if (ret)
539 return ret;
540
541 if (link_sta == &link_sta->sta->deflink) {
542 mld_link_sta = &mld_sta->deflink;
543 } else {
544 mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL);
545 if (!mld_link_sta)
546 return -ENOMEM;
547 }
548
549 mld_link_sta->fw_id = fw_id;
550 rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta);
551
552 add_to_fw:
553 ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
554 if (ret) {
555 RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL);
556 RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
557 if (link_sta != &link_sta->sta->deflink)
558 kfree(mld_link_sta);
559 return ret;
560 }
561 mld_link_sta->in_fw = true;
562
563 return 0;
564 }
565
566 static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id)
567 {
568 struct iwl_remove_sta_cmd cmd = {
569 .sta_id = cpu_to_le32(fw_sta_id),
570 };
571 int ret;
572
573 ret = iwl_mld_send_cmd_pdu(mld,
574 WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
575 &cmd);
576 if (ret)
577 IWL_ERR(mld, "Failed to remove station. Id=%d\n", fw_sta_id);
578
579 return ret;
580 }
581
582 static void
583 iwl_mld_remove_link_sta(struct iwl_mld *mld,
584 struct ieee80211_link_sta *link_sta)
585 {
586 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
587 struct iwl_mld_link_sta *mld_link_sta =
588 iwl_mld_link_sta_from_mac80211(link_sta);
589
590 if (WARN_ON(!mld_link_sta))
591 return;
592
593 iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id);
594 mld_link_sta->in_fw = false;
595
596 /* Now that the STA doesn't exist in FW, we don't expect any new
597 * notifications for it. Cancel the ones that are already pending.
598 */
599 iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA,
600 mld_link_sta->fw_id);
601
602 /* This will not be done upon reconfig, so do it also when we
603 * failed to remove the STA from the fw.
604 */
605 RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL);
606 RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
607 if (mld_link_sta != &mld_sta->deflink)
608 kfree_rcu(mld_link_sta, rcu_head);
609 }
610
611 static void iwl_mld_set_max_amsdu_len(struct iwl_mld *mld,
612 struct ieee80211_link_sta *link_sta)
613 {
614 const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
615
616 /* For EHT, HE and VHT we can use the value as it was calculated by
617 * mac80211. For HT, mac80211 doesn't enforce the 4095-byte limit
618 * (IEEE80211_MAX_MPDU_LEN_HT_BA), so force it here.
619 */
620 if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
621 link_sta->vht_cap.vht_supported ||
622 !ht_cap->ht_supported ||
623 !(ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU))
624 return;
625
626 link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
627 ieee80211_sta_recalc_aggregates(link_sta->sta);
628 }
629
630 int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
631 struct ieee80211_sta *sta)
632 {
633 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
634 struct ieee80211_link_sta *link_sta;
635 int link_id;
636
637 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
638 int ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
639
640 if (ret)
641 return ret;
642
643 if (mld_sta->sta_state == IEEE80211_STA_ASSOC)
644 iwl_mld_set_max_amsdu_len(mld, link_sta);
645 }
646 return 0;
647 }
648
649 static void iwl_mld_destroy_sta(struct ieee80211_sta *sta)
650 {
651 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
652
653 kfree(mld_sta->dup_data);
654 kfree(mld_sta->mpdu_counters);
655 }
656
657 static int
658 iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
659 {
660 struct iwl_mld_rxq_dup_data *dup_data;
661
662 if (mld->fw_status.in_hw_restart)
663 return 0;
664
665 dup_data = kcalloc(mld->trans->info.num_rxqs, sizeof(*dup_data),
666 GFP_KERNEL);
667 if (!dup_data)
668 return -ENOMEM;
669
670 /* Initialize all the last_seq values to 0xffff which can never
671 * compare equal to the frame's seq_ctrl in the check in
672 * iwl_mld_is_dup() since the lower 4 bits are the fragment
673 * number and fragmented packets don't reach that function.
674 *
675 * This thus allows receiving a packet with seqno 0 and the
676 * retry bit set as the very first packet on a new TID.
677 */
678 for (int q = 0; q < mld->trans->info.num_rxqs; q++)
679 memset(dup_data[q].last_seq, 0xff,
680 sizeof(dup_data[q].last_seq));
681 mld_sta->dup_data = dup_data;
682
683 return 0;
684 }
685
686 static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
687 struct ieee80211_sta *sta)
688 {
689 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
690 struct ieee80211_vif *vif = mld_sta->vif;
691
692 if (mld->fw_status.in_hw_restart)
693 return;
694
695 /* MPDUs are counted only when EMLSR is possible */
696 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
697 sta->tdls || !ieee80211_vif_is_mld(vif))
698 return;
699
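/* Driver design note: the counters are kept per RX queue, each with its
 * own lock, so the per-queue RX paths don't contend with each other
 * (see iwl_mld_count_mpdu()).
 */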
700 mld_sta->mpdu_counters = kcalloc(mld->trans->info.num_rxqs,
701 sizeof(*mld_sta->mpdu_counters),
702 GFP_KERNEL);
703 if (!mld_sta->mpdu_counters)
704 return;
705
706 for (int q = 0; q < mld->trans->info.num_rxqs; q++)
707 spin_lock_init(&mld_sta->mpdu_counters[q].lock);
708 }
709
710 static int
711 iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
712 struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
713 {
714 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
715
716 mld_sta->vif = vif;
717 mld_sta->sta_type = type;
718 mld_sta->mld = mld;
719
720 if (!mld->fw_status.in_hw_restart)
721 for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
722 iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
723
724 iwl_mld_alloc_mpdu_counters(mld, sta);
725
726 iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
727
728 return iwl_mld_alloc_dup_data(mld, mld_sta);
729 }
730
731 int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
732 struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
733 {
734 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
735 struct ieee80211_link_sta *link_sta;
736 int link_id;
737 int ret;
738
739 ret = iwl_mld_init_sta(mld, sta, vif, type);
740 if (ret)
741 return ret;
742
743 /* We could have added only the deflink link_sta, but it would not work
744 * in the restart case if the single link that is active during
745 * reconfig is not the deflink one.
746 */
747 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
748 ret = iwl_mld_add_link_sta(mld, link_sta);
749 if (ret)
750 goto destroy_sta;
751 }
752
753 return 0;
754
755 destroy_sta:
756 iwl_mld_destroy_sta(sta);
757
758 return ret;
759 }
760
761 void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
762 {
763 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
764 struct ieee80211_link_sta *link_sta;
765 int link_id;
766
767 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
768 int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
769
770 if (fw_sta_id < 0)
771 continue;
772
773 iwl_mld_flush_link_sta_txqs(mld, fw_sta_id);
774 }
775 }
776
777 void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
778 {
779 /* Avoid a warning in iwl_trans_wait_txq_empty if we are anyway on the
780 * way to a restart.
781 */
782 if (iwl_mld_error_before_recovery(mld))
783 return;
784
785 for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
786 struct iwl_mld_txq *mld_txq =
787 iwl_mld_txq_from_mac80211(sta->txq[i]);
788
789 if (!mld_txq->status.allocated)
790 continue;
791
792 iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
793 }
794 }
795
796 void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
797 {
798 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
799 struct ieee80211_vif *vif = mld_sta->vif;
800 struct ieee80211_link_sta *link_sta;
801 u8 link_id;
802
803 lockdep_assert_wiphy(mld->wiphy);
804
805 /* Tell the HW to flush the queues */
806 iwl_mld_flush_sta_txqs(mld, sta);
807
808 /* Wait for trans to empty its queues */
809 iwl_mld_wait_sta_txqs_empty(mld, sta);
810
811 /* Now we can remove the queues */
812 for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
813 iwl_mld_remove_txq(mld, sta->txq[i]);
814
815 for_each_sta_active_link(vif, sta, link_sta, link_id) {
816 /* mac80211 will remove the groupwise keys after the sta is
817 * removed, but the FW expects all the keys to be removed before
818 * the STA is, so remove them all here.
819 */
820 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
821 iwl_mld_remove_ap_keys(mld, vif, sta, link_id);
822
823 /* Remove the link_sta */
824 iwl_mld_remove_link_sta(mld, link_sta);
825 }
826
827 iwl_mld_destroy_sta(sta);
828 }
829
830 u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
831 {
832 struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
833 struct ieee80211_link_sta *link_sta;
834 unsigned int link_id;
835 u32 result = 0;
836
837 KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
838
839 /* This function should only be used with the wiphy lock held.
840 * In other cases, it is not guaranteed that the link_sta will exist
841 * in the driver too, and that is checked in
842 * iwl_mld_fw_sta_id_from_link_sta.
843 */
844 lockdep_assert_wiphy(mld->wiphy);
845
846 for_each_sta_active_link(vif, sta, link_sta, link_id) {
847 int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
848
849 if (!(fw_id < 0))
850 result |= BIT(fw_id);
851 }
852
853 return result;
854 }
855 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
856
857 static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
858 u32 count, bool tx)
859 {
860 struct iwl_mld_per_q_mpdu_counter *queue_counter;
861 struct iwl_mld_per_link_mpdu_counter *link_counter;
862 struct iwl_mld_vif *mld_vif;
863 struct iwl_mld_sta *mld_sta;
864 struct iwl_mld_link *mld_link;
865 struct iwl_mld *mld;
866 int total_mpdus = 0;
867
868 if (WARN_ON(!link_sta))
869 return;
870
871 mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
872 if (!mld_sta->mpdu_counters)
873 return;
874
875 mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
876 mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);
877
878 if (WARN_ON_ONCE(!mld_link))
879 return;
880
881 queue_counter = &mld_sta->mpdu_counters[queue];
882
883 mld = mld_vif->mld;
884
885 /* If the window is over, first clear the counters.
886 * When we are not blocked by TPT, the window is managed by check_tpt_wk.
887 */
888 if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
889 time_is_before_jiffies(queue_counter->window_start_time +
890 IWL_MLD_TPT_COUNT_WINDOW)) {
891 memset(queue_counter->per_link, 0,
892 sizeof(queue_counter->per_link));
893 queue_counter->window_start_time = jiffies;
894
895 IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
896 }
897
898 link_counter = &queue_counter->per_link[mld_link->fw_id];
899
900 spin_lock_bh(&queue_counter->lock);
901
902 /* Update the statistics for this TPT measurement window */
903 if (tx)
904 link_counter->tx += count;
905 else
906 link_counter->rx += count;
907
908 /*
909 * Next, evaluate whether we should queue an unblock,
910 * skip this if we are not blocked due to low throughput.
911 */
912 if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
913 goto unlock;
914
915 for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
916 total_mpdus += tx ? queue_counter->per_link[i].tx :
917 queue_counter->per_link[i].rx;
918
919 /* Unblock is already queued if the threshold was reached before */
920 if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
921 goto unlock;
922
923 if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
924 wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
925
926 unlock:
927 spin_unlock_bh(&queue_counter->lock);
928 }
929
930 /* must be called under rcu_read_lock() */
931 void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
932 u32 count)
933 {
934 iwl_mld_count_mpdu(link_sta, queue, count, false);
935 }
936
937 /* must be called under rcu_read_lock() */
938 void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
939 {
940 /* use queue 0 for all TX */
941 iwl_mld_count_mpdu(link_sta, 0, count, true);
942 }
943
944 static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
945 struct iwl_mld_int_sta *internal_sta,
946 u8 tid)
947 {
948 u32 sta_mask = BIT(internal_sta->sta_id);
949 int queue, size;
950
951 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
952 mld->trans->mac_cfg->base->min_txq_size);
953
954 queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
955 IWL_WATCHDOG_DISABLED);
956
957 if (queue >= 0)
958 IWL_DEBUG_TX_QUEUES(mld,
959 "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
960 queue, sta_mask, tid);
961 return queue;
962 }
963
964 static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
965 const struct iwl_mld_int_sta *internal_sta)
966 {
967 struct iwl_aux_sta_cmd cmd = {
968 .sta_id = cpu_to_le32(internal_sta->sta_id),
969 /* TODO: CDB - properly set the lmac_id */
970 .lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
971 };
972
973 return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
974 &cmd);
975 }
976
977 static int
978 iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
979 const struct iwl_mld_int_sta *internal_sta,
980 u8 fw_link_id,
981 const u8 *addr)
982 {
983 struct iwl_sta_cfg_cmd cmd = {};
984
985 if (internal_sta->sta_type == STATION_TYPE_AUX)
986 return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
987
988 cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
989 cmd.link_id = cpu_to_le32(fw_link_id);
990 cmd.station_type = cpu_to_le32(internal_sta->sta_type);
991
992 /* FW doesn't allow adding an IGTK/BIGTK if the sta isn't marked as MFP.
993 * On the other hand, FW will never check this flag during RX since
994 * an AP/GO doesn't receive protected broadcast management frames.
995 * So, we can set it unconditionally.
996 */
997 if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT)
998 cmd.mfp = cpu_to_le32(1);
999
1000 if (addr) {
1001 memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
1002 memcpy(cmd.peer_link_address, addr, ETH_ALEN);
1003 }
1004
1005 return iwl_mld_send_sta_cmd(mld, &cmd);
1006 }
1007
1008 static int iwl_mld_add_internal_sta(struct iwl_mld *mld,
1009 struct iwl_mld_int_sta *internal_sta,
1010 enum iwl_fw_sta_type sta_type,
1011 u8 fw_link_id, const u8 *addr, u8 tid)
1012 {
1013 int ret, queue_id;
1014
1015 ret = iwl_mld_allocate_link_sta_fw_id(mld,
1016 &internal_sta->sta_id,
1017 ERR_PTR(-EINVAL));
1018 if (ret)
1019 return ret;
1020
1021 internal_sta->sta_type = sta_type;
1022
1023 ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id,
1024 addr);
1025 if (ret)
1026 goto err;
1027
1028 queue_id = iwl_mld_allocate_internal_txq(mld, internal_sta, tid);
1029 if (queue_id < 0) {
1030 iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
1031 ret = queue_id;
1032 goto err;
1033 }
1034
1035 internal_sta->queue_id = queue_id;
1036
1037 return 0;
1038 err:
1039 iwl_mld_free_internal_sta(mld, internal_sta);
1040 return ret;
1041 }
1042
1043 int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
1044 struct ieee80211_vif *vif,
1045 struct ieee80211_bss_conf *link)
1046 {
1047 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1048 const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1049 const u8 *addr;
1050
1051 if (WARN_ON(!mld_link))
1052 return -EINVAL;
1053
1054 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1055 vif->type != NL80211_IFTYPE_ADHOC))
1056 return -EINVAL;
1057
1058 addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr;
1059
1060 return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta,
1061 STATION_TYPE_BCAST_MGMT,
1062 mld_link->fw_id, addr,
1063 IWL_MGMT_TID);
1064 }
1065
1066 int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
1067 struct ieee80211_vif *vif,
1068 struct ieee80211_bss_conf *link)
1069 {
1070 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1071 const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
1072
1073 if (WARN_ON(!mld_link))
1074 return -EINVAL;
1075
1076 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1077 vif->type != NL80211_IFTYPE_ADHOC))
1078 return -EINVAL;
1079
1080 return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta,
1081 STATION_TYPE_MCAST,
1082 mld_link->fw_id, mcast_addr, 0);
1083 }
1084
1085 int iwl_mld_add_aux_sta(struct iwl_mld *mld,
1086 struct iwl_mld_int_sta *internal_sta)
1087 {
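/* The AUX STA is not tied to a peer or to a specific link, so no address
 * and no meaningful link id are passed, and its queue uses
 * IWL_MAX_TID_COUNT, i.e. no specific QoS TID.
 */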
1088 return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX,
1089 0, NULL, IWL_MAX_TID_COUNT);
1090 }
1091
1092 int iwl_mld_add_mon_sta(struct iwl_mld *mld,
1093 struct ieee80211_vif *vif,
1094 struct ieee80211_bss_conf *link)
1095 {
1096 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1097
1098 if (WARN_ON(!mld_link))
1099 return -EINVAL;
1100
1101 if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
1102 return -EINVAL;
1103
1104 return iwl_mld_add_internal_sta(mld, &mld_link->mon_sta,
1105 STATION_TYPE_BCAST_MGMT,
1106 mld_link->fw_id, NULL,
1107 IWL_MAX_TID_COUNT);
1108 }
1109
1110 static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
1111 struct iwl_mld_int_sta *internal_sta,
1112 bool flush, u8 tid)
1113 {
1114 if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA ||
1115 internal_sta->queue_id == IWL_MLD_INVALID_QUEUE))
1116 return;
1117
1118 if (flush)
1119 iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id);
1120
1121 iwl_mld_free_txq(mld, BIT(internal_sta->sta_id),
1122 tid, internal_sta->queue_id);
1123
1124 iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
1125
1126 iwl_mld_free_internal_sta(mld, internal_sta);
1127 }
1128
1129 void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
1130 struct ieee80211_vif *vif,
1131 struct ieee80211_bss_conf *link)
1132 {
1133 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1134
1135 if (WARN_ON(!mld_link))
1136 return;
1137
1138 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1139 vif->type != NL80211_IFTYPE_ADHOC))
1140 return;
1141
1142 iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true,
1143 IWL_MGMT_TID);
1144 }
1145
1146 void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
1147 struct ieee80211_vif *vif,
1148 struct ieee80211_bss_conf *link)
1149 {
1150 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1151
1152 if (WARN_ON(!mld_link))
1153 return;
1154
1155 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1156 vif->type != NL80211_IFTYPE_ADHOC))
1157 return;
1158
1159 iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0);
1160 }
1161
1162 void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
1163 struct ieee80211_vif *vif)
1164 {
1165 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1166
1167 if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
1168 vif->type != NL80211_IFTYPE_STATION))
1169 return;
1170
1171 iwl_mld_remove_internal_sta(mld, &mld_vif->aux_sta, false,
1172 IWL_MAX_TID_COUNT);
1173 }
1174
1175 void iwl_mld_remove_mon_sta(struct iwl_mld *mld,
1176 struct ieee80211_vif *vif,
1177 struct ieee80211_bss_conf *link)
1178 {
1179 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1180
1181 if (WARN_ON(!mld_link))
1182 return;
1183
1184 if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
1185 return;
1186
1187 iwl_mld_remove_internal_sta(mld, &mld_link->mon_sta, false,
1188 IWL_MAX_TID_COUNT);
1189 }
1190
1191 static int iwl_mld_update_sta_resources(struct iwl_mld *mld,
1192 struct ieee80211_vif *vif,
1193 struct ieee80211_sta *sta,
1194 u32 old_sta_mask,
1195 u32 new_sta_mask)
1196 {
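/* Move every FW object that is keyed by the station mask - TX queues,
 * keys and BA agreements (BAIDs) - from the old mask to the new one.
 */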
1197 int ret;
1198
1199 ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask, new_sta_mask);
1200 if (ret)
1201 return ret;
1202
1203 ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask, new_sta_mask);
1204 if (ret)
1205 return ret;
1206
1207 return iwl_mld_update_sta_baids(mld, old_sta_mask, new_sta_mask);
1208 }
1209
1210 int iwl_mld_update_link_stas(struct iwl_mld *mld,
1211 struct ieee80211_vif *vif,
1212 struct ieee80211_sta *sta,
1213 u16 old_links, u16 new_links)
1214 {
1215 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
1216 struct iwl_mld_link_sta *mld_link_sta;
1217 unsigned long links_to_add = ~old_links & new_links;
1218 unsigned long links_to_rem = old_links & ~new_links;
1219 unsigned long old_links_long = old_links;
1220 unsigned long sta_mask_added = 0;
1221 u32 current_sta_mask = 0, sta_mask_to_rem = 0;
1222 unsigned int link_id, sta_id;
1223 int ret;
1224
1225 lockdep_assert_wiphy(mld->wiphy);
1226
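/* The order below matters: first shrink the FW station mask and remove
 * the departing link STAs, then add the new link STAs and grow the mask,
 * and only then activate the newly added links.
 */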
1227 for_each_set_bit(link_id, &old_links_long,
1228 IEEE80211_MLD_MAX_NUM_LINKS) {
1229 mld_link_sta =
1230 iwl_mld_link_sta_dereference_check(mld_sta, link_id);
1231
1232 if (WARN_ON(!mld_link_sta))
1233 return -EINVAL;
1234
1235 current_sta_mask |= BIT(mld_link_sta->fw_id);
1236 if (links_to_rem & BIT(link_id))
1237 sta_mask_to_rem |= BIT(mld_link_sta->fw_id);
1238 }
1239
1240 if (sta_mask_to_rem) {
1241 ret = iwl_mld_update_sta_resources(mld, vif, sta,
1242 current_sta_mask,
1243 current_sta_mask &
1244 ~sta_mask_to_rem);
1245 if (ret)
1246 return ret;
1247
1248 current_sta_mask &= ~sta_mask_to_rem;
1249 }
1250
1251 for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
1252 struct ieee80211_link_sta *link_sta =
1253 link_sta_dereference_protected(sta, link_id);
1254
1255 if (WARN_ON(!link_sta))
1256 return -EINVAL;
1257
1258 iwl_mld_remove_link_sta(mld, link_sta);
1259 }
1260
1261 for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
1262 struct ieee80211_link_sta *link_sta =
1263 link_sta_dereference_protected(sta, link_id);
1264 struct ieee80211_bss_conf *link;
1265
1266 if (WARN_ON(!link_sta))
1267 return -EINVAL;
1268
1269 ret = iwl_mld_add_link_sta(mld, link_sta);
1270 if (ret)
1271 goto remove_added_link_stas;
1272
1273 mld_link_sta =
1274 iwl_mld_link_sta_dereference_check(mld_sta,
1275 link_id);
1276
1277 link = link_conf_dereference_protected(mld_sta->vif,
1278 link_sta->link_id);
1279
1280 iwl_mld_set_max_amsdu_len(mld, link_sta);
1281 iwl_mld_config_tlc_link(mld, vif, link, link_sta);
1282
1283 sta_mask_added |= BIT(mld_link_sta->fw_id);
1284 }
1285
1286 if (sta_mask_added) {
1287 ret = iwl_mld_update_sta_resources(mld, vif, sta,
1288 current_sta_mask,
1289 current_sta_mask |
1290 sta_mask_added);
1291 if (ret)
1292 goto remove_added_link_stas;
1293 }
1294
1295 /* We couldn't activate the links before they had a STA. Now we can */
1296 for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
1297 struct ieee80211_bss_conf *link =
1298 link_conf_dereference_protected(mld_sta->vif, link_id);
1299
1300 if (WARN_ON(!link))
1301 continue;
1302
1303 iwl_mld_activate_link(mld, link);
1304 }
1305
1306 return 0;
1307
1308 remove_added_link_stas:
1309 for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) {
1310 struct ieee80211_link_sta *link_sta =
1311 wiphy_dereference(mld->wiphy,
1312 mld->fw_id_to_link_sta[sta_id]);
1313
1314 if (WARN_ON(!link_sta))
1315 continue;
1316
1317 iwl_mld_remove_link_sta(mld, link_sta);
1318 }
1319
1320 return ret;
1321 }
1322