1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024-2025 Intel Corporation
4 */
5
6 #include <linux/ieee80211.h>
7 #include <kunit/static_stub.h>
8
9 #include "sta.h"
10 #include "hcmd.h"
11 #include "iface.h"
12 #include "mlo.h"
13 #include "key.h"
14 #include "agg.h"
15 #include "tlc.h"
16 #include "fw/api/sta.h"
17 #include "fw/api/mac.h"
18 #include "fw/api/rx.h"
19
iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld * mld,struct ieee80211_link_sta * link_sta)20 int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
21 struct ieee80211_link_sta *link_sta)
22 {
23 struct iwl_mld_link_sta *mld_link_sta;
24
25 /* This function should only be used with the wiphy lock held,
26 * In other cases, it is not guaranteed that the link_sta will exist
27 * in the driver too, and it is checked here.
28 */
29 lockdep_assert_wiphy(mld->wiphy);
30
31 /* This is not meant to be called with a NULL pointer */
32 if (WARN_ON(!link_sta))
33 return -ENOENT;
34
35 mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
36 if (!mld_link_sta) {
37 WARN_ON(!iwl_mld_error_before_recovery(mld));
38 return -ENOENT;
39 }
40
41 return mld_link_sta->fw_id;
42 }
43
/* Compute the A-MPDU max size and min spacing (density) for the STA cmd.
 *
 * Starts from the HT values, overrides them with the HE 6 GHz capability
 * on 6 GHz (or with VHT when supported), then adds the HE/EHT A-MPDU
 * length exponent extensions. The result is clamped to the largest
 * aggregation size the FW supports.
 */
static void
iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta,
				 struct ieee80211_bss_conf *link,
				 __le32 *tx_ampdu_max_size,
				 __le32 *tx_ampdu_spacing)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta || !link))
		return;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands,
	 * we only need to worry about the highest supported PPDU type here.
	 */

	/* Baseline: HT capabilities, if advertised */
	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens =
			le16_get_bits(link_sta->he_6ghz_capa.capa,
				      IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size =
			le16_get_bits(link_sta->he_6ghz_capa.capa,
				      IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size =
			u32_get_bits(link_sta->vht_cap.cap,
				     IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	/* EHT adds a further extension on top of the HE one */
	if (link_sta->eht_cap.has_eht)
		agg_size +=
			u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
				    IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*tx_ampdu_max_size = cpu_to_le32(agg_size);
	*tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
}
106
iwl_mld_get_uapsd_acs(struct ieee80211_sta * sta)107 static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta)
108 {
109 u8 uapsd_acs = 0;
110
111 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
112 uapsd_acs |= BIT(AC_BK);
113 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
114 uapsd_acs |= BIT(AC_BE);
115 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
116 uapsd_acs |= BIT(AC_VI);
117 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
118 uapsd_acs |= BIT(AC_VO);
119
120 return uapsd_acs | uapsd_acs << 4;
121 }
122
/* Extract one PPET value (IEEE80211_PPE_THRES_INFO_PPET_SIZE bits) starting
 * at bit offset @ppe_pos_bit inside the byte array @ppe.
 */
static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
{
	u8 byte_idx = ppe_pos_bit / 8;
	u8 shift = ppe_pos_bit % 8;
	u8 bits_in_first;
	u8 val;

	/* The whole field fits inside a single byte */
	if (shift <= 5)
		return (ppe[byte_idx] >> shift) &
		       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);

	/* The field straddles a byte boundary: take the remaining bits of
	 * the current byte and complete them with the low bits of the next
	 * one.
	 */
	bits_in_first = 8 - shift;
	val = (ppe[byte_idx] >> shift) & (BIT(bits_in_first) - 1);
	val |= (ppe[byte_idx + 1] &
		(BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - bits_in_first) - 1)) <<
	       bits_in_first;

	return val;
}
148
/* Parse a (HE or EHT) PPE Thresholds field into the FW pkt_ext format.
 *
 * For each of the first MAX_HE_SUPP_NSS spatial streams and each BW, read
 * the PPET8/PPET16 pair when the RU index bitmap marks it present. With
 * @inheritance (11be) a BW with no thresholds inherits the last parsed
 * pair; without it (11ax) the entry is left untouched.
 *
 * @nss: 1-based NSS count from the PPE header; trimmed to MAX_HE_SUPP_NSS
 * @ppe_pos_bit: bit offset of the first PPET pair (right after the header)
 */
static void iwl_mld_parse_ppe(struct iwl_mld *mld,
			      struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
			      u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
			      bool inheritance)
{
	/* FW currently supports only nss == MAX_HE_SUPP_NSS
	 *
	 * If nss > MAX: we can ignore values we don't support
	 * If nss < MAX: we can set zeros in other streams
	 */
	if (nss > MAX_HE_SUPP_NSS) {
		IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss,
			       MAX_HE_SUPP_NSS);
		nss = MAX_HE_SUPP_NSS;
	}

	for (int i = 0; i < nss; i++) {
		/* pre-shifted left so the first >>= below tests bit 0 */
		u8 ru_index_tmp = ru_index_bitmap << 1;
		u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;

		for (u8 bw = 0;
		     bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
		     bw++) {
			ru_index_tmp >>= 1;

			/* According to the 11be spec, if for a specific BW the PPE Thresholds
			 * isn't present - it should inherit the thresholds from the last
			 * BW for which we had PPE Thresholds. In 11ax though, we don't have
			 * this inheritance - continue in this case
			 */
			if (!(ru_index_tmp & 1)) {
				if (inheritance)
					goto set_thresholds;
				else
					continue;
			}

			high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
			low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;

set_thresholds:
			pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
			pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
		}
	}
}
197
/* Parse the HE PPE Thresholds field of @link_sta into @pkt_ext */
static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld,
					    struct ieee80211_link_sta *link_sta,
					    struct iwl_he_pkt_ext_v2 *pkt_ext,
					    bool inheritance)
{
	u8 *ppe = &link_sta->he_cap.ppe_thres[0];
	/* The NSS field in the PPE header is zero-based */
	u8 nss = (*ppe & IEEE80211_PPE_THRES_NSS_MASK) + 1;
	u8 ru_bitmap = u8_get_bits(*ppe,
				   IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);

	/* The PPET pairs start right after the fixed PPE header */
	iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_bitmap, ppe,
			  IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE,
			  inheritance);
}
215
216 static int
iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 * pkt_ext,u8 nominal_padding)217 iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
218 u8 nominal_padding)
219 {
220 int low_th = -1;
221 int high_th = -1;
222
223 /* all the macros are the same for EHT and HE */
224 switch (nominal_padding) {
225 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
226 low_th = IWL_HE_PKT_EXT_NONE;
227 high_th = IWL_HE_PKT_EXT_NONE;
228 break;
229 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
230 low_th = IWL_HE_PKT_EXT_BPSK;
231 high_th = IWL_HE_PKT_EXT_NONE;
232 break;
233 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
234 case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
235 low_th = IWL_HE_PKT_EXT_NONE;
236 high_th = IWL_HE_PKT_EXT_BPSK;
237 break;
238 }
239
240 if (low_th < 0 || high_th < 0)
241 return -EINVAL;
242
243 /* Set the PPE thresholds accordingly */
244 for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
245 for (u8 bw = 0;
246 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
247 bw++) {
248 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
249 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
250 }
251 }
252
253 return 0;
254 }
255
/* For MCSs 12-13 FW must use the max of the HE PPE thresholds and the
 * nominal padding: replace "none" entries with 4096-QAM accordingly.
 */
static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
					 u8 nominal_padding)
{
	for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
		for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
		     bw++) {
			u8 *th = pkt_ext->pkt_ext_qam_th[i][bw];

			if (nominal_padding >
			    IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US) {
				/* padding above 8us: PPET16 must not stay "none" */
				if (th[1] == IWL_HE_PKT_EXT_NONE)
					th[1] = IWL_HE_PKT_EXT_4096QAM;
			} else if (nominal_padding ==
				   IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
				   th[0] == IWL_HE_PKT_EXT_NONE &&
				   th[1] == IWL_HE_PKT_EXT_NONE) {
				th[0] = IWL_HE_PKT_EXT_4096QAM;
			}
		}
	}
}
276
/* Fill the PPE thresholds (packet extension) part of the STA command.
 *
 * Source priority for an EHT station:
 *  1. EHT PPE Thresholds field, if present;
 *  2. otherwise the HE PPE Thresholds (parsed with 11be inheritance, and
 *     combined with the nominal padding for MCSs 12-13);
 *  3. otherwise the Common Nominal Packet Padding field.
 * For an HE-only station the same logic applies without the EHT parts.
 * Entries not set by any source remain IWL_HE_PKT_EXT_NONE.
 */
static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
				 struct ieee80211_link_sta *link_sta,
				 struct iwl_he_pkt_ext_v2 *pkt_ext)
{
	if (WARN_ON(!link_sta))
		return;

	/* Initialize the PPE thresholds to "None" (7), as described in Table
	 * 9-262ac of 80211.ax/D3.0.
	 */
	memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));

	if (link_sta->eht_cap.has_eht) {
		u8 nominal_padding =
			u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
				    IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);

		/* If PPE Thresholds exists, parse them into a FW-familiar
		 * format.
		 */
		if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
		    IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
			/* NSS field in the EHT PPE header is zero-based */
			u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
				  IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
			u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
			u8 ru_index_bitmap =
				u16_get_bits(*ppe,
					     IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
			/* Starting after PPE header */
			u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;

			iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
					  ppe, ppe_pos_bit, true);
		/* EHT PPE Thresholds doesn't exist - set the API according to
		 * HE PPE Tresholds
		 */
		} else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
			   IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
			/* Even though HE Capabilities IE doesn't contain PPE
			 * Thresholds for BW 320Mhz, thresholds for this BW will
			 * be filled in with the same values as 160Mhz, due to
			 * the inheritance, as required.
			 */
			iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
							true);

			/* According to the requirements, for MCSs 12-13 the
			 * maximum value between HE PPE Threshold and Common
			 * Nominal Packet Padding needs to be taken
			 */
			iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);

		/* if PPE Thresholds doesn't present in both EHT IE and HE IE -
		 * take the Thresholds from Common Nominal Packet Padding field
		 */
		} else {
			iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
								 nominal_padding);
		}
	} else if (link_sta->he_cap.has_he) {
		/* If PPE Thresholds exist, parse them into a FW-familiar format. */
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
		    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
			iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
							false);
		/* PPE Thresholds doesn't exist - set the API PPE values
		 * according to Common Nominal Packet Padding field.
		 */
		} else {
			u8 nominal_padding =
				u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
					    IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
			/* a reserved padding value leaves everything at "None" */
			if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
				iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
									 nominal_padding);
		}
	}

	/* Dump the final table for debugging */
	for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
		for (int bw = 0;
		     bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
		     bw++) {
			u8 *qam_th =
				&pkt_ext->pkt_ext_qam_th[i][bw][0];

			IWL_DEBUG_HT(mld,
				     "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
				     i, bw, qam_th[0], qam_th[1]);
		}
	}
}
368
/* Translate the HE MAC capabilities of @link_sta into FW HTC support
 * flags (IWL_HE_HTC_*).
 *
 * The link-adaptation level is a 2-bit value assembled from one bit in
 * mac_cap_info[1] (low) and one in mac_cap_info[2] (shifted up as the
 * high bit): 2 enables unsolicited feedback only, 3 enables both.
 */
static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
{
	u8 *mac_cap_info =
		&link_sta->he_cap.he_cap_elem.mac_cap_info[0];
	u32 htc_flags = 0;

	if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
		htc_flags |= IWL_HE_HTC_SUPPORT;
	if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
	    (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
		u8 link_adap =
			((mac_cap_info[2] &
			  IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
			 (mac_cap_info[1] &
			  IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);

		if (link_adap == 2)
			htc_flags |=
				IWL_HE_HTC_LINK_ADAP_UNSOLICITED;
		else if (link_adap == 3)
			htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH;
	}
	if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
		htc_flags |= IWL_HE_HTC_BSR_SUPP;
	if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
		htc_flags |= IWL_HE_HTC_OMI_SUPP;
	if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
		htc_flags |= IWL_HE_HTC_BQR_SUPP;

	return htc_flags;
}
400
iwl_mld_send_sta_cmd(struct iwl_mld * mld,const struct iwl_sta_cfg_cmd * cmd)401 static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
402 const struct iwl_sta_cfg_cmd *cmd)
403 {
404 int ret = iwl_mld_send_cmd_pdu(mld,
405 WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
406 cmd);
407 if (ret)
408 IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
409 return ret;
410 }
411
/* Build and send a STA_CONFIG_CMD that adds or modifies one link STA in FW.
 *
 * Fills the command from the mac80211 link_sta/link state: addresses, AID,
 * MFP, MIMO/SMPS, A-MPDU parameters, U-APSD, and (when HE is supported)
 * the PPE thresholds and HTC flags.
 *
 * Returns 0 on success or a negative error value.
 */
static int
iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
			   struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta *sta = link_sta->sta;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct ieee80211_bss_conf *link;
	struct iwl_mld_link *mld_link;
	struct iwl_sta_cfg_cmd cmd = {};
	int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);

	lockdep_assert_wiphy(mld->wiphy);

	link = link_conf_dereference_protected(mld_sta->vif,
					       link_sta->link_id);

	mld_link = iwl_mld_link_from_mac80211(link);

	if (WARN_ON(!link || !mld_link) || fw_id < 0)
		return -EINVAL;

	cmd.sta_id = cpu_to_le32(fw_id);
	cmd.station_type = cpu_to_le32(mld_sta->sta_type);
	cmd.link_id = cpu_to_le32(mld_link->fw_id);

	memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
	memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);

	/* The AID is only meaningful once the STA reached ASSOC state */
	if (mld_sta->sta_state >= IEEE80211_STA_ASSOC)
		cmd.assoc_id = cpu_to_le32(sta->aid);

	/* NOTE(review): MFP is also set while the STA is not yet authorized,
	 * not only when sta->mfp - presumably to protect frames during the
	 * handshake; confirm against the FW API expectations.
	 */
	if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
		cmd.mfp = cpu_to_le32(1);

	/* Any RX NSS above 1 is reported to FW simply as "MIMO" */
	switch (link_sta->rx_nss) {
	case 1:
		cmd.mimo = cpu_to_le32(0);
		break;
	case 2 ... 8:
		cmd.mimo = cpu_to_le32(1);
		break;
	}

	switch (link_sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		cmd.mimo = cpu_to_le32(0);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		cmd.mimo_protection = cpu_to_le32(1);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	iwl_mld_fill_ampdu_size_and_dens(link_sta, link,
					 &cmd.tx_ampdu_max_size,
					 &cmd.tx_ampdu_spacing);

	/* U-APSD parameters: sp_length is twice max_sp, or 128 when
	 * max_sp is 0
	 */
	if (sta->wme) {
		cmd.sp_length =
			cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
		cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta));
	}

	if (link_sta->he_cap.has_he) {
		cmd.trig_rnd_alloc =
			cpu_to_le32(link->uora_exists ? 1 : 0);

		/* PPE Thresholds */
		iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext);

		/* HTC flags */
		cmd.htc_flags =
			cpu_to_le32(iwl_mld_get_htc_flags(link_sta));

		if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
		    IEEE80211_HE_MAC_CAP2_ACK_EN)
			cmd.ack_enabled = cpu_to_le32(1);
	}

	return iwl_mld_send_sta_cmd(mld, &cmd);
}
500
/* Expands to a helper that allocates a free FW station id and maps it to a
 * link_sta - see the IWL_MLD_ALLOC_FN macro definition; used below as
 * iwl_mld_allocate_link_sta_fw_id().
 */
IWL_MLD_ALLOC_FN(link_sta, link_sta)
502
/* Add one link STA to the driver and to FW.
 *
 * On a clean add: allocate a FW station id, map it to the link_sta, and
 * set up the driver-side iwl_mld_link_sta (the deflink is embedded in
 * iwl_mld_sta; other links are kzalloc'ed and freed via kfree_rcu on
 * removal). During HW restart the existing mapping is reused so FW can
 * recover SN/PN for the same station ids.
 *
 * Returns 0 on success or a negative error value.
 */
static int
iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	struct iwl_mld_link_sta *mld_link_sta;
	int ret;
	u8 fw_id;

	lockdep_assert_wiphy(mld->wiphy);

	/* We will fail to add it to the FW anyway */
	if (iwl_mld_error_before_recovery(mld))
		return -ENODEV;

	mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);

	/* We need to preserve the fw sta ids during a restart, since the fw
	 * will recover SN/PN for them, this is why the mld_link_sta exists.
	 */
	if (mld_link_sta) {
		/* But if we are not restarting, this is not OK */
		WARN_ON(!mld->fw_status.in_hw_restart);

		/* Avoid adding a STA that is already in FW to avoid an assert */
		if (WARN_ON(mld_link_sta->in_fw))
			return -EINVAL;

		fw_id = mld_link_sta->fw_id;
		goto add_to_fw;
	}

	/* Allocate a fw id and map it to the link_sta */
	ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
	if (ret)
		return ret;

	/* NOTE(review): on the -ENOMEM path below, the fw_id mapping just
	 * created is not cleared - verify whether that leaks the id.
	 */
	if (link_sta == &link_sta->sta->deflink) {
		mld_link_sta = &mld_sta->deflink;
	} else {
		mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL);
		if (!mld_link_sta)
			return -ENOMEM;
	}

	mld_link_sta->fw_id = fw_id;
	rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta);

add_to_fw:
	ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
	if (ret) {
		/* undo the fw id and link mappings */
		RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL);
		RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
		if (link_sta != &link_sta->sta->deflink)
			kfree(mld_link_sta);
		return ret;
	}
	mld_link_sta->in_fw = true;

	return 0;
}
563
/* Remove the FW station @fw_sta_id via STA_REMOVE_CMD */
static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id)
{
	struct iwl_remove_sta_cmd cmd = {
		.sta_id = cpu_to_le32(fw_sta_id),
	};
	int ret = iwl_mld_send_cmd_pdu(mld,
				       WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
				       &cmd);

	if (ret)
		IWL_ERR(mld, "Failed to remove station. Id=%d\n", fw_sta_id);

	return ret;
}
579
/* Remove one link STA from FW and free the driver-side state.
 *
 * The FW id mappings are cleared even if the FW removal failed, since
 * this will not be redone on reconfig. Pending notifications for this
 * station are cancelled because FW will not send new ones.
 */
static void
iwl_mld_remove_link_sta(struct iwl_mld *mld,
			struct ieee80211_link_sta *link_sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	struct iwl_mld_link_sta *mld_link_sta =
		iwl_mld_link_sta_from_mac80211(link_sta);

	if (WARN_ON(!mld_link_sta))
		return;

	iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id);
	mld_link_sta->in_fw = false;

	/* Now that the STA doesn't exist in FW, we don't expect any new
	 * notifications for it. Cancel the ones that are already pending
	 */
	iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA,
					       mld_link_sta->fw_id);

	/* This will not be done upon reconfig, so do it also when
	 * failed to remove from fw
	 */
	RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL);
	RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
	/* the deflink is embedded in mld_sta; only other links were kzalloc'ed */
	if (mld_link_sta != &mld_sta->deflink)
		kfree_rcu(mld_link_sta, rcu_head);
}
608
iwl_mld_set_max_amsdu_len(struct iwl_mld * mld,struct ieee80211_link_sta * link_sta)609 static void iwl_mld_set_max_amsdu_len(struct iwl_mld *mld,
610 struct ieee80211_link_sta *link_sta)
611 {
612 const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
613
614 /* For EHT, HE and VHT we can use the value as it was calculated by
615 * mac80211. For HT, mac80211 doesn't enforce to 4095, so force it
616 * here
617 */
618 if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
619 link_sta->vht_cap.vht_supported ||
620 !ht_cap->ht_supported ||
621 !(ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU))
622 return;
623
624 link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
625 ieee80211_sta_recalc_aggregates(link_sta->sta);
626 }
627
iwl_mld_update_all_link_stations(struct iwl_mld * mld,struct ieee80211_sta * sta)628 int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
629 struct ieee80211_sta *sta)
630 {
631 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
632 struct ieee80211_link_sta *link_sta;
633 int link_id;
634
635 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
636 int ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
637
638 if (ret)
639 return ret;
640
641 if (mld_sta->sta_state == IEEE80211_STA_ASSOC)
642 iwl_mld_set_max_amsdu_len(mld, link_sta);
643 }
644 return 0;
645 }
646
iwl_mld_destroy_sta(struct ieee80211_sta * sta)647 static void iwl_mld_destroy_sta(struct ieee80211_sta *sta)
648 {
649 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
650
651 kfree(mld_sta->dup_data);
652 kfree(mld_sta->mpdu_counters);
653 }
654
655 static int
iwl_mld_alloc_dup_data(struct iwl_mld * mld,struct iwl_mld_sta * mld_sta)656 iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
657 {
658 struct iwl_mld_rxq_dup_data *dup_data;
659
660 if (mld->fw_status.in_hw_restart)
661 return 0;
662
663 dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
664 GFP_KERNEL);
665 if (!dup_data)
666 return -ENOMEM;
667
668 /* Initialize all the last_seq values to 0xffff which can never
669 * compare equal to the frame's seq_ctrl in the check in
670 * iwl_mld_is_dup() since the lower 4 bits are the fragment
671 * number and fragmented packets don't reach that function.
672 *
673 * This thus allows receiving a packet with seqno 0 and the
674 * retry bit set as the very first packet on a new TID.
675 */
676 for (int q = 0; q < mld->trans->num_rx_queues; q++)
677 memset(dup_data[q].last_seq, 0xff,
678 sizeof(dup_data[q].last_seq));
679 mld_sta->dup_data = dup_data;
680
681 return 0;
682 }
683
iwl_mld_alloc_mpdu_counters(struct iwl_mld * mld,struct ieee80211_sta * sta)684 static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
685 struct ieee80211_sta *sta)
686 {
687 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
688 struct ieee80211_vif *vif = mld_sta->vif;
689
690 if (mld->fw_status.in_hw_restart)
691 return;
692
693 /* MPDUs are counted only when EMLSR is possible */
694 if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
695 sta->tdls || !ieee80211_vif_is_mld(vif))
696 return;
697
698 mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
699 sizeof(*mld_sta->mpdu_counters),
700 GFP_KERNEL);
701 if (!mld_sta->mpdu_counters)
702 return;
703
704 for (int q = 0; q < mld->trans->num_rx_queues; q++)
705 spin_lock_init(&mld_sta->mpdu_counters[q].lock);
706 }
707
708 static int
iwl_mld_init_sta(struct iwl_mld * mld,struct ieee80211_sta * sta,struct ieee80211_vif * vif,enum iwl_fw_sta_type type)709 iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
710 struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
711 {
712 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
713
714 mld_sta->vif = vif;
715 mld_sta->sta_type = type;
716 mld_sta->mld = mld;
717
718 if (!mld->fw_status.in_hw_restart)
719 for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
720 iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
721
722 iwl_mld_alloc_mpdu_counters(mld, sta);
723
724 iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
725
726 return iwl_mld_alloc_dup_data(mld, mld_sta);
727 }
728
iwl_mld_add_sta(struct iwl_mld * mld,struct ieee80211_sta * sta,struct ieee80211_vif * vif,enum iwl_fw_sta_type type)729 int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
730 struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
731 {
732 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
733 struct ieee80211_link_sta *link_sta;
734 int link_id;
735 int ret;
736
737 ret = iwl_mld_init_sta(mld, sta, vif, type);
738 if (ret)
739 return ret;
740
741 /* We could have add only the deflink link_sta, but it will not work
742 * in the restart case if the single link that is active during
743 * reconfig is not the deflink one.
744 */
745 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
746 ret = iwl_mld_add_link_sta(mld, link_sta);
747 if (ret)
748 goto destroy_sta;
749 }
750
751 return 0;
752
753 destroy_sta:
754 iwl_mld_destroy_sta(sta);
755
756 return ret;
757 }
758
iwl_mld_flush_sta_txqs(struct iwl_mld * mld,struct ieee80211_sta * sta)759 void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
760 {
761 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
762 struct ieee80211_link_sta *link_sta;
763 int link_id;
764
765 for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
766 int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
767
768 if (fw_sta_id < 0)
769 continue;
770
771 iwl_mld_flush_link_sta_txqs(mld, fw_sta_id);
772 }
773 }
774
iwl_mld_wait_sta_txqs_empty(struct iwl_mld * mld,struct ieee80211_sta * sta)775 void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
776 {
777 /* Avoid a warning in iwl_trans_wait_txq_empty if are anyway on the way
778 * to a restart.
779 */
780 if (iwl_mld_error_before_recovery(mld))
781 return;
782
783 for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
784 struct iwl_mld_txq *mld_txq =
785 iwl_mld_txq_from_mac80211(sta->txq[i]);
786
787 if (!mld_txq->status.allocated)
788 continue;
789
790 iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
791 }
792 }
793
/* Remove a station: flush and drain its TX queues, remove its keys and
 * link STAs from FW, then free the driver-side allocations. Order
 * matters: queues must be empty before they are removed, and keys must
 * be removed before the FW station is.
 */
void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct ieee80211_vif *vif = mld_sta->vif;
	struct ieee80211_link_sta *link_sta;
	u8 link_id;

	lockdep_assert_wiphy(mld->wiphy);

	/* Tell the HW to flush the queues */
	iwl_mld_flush_sta_txqs(mld, sta);

	/* Wait for trans to empty its queues */
	iwl_mld_wait_sta_txqs_empty(mld, sta);

	/* Now we can remove the queues */
	for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
		iwl_mld_remove_txq(mld, sta->txq[i]);

	for_each_sta_active_link(vif, sta, link_sta, link_id) {
		/* mac80211 will remove the groupwise keys after the sta is
		 * removed, but FW expects all the keys to be removed before
		 * the STA is, so remove them all here.
		 */
		if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
			iwl_mld_remove_ap_keys(mld, vif, sta, link_id);

		/* Remove the link_sta */
		iwl_mld_remove_link_sta(mld, link_sta);
	}

	iwl_mld_destroy_sta(sta);
}
827
iwl_mld_fw_sta_id_mask(struct iwl_mld * mld,struct ieee80211_sta * sta)828 u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
829 {
830 struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
831 struct ieee80211_link_sta *link_sta;
832 unsigned int link_id;
833 u32 result = 0;
834
835 KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
836
837 /* This function should only be used with the wiphy lock held,
838 * In other cases, it is not guaranteed that the link_sta will exist
839 * in the driver too, and it is checked in
840 * iwl_mld_fw_sta_id_from_link_sta.
841 */
842 lockdep_assert_wiphy(mld->wiphy);
843
844 for_each_sta_active_link(vif, sta, link_sta, link_id) {
845 int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
846
847 if (!(fw_id < 0))
848 result |= BIT(fw_id);
849 }
850
851 return result;
852 }
853 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
854
/* Account @count TX or RX MPDUs on @queue for the EMLSR throughput window.
 *
 * When EMLSR is blocked due to low throughput, also evaluate whether the
 * total over all links of this queue crossed the enter-EMLSR threshold
 * and, if so, queue the unblock work (once).
 *
 * Fix: take queue_counter->lock BEFORE the window-expiry clear. The
 * original cleared per_link[] and wrote window_start_time unlocked, while
 * the same fields are updated/read under the lock below; queue 0 is
 * shared between the TX path and RX queue 0, so the unlocked memset could
 * race with a concurrent counter update.
 */
static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
			       u32 count, bool tx)
{
	struct iwl_mld_per_q_mpdu_counter *queue_counter;
	struct iwl_mld_per_link_mpdu_counter *link_counter;
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *mld_link;
	struct iwl_mld *mld;
	int total_mpdus = 0;

	if (WARN_ON(!link_sta))
		return;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	if (!mld_sta->mpdu_counters)
		return;

	mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);

	if (WARN_ON_ONCE(!mld_link))
		return;

	queue_counter = &mld_sta->mpdu_counters[queue];

	mld = mld_vif->mld;

	spin_lock_bh(&queue_counter->lock);

	/* If it the window is over, first clear the counters.
	 * When we are not blocked by TPT, the window is managed by check_tpt_wk
	 */
	if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
	    time_is_before_jiffies(queue_counter->window_start_time +
				   IWL_MLD_TPT_COUNT_WINDOW)) {
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));
		queue_counter->window_start_time = jiffies;

		IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
	}

	link_counter = &queue_counter->per_link[mld_link->fw_id];

	/* Update the statistics for this TPT measurement window */
	if (tx)
		link_counter->tx += count;
	else
		link_counter->rx += count;

	/*
	 * Next, evaluate whether we should queue an unblock,
	 * skip this if we are not blocked due to low throughput.
	 */
	if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		goto unlock;

	for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
		total_mpdus += tx ? queue_counter->per_link[i].tx :
				    queue_counter->per_link[i].rx;

	/* Unblock is already queued if the threshold was reached before */
	if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
		goto unlock;

	if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
		wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);

unlock:
	spin_unlock_bh(&queue_counter->lock);
}
927
/* Count received MPDUs for the EMLSR throughput window.
 * Must be called under rcu_read_lock().
 */
void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
			   u32 count)
{
	iwl_mld_count_mpdu(link_sta, queue, count, false);
}
934
/* Count transmitted MPDUs for the EMLSR throughput window.
 * Must be called under rcu_read_lock().
 */
void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
{
	/* use queue 0 for all TX */
	iwl_mld_count_mpdu(link_sta, 0, count, true);
}
941
/* Allocate a transport TXQ for an internal (driver-owned) station.
 * Returns the queue id, or a negative error from iwl_trans_txq_alloc().
 */
static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
					 struct iwl_mld_int_sta *internal_sta,
					 u8 tid)
{
	u32 sta_mask = BIT(internal_sta->sta_id);
	int size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			 mld->trans->cfg->min_txq_size);
	int queue;

	queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
				    IWL_WATCHDOG_DISABLED);
	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mld,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, sta_mask, tid);

	return queue;
}
961
/* Add an AUX station to FW via the dedicated AUX_STA_CMD.
 * Returns 0 on success or a negative error value.
 */
static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
				    const struct iwl_mld_int_sta *internal_sta)
{
	struct iwl_aux_sta_cmd cmd = {
		.sta_id = cpu_to_le32(internal_sta->sta_id),
		/* TODO: CDB - properly set the lmac_id */
		.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
				    &cmd);
}
974
975 static int
iwl_mld_add_internal_sta_to_fw(struct iwl_mld * mld,const struct iwl_mld_int_sta * internal_sta,u8 fw_link_id,const u8 * addr)976 iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
977 const struct iwl_mld_int_sta *internal_sta,
978 u8 fw_link_id,
979 const u8 *addr)
980 {
981 struct iwl_sta_cfg_cmd cmd = {};
982
983 if (internal_sta->sta_type == STATION_TYPE_AUX)
984 return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
985
986 cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
987 cmd.link_id = cpu_to_le32(fw_link_id);
988 cmd.station_type = cpu_to_le32(internal_sta->sta_type);
989
990 /* FW doesn't allow to add a IGTK/BIGTK if the sta isn't marked as MFP.
991 * On the other hand, FW will never check this flag during RX since
992 * an AP/GO doesn't receive protected broadcast management frames.
993 * So, we can set it unconditionally.
994 */
995 if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT)
996 cmd.mfp = cpu_to_le32(1);
997
998 if (addr) {
999 memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
1000 memcpy(cmd.peer_link_address, addr, ETH_ALEN);
1001 }
1002
1003 return iwl_mld_send_sta_cmd(mld, &cmd);
1004 }
1005
/* Fully set up an internal station: reserve a FW station id, add the
 * station to the FW and allocate its TX queue. On any failure all
 * previously acquired resources are released.
 */
static int iwl_mld_add_internal_sta(struct iwl_mld *mld,
				    struct iwl_mld_int_sta *internal_sta,
				    enum iwl_fw_sta_type sta_type,
				    u8 fw_link_id, const u8 *addr, u8 tid)
{
	int queue, ret;

	/* Internal stations have no mac80211 link_sta, hence the error
	 * pointer placeholder in the fw-id table.
	 */
	ret = iwl_mld_allocate_link_sta_fw_id(mld, &internal_sta->sta_id,
					      ERR_PTR(-EINVAL));
	if (ret)
		return ret;

	internal_sta->sta_type = sta_type;

	ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id,
					     addr);
	if (ret)
		goto err;

	queue = iwl_mld_allocate_internal_txq(mld, internal_sta, tid);
	if (queue < 0) {
		/* Undo the FW-side addition before freeing the station */
		iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
		ret = queue;
		goto err;
	}

	internal_sta->queue_id = queue;
	return 0;

err:
	iwl_mld_free_internal_sta(mld, internal_sta);
	return ret;
}
1040
iwl_mld_add_bcast_sta(struct iwl_mld * mld,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link)1041 int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
1042 struct ieee80211_vif *vif,
1043 struct ieee80211_bss_conf *link)
1044 {
1045 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1046 const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1047 const u8 *addr;
1048
1049 if (WARN_ON(!mld_link))
1050 return -EINVAL;
1051
1052 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1053 vif->type != NL80211_IFTYPE_ADHOC))
1054 return -EINVAL;
1055
1056 addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr;
1057
1058 return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta,
1059 STATION_TYPE_BCAST_MGMT,
1060 mld_link->fw_id, addr,
1061 IWL_MGMT_TID);
1062 }
1063
iwl_mld_add_mcast_sta(struct iwl_mld * mld,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link)1064 int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
1065 struct ieee80211_vif *vif,
1066 struct ieee80211_bss_conf *link)
1067 {
1068 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1069 const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
1070
1071 if (WARN_ON(!mld_link))
1072 return -EINVAL;
1073
1074 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1075 vif->type != NL80211_IFTYPE_ADHOC))
1076 return -EINVAL;
1077
1078 return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta,
1079 STATION_TYPE_MCAST,
1080 mld_link->fw_id, mcast_addr, 0);
1081 }
1082
/* Add an auxiliary station. AUX stations are not bound to a link and
 * carry no peer address, hence link id 0 and a NULL addr.
 */
int iwl_mld_add_aux_sta(struct iwl_mld *mld,
			struct iwl_mld_int_sta *internal_sta)
{
	return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX,
					0, NULL, IWL_MAX_TID_COUNT);
}
1089
/* Tear down an internal station: optionally flush its TXQs, free the TX
 * queue, remove the station from the FW and release the driver state.
 * The order matters: the queue must be drained/freed before the FW
 * station is removed.
 */
static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
					struct iwl_mld_int_sta *internal_sta,
					bool flush, u8 tid)
{
	/* Nothing to do if the station was never (fully) added */
	if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA ||
			 internal_sta->queue_id == IWL_MLD_INVALID_QUEUE))
		return;

	if (flush)
		iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id);

	iwl_mld_free_txq(mld, BIT(internal_sta->sta_id),
			 tid, internal_sta->queue_id);

	iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);

	iwl_mld_free_internal_sta(mld, internal_sta);
}
1108
iwl_mld_remove_bcast_sta(struct iwl_mld * mld,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link)1109 void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
1110 struct ieee80211_vif *vif,
1111 struct ieee80211_bss_conf *link)
1112 {
1113 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1114
1115 if (WARN_ON(!mld_link))
1116 return;
1117
1118 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1119 vif->type != NL80211_IFTYPE_ADHOC))
1120 return;
1121
1122 iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true,
1123 IWL_MGMT_TID);
1124 }
1125
iwl_mld_remove_mcast_sta(struct iwl_mld * mld,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link)1126 void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
1127 struct ieee80211_vif *vif,
1128 struct ieee80211_bss_conf *link)
1129 {
1130 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1131
1132 if (WARN_ON(!mld_link))
1133 return;
1134
1135 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
1136 vif->type != NL80211_IFTYPE_ADHOC))
1137 return;
1138
1139 iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0);
1140 }
1141
iwl_mld_remove_aux_sta(struct iwl_mld * mld,struct ieee80211_vif * vif,struct ieee80211_bss_conf * link)1142 void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
1143 struct ieee80211_vif *vif,
1144 struct ieee80211_bss_conf *link)
1145 {
1146 struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
1147
1148 if (WARN_ON(!mld_link))
1149 return;
1150
1151 /* TODO: Hotspot 2.0 */
1152 if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
1153 return;
1154
1155 iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
1156 IWL_MAX_TID_COUNT);
1157 }
1158
/* Re-map the per-station resources (TX queues, keys, RX BA sessions)
 * from @old_sta_mask to @new_sta_mask, in that order. Stops at the
 * first failure and returns its error code, 0 on success.
 */
static int iwl_mld_update_sta_resources(struct iwl_mld *mld,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					u32 old_sta_mask,
					u32 new_sta_mask)
{
	int ret;

	ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask, new_sta_mask);
	if (!ret)
		ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask,
					      new_sta_mask);
	if (!ret)
		ret = iwl_mld_update_sta_baids(mld, old_sta_mask,
					       new_sta_mask);

	return ret;
}
1177
/* Update the set of link stations of @sta from @old_links to @new_links.
 *
 * Sequence:
 *  1. Compute the current FW station mask and the mask of stations to
 *     remove from the old link bitmap.
 *  2. Move resources (queues/keys/BAIDs) off the stations being removed,
 *     then remove those link stations.
 *  3. Add the new link stations, configure AMSDU/TLC for each, then move
 *     resources onto the new station mask.
 *  4. Activate the newly added links.
 *
 * On failure, any link stations added in this call are rolled back.
 * Returns 0 on success or a negative error code.
 */
int iwl_mld_update_link_stas(struct iwl_mld *mld,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     u16 old_links, u16 new_links)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	unsigned long links_to_add = ~old_links & new_links;
	unsigned long links_to_rem = old_links & ~new_links;
	/* for_each_set_bit() needs an unsigned long to iterate over */
	unsigned long old_links_long = old_links;
	unsigned long sta_mask_added = 0;
	u32 current_sta_mask = 0, sta_mask_to_rem = 0;
	unsigned int link_id, sta_id;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* Build the current FW station mask and the subset being removed */
	for_each_set_bit(link_id, &old_links_long,
			 IEEE80211_MLD_MAX_NUM_LINKS) {
		mld_link_sta =
			iwl_mld_link_sta_dereference_check(mld_sta, link_id);

		if (WARN_ON(!mld_link_sta))
			return -EINVAL;

		current_sta_mask |= BIT(mld_link_sta->fw_id);
		if (links_to_rem & BIT(link_id))
			sta_mask_to_rem |= BIT(mld_link_sta->fw_id);
	}

	/* Move queues/keys/BAIDs off the stations about to be removed */
	if (sta_mask_to_rem) {
		ret = iwl_mld_update_sta_resources(mld, vif, sta,
						   current_sta_mask,
						   current_sta_mask &
							~sta_mask_to_rem);
		if (ret)
			return ret;

		current_sta_mask &= ~sta_mask_to_rem;
	}

	for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_link_sta *link_sta =
			link_sta_dereference_protected(sta, link_id);

		if (WARN_ON(!link_sta))
			return -EINVAL;

		iwl_mld_remove_link_sta(mld, link_sta);
	}

	for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_link_sta *link_sta =
			link_sta_dereference_protected(sta, link_id);
		struct ieee80211_bss_conf *link;

		if (WARN_ON(!link_sta))
			return -EINVAL;

		ret = iwl_mld_add_link_sta(mld, link_sta);
		if (ret)
			goto remove_added_link_stas;

		mld_link_sta =
			iwl_mld_link_sta_dereference_check(mld_sta,
							   link_id);

		link = link_conf_dereference_protected(mld_sta->vif,
						       link_sta->link_id);

		/* Configure per-link AMSDU limits and rate scaling */
		iwl_mld_set_max_amsdu_len(mld, link_sta);
		iwl_mld_config_tlc_link(mld, vif, link, link_sta);

		sta_mask_added |= BIT(mld_link_sta->fw_id);
	}

	/* Move queues/keys/BAIDs onto the newly added stations */
	if (sta_mask_added) {
		ret = iwl_mld_update_sta_resources(mld, vif, sta,
						   current_sta_mask,
						   current_sta_mask |
							sta_mask_added);
		if (ret)
			goto remove_added_link_stas;
	}

	/* The links could not be activated before they had a STA; do it now */
	for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link =
			link_conf_dereference_protected(mld_sta->vif, link_id);

		if (WARN_ON(!link))
			continue;

		iwl_mld_activate_link(mld, link);
	}

	return 0;

remove_added_link_stas:
	/* Roll back every link station added by this call */
	for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) {
		struct ieee80211_link_sta *link_sta =
			wiphy_dereference(mld->wiphy,
					  mld->fw_id_to_link_sta[sta_id]);

		if (WARN_ON(!link_sta))
			continue;

		iwl_mld_remove_link_sta(mld, link_sta);
	}

	return ret;
}
1290