1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024 - 2025 Intel Corporation
4 */
5 #include <net/ip.h>
6
7 #include "tx.h"
8 #include "sta.h"
9 #include "hcmd.h"
10 #include "iwl-utils.h"
11 #include "iface.h"
12
13 #include "fw/dbg.h"
14
15 #include "fw/api/tx.h"
16 #include "fw/api/rs.h"
17 #include "fw/api/txq.h"
18 #include "fw/api/datapath.h"
19 #include "fw/api/time-event.h"
20
21 #define MAX_ANT_NUM 2
22
23 /* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
24 * the *index* used for the last TX, and returns the next valid *index* to use.
25 * In order to set it in the tx_cmd, must do BIT(idx).
26 */
static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
{
	u8 idx = last_idx;
	int attempts = MAX_ANT_NUM;

	/* Walk the antenna indices cyclically, starting after last_idx,
	 * and stop at the first one present in the valid bitmask.
	 */
	while (attempts--) {
		idx = (idx + 1) % MAX_ANT_NUM;
		if (valid & BIT(idx))
			return idx;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);

	return last_idx;
}
41
void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
{
	/* Advance *ant to the next valid TX antenna index */
	u8 valid_ants = iwl_mld_get_valid_tx_ant(mld);

	*ant = iwl_mld_next_ant(valid_ants, *ant);
}
46
/* Return the TX queue size to use for this TXQ's station, based on the
 * highest PHY capability found on any of its active links:
 * EHT > HE > default. EHT wins outright, so stop scanning once seen.
 */
static int
iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct ieee80211_sta *sta = txq->sta;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;
	int max_size = IWL_DEFAULT_QUEUE_SIZE;

	lockdep_assert_wiphy(mld->wiphy);

	for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
		if (link_sta->eht_cap.has_eht) {
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
			break;
		}

		/* HE raises the size but keep looking for an EHT link */
		if (link_sta->he_cap.has_he)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	return max_size;
}
69
iwl_mld_allocate_txq(struct iwl_mld * mld,struct ieee80211_txq * txq)70 static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
71 {
72 u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
73 u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
74 /* We can't know when the station is asleep or awake, so we
75 * must disable the queue hang detection.
76 */
77 unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
78 IWL_WATCHDOG_DISABLED :
79 mld->trans->mac_cfg->base->wd_timeout;
80 int queue, size;
81
82 lockdep_assert_wiphy(mld->wiphy);
83
84 if (tid == IWL_MGMT_TID)
85 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
86 mld->trans->mac_cfg->base->min_txq_size);
87 else
88 size = iwl_mld_get_queue_size(mld, txq);
89
90 queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
91 watchdog_timeout);
92
93 if (queue >= 0)
94 IWL_DEBUG_TX_QUEUES(mld,
95 "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
96 queue, fw_sta_mask, tid);
97 return queue;
98 }
99
/* Allocate a FW queue for this TXQ and publish the id -> TXQ mapping.
 * Returns 0 on success or a negative error from the allocation.
 */
static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	int id;

	lockdep_assert_wiphy(mld->wiphy);

	/* This will also send the SCD_QUEUE_CONFIG_CMD */
	id = iwl_mld_allocate_txq(mld, txq);
	if (id < 0)
		return id;

	mld_txq->fw_id = id;
	mld_txq->status.allocated = true;

	rcu_assign_pointer(mld->fw_id_to_txq[id], txq);

	return 0;
}
119
/* Drain mld->txqs_to_add: allocate a FW queue for each pending TXQ, then
 * flush any frames that accumulated on it while it had no queue yet.
 */
void iwl_mld_add_txq_list(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	while (!list_empty(&mld->txqs_to_add)) {
		struct ieee80211_txq *txq;
		struct iwl_mld_txq *mld_txq =
			list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
					 list);
		int failed;

		txq = container_of((void *)mld_txq, struct ieee80211_txq,
				   drv_priv);

		failed = iwl_mld_add_txq(mld, txq);

		/* list removal under add_txqs_lock (also taken by
		 * iwl_mld_remove_txq()), with BHs disabled around the
		 * subsequent TX flush
		 */
		local_bh_disable();
		spin_lock(&mld->add_txqs_lock);
		list_del_init(&mld_txq->list);
		spin_unlock(&mld->add_txqs_lock);
		/* If the queue allocation failed, we can't transmit. Leave the
		 * frames on the txq, maybe the attempt to allocate the queue
		 * will succeed.
		 */
		if (!failed)
			iwl_mld_tx_from_txq(mld, txq);
		local_bh_enable();
	}
}
149
iwl_mld_add_txqs_wk(struct wiphy * wiphy,struct wiphy_work * wk)150 void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
151 {
152 struct iwl_mld *mld = container_of(wk, struct iwl_mld,
153 add_txqs_wk);
154
155 /* will reschedule to run after restart */
156 if (mld->fw_status.in_hw_restart)
157 return;
158
159 iwl_mld_add_txq_list(mld);
160 }
161
/* Tell the FW to remove the SCD queue for the given station mask and TID,
 * then free the transport-level queue resources.
 */
void
iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
{
	struct iwl_scd_queue_cfg_cmd remove_cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
		.u.remove.tid = cpu_to_le32(tid),
		.u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
	};

	iwl_mld_send_cmd_pdu(mld,
			     WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
			     &remove_cmd);

	iwl_trans_txq_free(mld->trans, queue_id);
}
177
/* Undo iwl_mld_add_txq(): unlink the TXQ from the pending list and, if a
 * FW queue was already allocated for it, free it and clear the mapping.
 */
void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	u32 sta_msk, tid;

	lockdep_assert_wiphy(mld->wiphy);

	/* If still waiting to be added, just drop it from the list */
	spin_lock_bh(&mld->add_txqs_lock);
	if (!list_empty(&mld_txq->list))
		list_del_init(&mld_txq->list);
	spin_unlock_bh(&mld->add_txqs_lock);

	/* Nothing more to do if no FW queue was ever allocated */
	if (!mld_txq->status.allocated ||
	    WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
		return;

	sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);

	/* Management TXQ maps to the FW's management TID */
	tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
		txq->tid;

	iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);

	RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
	mld_txq->status.allocated = false;
}
204
/* Return a pointer of the given type at @off bytes into the skb's network
 * header; used below to walk IPv6 extension headers.
 */
#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
207
/* Build the offload_assist field for the TX command: enable HW checksum
 * offload where supported (falling back to skb_checksum_help() for cases
 * the HW can't handle), and encode MAC-header size, A-MSDU and padding
 * information.
 */
static __le32
iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
	u8 protocol = 0;

	/* Do not compute checksum if already computed */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */

	/* TBD: do we also need to check
	 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
	 * the devices we support has this flags?
	 */
	if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		/* Walk the IPv6 extension-header chain to find the L4
		 * protocol; bail out to SW checksum for anything the HW
		 * can't skip over.
		 */
		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/* Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

out:
#endif
	/* MAC header length is encoded in units of 2 bytes */
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	if (amsdu)
		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
		/* padding is inserted later in transport */
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	return cpu_to_le32(offload_assist);
}
302
/* Resolve the basic-rates bitmap and band for the frame's target link,
 * defaulting to the vif's bss_conf and the skb's band when no per-link
 * configuration is available.
 */
static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
					     struct ieee80211_vif *vif,
					     struct ieee80211_tx_info *info,
					     unsigned long *basic_rates,
					     u8 *band)
{
	u32 link_id = u32_get_bits(info->control.flags,
				   IEEE80211_TX_CTRL_MLO_LINK);

	/* defaults, possibly overridden from the link config below */
	*basic_rates = vif->bss_conf.basic_rates;
	*band = info->band;

	if (link_id == IEEE80211_LINK_UNSPECIFIED &&
	    ieee80211_vif_is_mld(vif)) {
		/* shouldn't do this when >1 link is active */
		WARN_ON(hweight16(vif->active_links) != 1);
		link_id = __ffs(vif->active_links);
	}

	if (link_id < IEEE80211_LINK_UNSPECIFIED) {
		struct ieee80211_bss_conf *link_conf;

		rcu_read_lock();
		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (link_conf) {
			*basic_rates = link_conf->basic_rates;
			if (link_conf->chanreq.oper.chan)
				*band = link_conf->chanreq.oper.chan->band;
		}
		rcu_read_unlock();
	}
}
335
/* Return the lowest usable legacy TX rate for this frame: the lowest
 * basic CCK rate on 2.4 GHz (when CCK is allowed and this isn't P2P),
 * otherwise the lowest basic OFDM rate, with fixed fallbacks when no
 * basic rate matched.
 */
u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_vif *vif)
{
	struct ieee80211_supported_band *sband;
	u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
	unsigned long basic_rates;
	u8 band, rate;
	u32 i;

	iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);

	if (band >= NUM_NL80211_BANDS) {
		/* an invalid band is only expected for NAN here */
		WARN_ON(vif->type != NL80211_IFTYPE_NAN);
		return IWL_FIRST_OFDM_RATE;
	}

	sband = mld->hw->wiphy->bands[band];
	/* track the lowest CCK and lowest OFDM among the basic rates */
	for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
		u16 hw = sband->bitrates[i].hw_value;

		if (hw >= IWL_FIRST_OFDM_RATE) {
			if (lowest_ofdm > hw)
				lowest_ofdm = hw;
		} else if (lowest_cck > hw) {
			lowest_cck = hw;
		}
	}

	if (band == NL80211_BAND_2GHZ && !vif->p2p &&
	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
	    !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
		if (lowest_cck != IWL_RATE_COUNT)
			rate = lowest_cck;
		else if (lowest_ofdm != IWL_RATE_COUNT)
			rate = lowest_ofdm;
		else
			rate = IWL_FIRST_CCK_RATE;
	} else if (lowest_ofdm != IWL_RATE_COUNT) {
		rate = lowest_ofdm;
	} else {
		rate = IWL_FIRST_OFDM_RATE;
	}

	return rate;
}
382
iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld * mld,struct ieee80211_tx_info * info,int rate_idx)383 static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
384 struct ieee80211_tx_info *info,
385 int rate_idx)
386 {
387 u32 rate_flags = 0;
388 u8 rate_plcp;
389
390 /* if the rate isn't a well known legacy rate, take the lowest one */
391 if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
392 rate_idx = iwl_mld_get_lowest_rate(mld, info,
393 info->control.vif);
394
395 WARN_ON_ONCE(rate_idx < 0);
396
397 /* Set CCK or OFDM flag */
398 if (rate_idx <= IWL_LAST_CCK_RATE)
399 rate_flags |= RATE_MCS_MOD_TYPE_CCK;
400 else
401 rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
402
403 /* Legacy rates are indexed:
404 * 0 - 3 for CCK and 0 - 7 for OFDM
405 */
406 rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
407 rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
408
409 return (u32)rate_plcp | rate_flags;
410 }
411
iwl_mld_get_tx_ant(struct iwl_mld * mld,struct ieee80211_tx_info * info,struct ieee80211_sta * sta,__le16 fc)412 static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
413 struct ieee80211_tx_info *info,
414 struct ieee80211_sta *sta, __le16 fc)
415 {
416 if (sta && ieee80211_is_data(fc)) {
417 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
418
419 return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
420 }
421
422 return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
423 }
424
/* Translate a fixed (injected) mac80211 TX rate into the FW rate_n_flags
 * encoding: VHT, HT or legacy, plus guard interval, bandwidth, coding
 * flags and antenna selection.
 */
static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_sta *sta,
				      __le16 fc)
{
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	u32 result;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
		u8 nss = ieee80211_rate_get_vht_nss(rate);

		result = RATE_MCS_MOD_TYPE_VHT;
		result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_40;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_80;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_160;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		/* only MCS 0-15 are supported */
		u8 mcs = rate->idx & 7;		/* MCS within the stream */
		u8 nss = rate->idx > 7;		/* MCS 8-15 -> 2nd stream */

		result = RATE_MCS_MOD_TYPE_HT;
		result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_40;
		if (info->flags & IEEE80211_TX_CTL_LDPC)
			result |= RATE_MCS_LDPC_MSK;
		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
			result |= RATE_MCS_STBC_MSK;
	} else {
		/* not HT/VHT - fall back to the legacy encoding */
		result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
	}

	/* injected frames may carry an explicit antenna mask */
	if (info->control.antennas)
		result |= u32_encode_bits(info->control.antennas,
					  RATE_MCS_ANT_AB_MSK);
	else
		result |= iwl_mld_get_tx_ant(mld, info, sta, fc);

	return result;
}
479
iwl_mld_get_tx_rate_n_flags(struct iwl_mld * mld,struct ieee80211_tx_info * info,struct ieee80211_sta * sta,__le16 fc)480 static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
481 struct ieee80211_tx_info *info,
482 struct ieee80211_sta *sta, __le16 fc)
483 {
484 u32 rate;
485
486 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
487 rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
488 else
489 rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
490 iwl_mld_get_tx_ant(mld, info, sta, fc);
491
492 return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3);
493 }
494
495 static void
iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd * tx_cmd,struct sk_buff * skb,bool amsdu)496 iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd,
497 struct sk_buff *skb, bool amsdu)
498 {
499 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
500 struct ieee80211_hdr *hdr = (void *)skb->data;
501 struct ieee80211_vif *vif;
502
503 /* Copy MAC header from skb into command buffer */
504 memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
505
506 if (!amsdu || !skb_is_gso(skb))
507 return;
508
509 /* As described in IEEE sta 802.11-2020, table 9-30 (Address
510 * field contents), A-MSDU address 3 should contain the BSSID
511 * address.
512 *
513 * In TSO, the skb header address 3 contains the original address 3 to
514 * correctly create all the A-MSDU subframes headers from it.
515 * Override now the address 3 in the command header with the BSSID.
516 *
517 * Note: we fill in the MLD address, but the firmware will do the
518 * necessary translation to link address after encryption.
519 */
520 vif = info->control.vif;
521 switch (vif->type) {
522 case NL80211_IFTYPE_STATION:
523 ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
524 break;
525 case NL80211_IFTYPE_AP:
526 ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
527 break;
528 default:
529 break;
530 }
531 }
532
/* Fill the device TX command for @skb: MAC header, TX flags (encryption,
 * priority, fixed rate), checksum-offload info and total length.
 */
static void
iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_tx_cmd,
		    struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
					    NULL;
	struct iwl_tx_cmd *tx_cmd;
	/* A-MSDU: QoS-data frame with the A-MSDU-present bit set */
	bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		     (*ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	__le32 rate_n_flags = 0;
	u16 flags = 0;

	dev_tx_cmd->hdr.cmd = TX_CMD;

	/* no hw_key -> tell the FW not to encrypt this frame */
	if (!info->control.hw_key)
		flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

	/* For data and mgmt packets rate info comes from the fw.
	 * Only set rate/antenna for injected frames with fixed rate, or
	 * when no sta is given.
	 */
	if (unlikely(!sta ||
		     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
		flags |= IWL_TX_FLAGS_CMD_RATE;
		rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
							   hdr->frame_control);
	} else if (!ieee80211_is_data(hdr->frame_control) ||
		   (mld_sta &&
		    mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
		/* These are important frames */
		flags |= IWL_TX_FLAGS_HIGH_PRI;
	}

	tx_cmd = (void *)dev_tx_cmd->payload;

	iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);

	tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);

	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);

	tx_cmd->flags = cpu_to_le16(flags);

	tx_cmd->rate_n_flags = rate_n_flags;
}
583
584 /* Caller of this need to check that info->control.vif is not NULL */
585 static struct iwl_mld_link *
iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info * info)586 iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
587 {
588 struct iwl_mld_vif *mld_vif =
589 iwl_mld_vif_from_mac80211(info->control.vif);
590 u32 link_id = u32_get_bits(info->control.flags,
591 IEEE80211_TX_CTRL_MLO_LINK);
592
593 if (link_id == IEEE80211_LINK_UNSPECIFIED) {
594 if (info->control.vif->active_links)
595 link_id = ffs(info->control.vif->active_links) - 1;
596 else
597 link_id = 0;
598 }
599
600 return rcu_dereference(mld_vif->link[link_id]);
601 }
602
/* Map an skb to the FW TX queue it should be sent on.
 * Returns a queue id, IWL_MLD_INVALID_QUEUE if no queue can be found,
 * or IWL_MLD_INVALID_DROP_TX if the frame should be dropped.
 */
static int
iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
			struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_link *link;

	/* simple case: a station TXQ already carries its FW queue id */
	if (txq && txq->sta)
		return iwl_mld_txq_from_mac80211(txq)->fw_id;

	if (!info->control.vif)
		return IWL_MLD_INVALID_QUEUE;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		link = iwl_mld_get_link_from_tx_info(info);

		if (WARN_ON(!link))
			break;

		/* ucast disassociate/deauth frames without a station might
		 * happen, especially with reason 7 ("Class 3 frame received
		 * from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(skb) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return link->bcast_sta.queue_id;

		if (is_multicast_ether_addr(hdr->addr1) &&
		    !ieee80211_has_order(fc))
			return link->mcast_sta.queue_id;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
		return link->bcast_sta.queue_id;
	case NL80211_IFTYPE_P2P_DEVICE:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		/* P2P device TX is only valid during a P2P discovery or
		 * negotiation remain-on-channel session
		 */
		if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC &&
		    mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) {
			IWL_DEBUG_DROP(mld,
				       "Drop tx outside ROC with activity %d\n",
				       mld_vif->roc_activity);
			return IWL_MLD_INVALID_DROP_TX;
		}

		WARN_ON(!ieee80211_is_mgmt(fc));

		return mld_vif->aux_sta.queue_id;
	case NL80211_IFTYPE_MONITOR:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
		return mld_vif->deflink.mon_sta.queue_id;
	case NL80211_IFTYPE_STATION:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		/* without a station, only off-channel (hotspot ROC) TX is
		 * expected on a station interface
		 */
		if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
			IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n");
			return IWL_MLD_INVALID_DROP_TX;
		}

		if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) {
			IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
			return IWL_MLD_INVALID_DROP_TX;
		}

		WARN_ON(!ieee80211_is_mgmt(fc));
		return mld_vif->aux_sta.queue_id;
	case NL80211_IFTYPE_NAN:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		WARN_ON(!ieee80211_is_mgmt(fc));

		return mld_vif->aux_sta.queue_id;
	default:
		WARN_ONCE(1, "Unsupported vif type\n");
		break;
	}

	return IWL_MLD_INVALID_QUEUE;
}
688
/* On P2P interfaces, append the Notice of Absence attribute published by
 * the FW as a WFA vendor-specific IE at the end of the probe response.
 */
static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mld_link *mld_link =
		&iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
	struct iwl_probe_resp_data *resp_data;
	u8 *pos;

	/* NoA is a P2P-only concept */
	if (!info->control.vif->p2p)
		return;

	rcu_read_lock();

	resp_data = rcu_dereference(mld_link->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	/* grow the skb if there's no room for the IE at the tail */
	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mld,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	/* build the vendor-specific IE header (WFA OUI + P2P type) */
	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	/* copy the NoA attribute body after the vendor IE header */
	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}
734
735 /* This function must be called with BHs disabled */
iwl_mld_tx_mpdu(struct iwl_mld * mld,struct sk_buff * skb,struct ieee80211_txq * txq)736 static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
737 struct ieee80211_txq *txq)
738 {
739 struct ieee80211_hdr *hdr = (void *)skb->data;
740 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
741 struct ieee80211_sta *sta = txq ? txq->sta : NULL;
742 struct iwl_device_tx_cmd *dev_tx_cmd;
743 int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
744 u8 tid = IWL_MAX_TID_COUNT;
745
746 if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
747 queue == IWL_MLD_INVALID_DROP_TX)
748 return -1;
749
750 if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
751 return -1;
752
753 dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
754 if (unlikely(!dev_tx_cmd))
755 return -1;
756
757 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
758 if (IWL_MLD_NON_TRANSMITTING_AP)
759 return -1;
760
761 iwl_mld_probe_resp_set_noa(mld, skb);
762 }
763
764 iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
765
766 if (ieee80211_is_data(hdr->frame_control)) {
767 if (ieee80211_is_data_qos(hdr->frame_control))
768 tid = ieee80211_get_tid(hdr);
769 else
770 tid = IWL_TID_NON_QOS;
771 }
772
773 IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
774 tid, queue, skb->len);
775
776 /* From now on, we cannot access info->control */
777 memset(&info->status, 0, sizeof(info->status));
778 memset(info->driver_data, 0, sizeof(info->driver_data));
779
780 info->driver_data[1] = dev_tx_cmd;
781
782 if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
783 goto err;
784
785 /* Update low-latency counter when a packet is queued instead
786 * of after TX, it makes sense for early low-latency detection
787 */
788 if (sta)
789 iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
790
791 return 0;
792
793 err:
794 iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
795 IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
796 return -1;
797 }
798
799 #ifdef CONFIG_INET
800
801 /* This function handles the segmentation of a large TSO packet into multiple
802 * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
803 * constraints.
804 */
static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
				  struct ieee80211_sta *sta,
				  struct sk_buff_head *mpdus_skbs)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len;
	u16 snap_ip_tcp, pad, max_tid_amsdu_len;
	u8 tid;

	/* per-subframe L3/L4 overhead: SNAP (8) + IP header + TCP header */
	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);

	/* Without QoS or a rate-control A-MSDU limit, no A-MSDU is built:
	 * segment into single-MSS MPDUs.
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !sta->cur->max_rc_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Do not build AMSDU for IPv6 with extension headers.
	 * Ask stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	/* a zero per-TID limit also means "no A-MSDU on this TID" */
	max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
	if (!max_tid_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/* If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	/* TCP payload = linear data after the TCP header + paged data */
	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/* Make sure we have enough TBs for the A-MSDU:
	 * 2 for each subframe
	 * 1 more for each fragment
	 * 1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mld->trans->info.max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (tcp_payload_len <= num_subframes * mss) {
		__skb_queue_tail(mpdus_skbs, skb);
		return 0;
	}

	/* Trick the segmentation function to make it create SKBs that can fit
	 * into one A-MSDU.
	 */
	return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
}
879
880 /* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
881 * large packets when necessary and transmitting each segment as MPDU.
882 */
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff *orig_skb = skb;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON(!txq || !txq->sta))
		return -1;

	/* TCP payload = linear data after the TCP header + paged data */
	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/* fits in a single MSS: no segmentation needed */
	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mld_tx_mpdu(mld, skb, txq);

	if (!info->control.vif)
		return -1;

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
	if (ret)
		return ret;

	WARN_ON(skb_queue_empty(&mpdus_skbs));

	/* transmit each resulting MPDU; on failure, everything already
	 * segmented is consumed here (freed), so the caller must not
	 * touch the skb again
	 */
	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mld_tx_mpdu(mld, skb, txq);
		if (!ret)
			continue;

		/* Free skbs created as part of TSO logic that have not yet
		 * been dequeued
		 */
		__skb_queue_purge(&mpdus_skbs);

		/* skb here is not necessarily same as skb that entered
		 * this method, so free it explicitly.
		 */
		if (skb == orig_skb)
			ieee80211_free_txskb(mld->hw, skb);
		else
			kfree_skb(skb);

		/* there was error, but we consumed skb one way or
		 * another, so return 0
		 */
		return 0;
	}

	return 0;
}
940 #else
/* Stub for builds without CONFIG_INET: GSO skbs should never show up */
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
949 #endif /* CONFIG_INET */
950
iwl_mld_tx_skb(struct iwl_mld * mld,struct sk_buff * skb,struct ieee80211_txq * txq)951 void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
952 struct ieee80211_txq *txq)
953 {
954 if (skb_is_gso(skb)) {
955 if (!iwl_mld_tx_tso(mld, skb, txq))
956 return;
957 goto err;
958 }
959
960 if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
961 return;
962
963 err:
964 ieee80211_free_txskb(mld->hw, skb);
965 }
966
/* Dequeue and transmit all pending frames of a mac80211 TXQ, using an
 * atomic tx_request counter so that concurrent callers don't transmit
 * the same queue in parallel.
 */
void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;
	u8 zero_addr[ETH_ALEN] = {};

	/*
	 * No need for threads to be pending here, they can leave the first
	 * taker all the work.
	 *
	 * mld_txq->tx_request logic:
	 *
	 * If 0, no one is currently TXing, set to 1 to indicate current thread
	 * will now start TX and other threads should quit.
	 *
	 * If 1, another thread is currently TXing, set to 2 to indicate to
	 * that thread that there was another request. Since that request may
	 * have raced with the check whether the queue is empty, the TXing
	 * thread should check the queue's status one more time before leaving.
	 * This check is done in order to not leave any TX hanging in the queue
	 * until the next TX invocation (which may not even happen).
	 *
	 * If 2, another thread is currently TXing, and it will already double
	 * check the queue, so do nothing.
	 */
	if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
		return;

	rcu_read_lock();
	do {
		/* stop dequeuing while the HW queue is marked full */
		while (likely(!mld_txq->status.stop_full) &&
		       (skb = ieee80211_tx_dequeue(mld->hw, txq)))
			iwl_mld_tx_skb(mld, skb, txq);
	} while (atomic_dec_return(&mld_txq->tx_request));

	IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
		     txq->sta ? txq->sta->addr : zero_addr, txq->tid);

	rcu_read_unlock();
}
1007
/* Translate a firmware rate_n_flags value into the mac80211 TX-rate
 * representation stored in info->status.rates[0].
 *
 * HE/EHT rates cannot be expressed in struct ieee80211_tx_rate (that
 * would need ieee80211_tx_status_ext()), so for those only idx = 0 is
 * reported; the value only matters for radiotap.
 */
static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld,
				      __le32 rate_n_flags_fw,
				      struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
	u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw,
						  mld->fw_rates_ver_3);

	if (rate_n_flags & RATE_MCS_SGI_MSK)
		tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;

	/* 20 MHz needs no flag; any unknown width is treated like 20 MHz */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_40:
		tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_20:
	default:
		break;
	}

	switch (rate_n_flags & RATE_MCS_MOD_TYPE_MSK) {
	case RATE_MCS_MOD_TYPE_HT:
		tx_rate->flags |= IEEE80211_TX_RC_MCS;
		tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
		break;
	case RATE_MCS_MOD_TYPE_VHT:
		ieee80211_rate_set_vht(tx_rate,
				       rate_n_flags & RATE_MCS_CODE_MSK,
				       u32_get_bits(rate_n_flags,
						    RATE_MCS_NSS_MSK) + 1);
		tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	case RATE_MCS_MOD_TYPE_HE:
	case RATE_MCS_MOD_TYPE_EHT:
		/* mac80211 cannot do this without ieee80211_tx_status_ext()
		 * but it only matters for radiotap
		 */
		tx_rate->idx = 0;
		break;
	default:
		/* legacy (CCK/OFDM) rate: map to the band's rate index */
		tx_rate->idx =
			iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
							      info->band);
		break;
	}
}
1065
/* Handle a TX response notification from the firmware.
 *
 * Validates the notification (exactly one frame, expected size),
 * reclaims all frames up to the reported SSN from the transport queue,
 * fills in their TX status, and hands them back to mac80211. On
 * failures, also triggers debug collection and antenna toggling.
 */
void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_tx_resp *tx_resp = (void *)pkt->data;
	int txq_id = le16_to_cpu(tx_resp->tx_queue);
	struct agg_tx_status *agg_status = &tx_resp->status;
	u32 status = le16_to_cpu(agg_status->status);
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	/* fixed struct plus one trailing u32 carrying the SSN */
	size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
	int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	u16 ssn;
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	bool mgmt = false;
	bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;

	/* this driver only supports single-frame TX responses */
	if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
			 "Invalid tx_resp notif frame_count (%d)\n",
			 tx_resp->frame_count))
		return;

	/* validate the size of the variable part of the notif */
	if (IWL_FW_CHECK(mld, notif_size != pkt_len,
			 "Invalid tx_resp notif size (expected=%zu got=%u)\n",
			 notif_size, pkt_len))
		return;

	/* the SSN is the u32 following the per-frame status entries */
	ssn = le32_to_cpup((__le32 *)agg_status +
			   tx_resp->frame_count) & 0xFFFF;

	__skb_queue_head_init(&skbs);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *)skb->data;

		skb_freed++;

		/* free the device command attached to this frame at TX time */
		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		default:
			break;
		}

		/* If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		if (tx_failure) {
			enum iwl_fw_ini_time_point tp =
				IWL_FW_INI_TIME_POINT_TX_FAILED;

			if (ieee80211_is_action(hdr->frame_control))
				tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
			else if (ieee80211_is_mgmt(hdr->frame_control))
				mgmt = true;

			/* trigger firmware debug data collection */
			iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
		}

		iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info);

		/* time-sync frames are consumed by the time-sync path */
		if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
			ieee80211_tx_status_skb(mld->hw, skb);
	}

	IWL_DEBUG_TX_REPLY(mld,
			   "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
			   txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame);

	/* failed management frames: try the other antenna next time */
	if (tx_failure && mgmt)
		iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);

	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
			 "Got invalid sta_id (%d)\n", sta_id))
		return;

	rcu_read_lock();

	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (!link_sta) {
		/* This can happen if the TX cmd was sent before pre_rcu_remove
		 * but the TX response was received after
		 */
		IWL_DEBUG_TX_REPLY(mld,
				   "Got valid sta_id (%d) but sta is NULL\n",
				   sta_id);
		goto out;
	}

	/* an ERR_PTR entry marks a station without a mac80211 counterpart */
	if (IS_ERR(link_sta))
		goto out;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);

	/* before authorization, also toggle data antenna on failure */
	if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
		iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);

	if (tid < IWL_MAX_TID_COUNT)
		iwl_mld_count_mpdu_tx(link_sta, 1);

out:
	rcu_read_unlock();
}
1191
/* Reclaim frames from a transport queue up to @index and report their
 * status to mac80211.
 *
 * @in_flush: true when called as part of flushing the queue, in which
 *	case the frames were dropped rather than acknowledged.
 */
static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
				   bool in_flush)
{
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff *skb;

	__skb_queue_head_init(&reclaimed_skbs);

	iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);

	while ((skb = __skb_dequeue(&reclaimed_skbs))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* release the device command attached at TX time */
		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		if (in_flush)
			info->flags &= ~IEEE80211_TX_STAT_ACK;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_skb(mld->hw, skb);
	}
}
1221
/* Flush all TX queues of a firmware station (all TIDs).
 *
 * Sends TXPATH_FLUSH, validates the response, then reclaims every
 * queue the firmware reports as flushed so pending frames are returned
 * to mac80211. Returns 0 on success or a negative error code.
 */
int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
{
	struct iwl_tx_path_flush_cmd_rsp *rsp;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(fw_sta_id),
		.tid_mask = cpu_to_le16(0xffff),
	};
	struct iwl_host_cmd cmd = {
		.id = TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, num_flushed_queues;
	u32 resp_len;

	IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
			    fw_sta_id, 0xffff);

	ret = iwl_mld_send_cmd(mld, &cmd);
	if (ret) {
		IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
		return ret;
	}

	/* validate the response before trusting any field in it */
	resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
			 "Invalid TXPATH_FLUSH response len: %d\n",
			 resp_len)) {
		ret = -EIO;
		goto free_rsp;
	}

	rsp = (void *)cmd.resp_pkt->data;

	if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
			 "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
			 le16_to_cpu(rsp->sta_id))) {
		ret = -EIO;
		goto free_rsp;
	}

	num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
	if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
			 "num_flushed_queues %d\n", num_flushed_queues)) {
		ret = -EIO;
		goto free_rsp;
	}

	/* reclaim every flushed queue up to its post-flush read pointer */
	for (int i = 0; i < num_flushed_queues; i++) {
		struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
		int read_after = le16_to_cpu(queue_info->read_after_flush);
		int txq_id = le16_to_cpu(queue_info->queue_num);

		if (IWL_FW_CHECK(mld,
				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
				 "Invalid txq id %d\n", txq_id))
			continue;

		IWL_DEBUG_TX_QUEUES(mld,
				    "tid %d txq_id %d read-before %d read-after %d\n",
				    le16_to_cpu(queue_info->tid), txq_id,
				    le16_to_cpu(queue_info->read_before_flush),
				    read_after);

		iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
	}

free_rsp:
	iwl_free_resp(&cmd);
	return ret;
}
1294
iwl_mld_ensure_queue(struct iwl_mld * mld,struct ieee80211_txq * txq)1295 int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
1296 {
1297 struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
1298 int ret;
1299
1300 lockdep_assert_wiphy(mld->wiphy);
1301
1302 if (likely(mld_txq->status.allocated))
1303 return 0;
1304
1305 ret = iwl_mld_add_txq(mld, txq);
1306
1307 spin_lock_bh(&mld->add_txqs_lock);
1308 if (!list_empty(&mld_txq->list))
1309 list_del_init(&mld_txq->list);
1310 spin_unlock_bh(&mld->add_txqs_lock);
1311
1312 return ret;
1313 }
1314
/* Update the station mask of all allocated firmware queues of @sta,
 * e.g. when the set of active links changes.
 *
 * Sends one SCD_QUEUE_CONFIG_CMD (MODIFY) per allocated queue; stops
 * and returns the error of the first failing command, 0 on success.
 */
int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
			    struct ieee80211_sta *sta,
			    u32 old_sta_mask, u32 new_sta_mask)
{
	struct iwl_scd_queue_cfg_cmd cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
		.u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
		.u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* all data TIDs, plus one extra iteration for the management queue */
	for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
		bool mgmt = tid == IWL_MAX_TID_COUNT;
		struct ieee80211_txq *txq =
			sta->txq[mgmt ? IEEE80211_NUM_TIDS : tid];
		struct iwl_mld_txq *mld_txq =
			iwl_mld_txq_from_mac80211(txq);
		int ret;

		/* only queues that exist in the firmware need a modify */
		if (!mld_txq->status.allocated)
			continue;

		cmd.u.modify.tid = cpu_to_le32(mgmt ? IWL_MGMT_TID : tid);

		ret = iwl_mld_send_cmd_pdu(mld,
					   WIDE_ID(DATA_PATH_GROUP,
						   SCD_QUEUE_CONFIG_CMD),
					   &cmd);
		if (ret)
			return ret;
	}

	return 0;
}
1353
/* Handle a compressed BlockAck notification from the firmware.
 *
 * For each TFD entry in the notification, reclaim the corresponding
 * transport queue up to the reported index (reporting the frames as
 * acked), then account the transmitted MPDUs to the link station.
 */
void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt)
{
	struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
	u8 sta_id = ba_res->sta_id;
	struct ieee80211_link_sta *link_sta;

	/* nothing to reclaim */
	if (!tfd_cnt)
		return;

	/* make sure the variable-length TFD array fits in the packet */
	if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
			 "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
			 tfd_cnt, pkt_len))
		return;

	IWL_DEBUG_TX_REPLY(mld,
			   "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
			   sta_id, le32_to_cpu(ba_res->flags),
			   le16_to_cpu(ba_res->txed),
			   le16_to_cpu(ba_res->done));

	for (int i = 0; i < tfd_cnt; i++) {
		struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
		int txq_id = le16_to_cpu(ba_tfd->q_num);
		int index = le16_to_cpu(ba_tfd->tfd_index);

		if (IWL_FW_CHECK(mld,
				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
				 "Invalid txq id %d\n", txq_id))
			continue;

		/* not a flush: reclaimed frames are reported as acked */
		iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
	}

	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
			 "Got invalid sta_id (%d)\n", sta_id))
		return;

	rcu_read_lock();

	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
			 "Got valid sta_id (%d) but link_sta is NULL\n",
			 sta_id))
		goto out;

	/* account all frames the firmware reports as transmitted */
	iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
out:
	rcu_read_unlock();
}
1406