1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024 - 2025 Intel Corporation
4 */
5 #include <net/ip.h>
6
7 #include "tx.h"
8 #include "sta.h"
9 #include "hcmd.h"
10 #include "iwl-utils.h"
11 #include "iface.h"
12
13 #include "fw/dbg.h"
14
15 #include "fw/api/tx.h"
16 #include "fw/api/rs.h"
17 #include "fw/api/txq.h"
18 #include "fw/api/datapath.h"
19 #include "fw/api/time-event.h"
20
21 #define MAX_ANT_NUM 2
22
23 /* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
24 * the *index* used for the last TX, and returns the next valid *index* to use.
25 * To set it in the tx_cmd, use BIT(idx).
26 */
27 static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
28 {
29 u8 index = last_idx;
30
31 for (int i = 0; i < MAX_ANT_NUM; i++) {
32 index = (index + 1) % MAX_ANT_NUM;
33 if (valid & BIT(index))
34 return index;
35 }
36
37 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
38
39 return last_idx;
40 }
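/* Illustrative behavior of iwl_mld_next_ant(): with valid = 0x3 (both
 * antennas usable) the returned index alternates 0 -> 1 -> 0; with
 * valid = 0x2 only index 1 is ever returned, regardless of last_idx.
 */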
41
42 void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
43 {
44 *ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
45 }
46
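/* Pick the TX queue size from the peer's highest PHY capability across its
 * active links: EHT peers get the largest queue size, HE peers a medium
 * one, and everything else the default.
 */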
47 static int
48 iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
49 {
50 struct ieee80211_sta *sta = txq->sta;
51 struct ieee80211_link_sta *link_sta;
52 unsigned int link_id;
53 int max_size = IWL_DEFAULT_QUEUE_SIZE;
54
55 lockdep_assert_wiphy(mld->wiphy);
56
57 for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
58 if (link_sta->eht_cap.has_eht) {
59 max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
60 break;
61 }
62
63 if (link_sta->he_cap.has_he)
64 max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
65 }
66
67 return max_size;
68 }
69
70 static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
71 {
72 u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
73 u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
74 /* We can't know when the station is asleep or awake, so we
75 * must disable the queue hang detection.
76 */
77 unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
78 IWL_WATCHDOG_DISABLED :
79 mld->trans->mac_cfg->base->wd_timeout;
80 int queue, size;
81
82 lockdep_assert_wiphy(mld->wiphy);
83
84 if (tid == IWL_MGMT_TID)
85 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
86 mld->trans->mac_cfg->base->min_txq_size);
87 else
88 size = iwl_mld_get_queue_size(mld, txq);
89
90 queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
91 watchdog_timeout);
92
93 if (queue >= 0)
94 IWL_DEBUG_TX_QUEUES(mld,
95 "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
96 queue, fw_sta_mask, tid);
97 return queue;
98 }
99
100 static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
101 {
102 struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
103 int id;
104
105 lockdep_assert_wiphy(mld->wiphy);
106
107 /* This will also send the SCD_QUEUE_CONFIG_CMD */
108 id = iwl_mld_allocate_txq(mld, txq);
109 if (id < 0)
110 return id;
111
112 mld_txq->fw_id = id;
113 mld_txq->status.allocated = true;
114
115 rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
116
117 return 0;
118 }
119
120 void iwl_mld_add_txq_list(struct iwl_mld *mld)
121 {
122 lockdep_assert_wiphy(mld->wiphy);
123
124 while (!list_empty(&mld->txqs_to_add)) {
125 struct ieee80211_txq *txq;
126 struct iwl_mld_txq *mld_txq =
127 list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
128 list);
129 int failed;
130
131 txq = container_of((void *)mld_txq, struct ieee80211_txq,
132 drv_priv);
133
134 failed = iwl_mld_add_txq(mld, txq);
135
136 local_bh_disable();
137 spin_lock(&mld->add_txqs_lock);
138 list_del_init(&mld_txq->list);
139 spin_unlock(&mld->add_txqs_lock);
140 /* If the queue allocation failed, we can't transmit. Leave the
141 * frames on the txq; maybe a later attempt to allocate the queue
142 * will succeed.
143 */
144 if (!failed)
145 iwl_mld_tx_from_txq(mld, txq);
146 local_bh_enable();
147 }
148 }
149
150 void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
151 {
152 struct iwl_mld *mld = container_of(wk, struct iwl_mld,
153 add_txqs_wk);
154
155 /* will reschedule to run after restart */
156 if (mld->fw_status.in_hw_restart)
157 return;
158
159 iwl_mld_add_txq_list(mld);
160 }
161
162 void
163 iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
164 {
165 struct iwl_scd_queue_cfg_cmd remove_cmd = {
166 .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
167 .u.remove.tid = cpu_to_le32(tid),
168 .u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
169 };
170
171 iwl_mld_send_cmd_pdu(mld,
172 WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
173 &remove_cmd);
174
175 iwl_trans_txq_free(mld->trans, queue_id);
176 }
177
178 void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
179 {
180 struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
181 u32 sta_msk, tid;
182
183 lockdep_assert_wiphy(mld->wiphy);
184
185 spin_lock_bh(&mld->add_txqs_lock);
186 if (!list_empty(&mld_txq->list))
187 list_del_init(&mld_txq->list);
188 spin_unlock_bh(&mld->add_txqs_lock);
189
190 if (!mld_txq->status.allocated ||
191 WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
192 return;
193
194 sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
195
196 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
197 txq->tid;
198
199 iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
200
201 RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
202 mld_txq->status.allocated = false;
203 }
204
205 #define OPT_HDR(type, skb, off) \
206 (type *)(skb_network_header(skb) + (off))
207
208 static __le32
209 iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
210 {
211 struct ieee80211_hdr *hdr = (void *)skb->data;
212 u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
213 u16 offload_assist = 0;
214 #if IS_ENABLED(CONFIG_INET)
215 u8 protocol = 0;
216
217 /* Do not compute checksum if already computed */
218 if (skb->ip_summed != CHECKSUM_PARTIAL)
219 goto out;
220
221 /* We do not expect to be requested to csum stuff we do not support */
222
223 /* TBD: do we also need to check
224 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
225 * the devices we support have this flag?
226 */
227 if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
228 skb->protocol != htons(ETH_P_IPV6),
229 "No support for requested checksum\n")) {
230 skb_checksum_help(skb);
231 goto out;
232 }
233
234 if (skb->protocol == htons(ETH_P_IP)) {
235 protocol = ip_hdr(skb)->protocol;
236 } else {
237 #if IS_ENABLED(CONFIG_IPV6)
238 struct ipv6hdr *ipv6h =
239 (struct ipv6hdr *)skb_network_header(skb);
240 unsigned int off = sizeof(*ipv6h);
241
242 protocol = ipv6h->nexthdr;
243 while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
244 struct ipv6_opt_hdr *hp;
245
246 /* only supported extension headers */
247 if (protocol != NEXTHDR_ROUTING &&
248 protocol != NEXTHDR_HOP &&
249 protocol != NEXTHDR_DEST) {
250 skb_checksum_help(skb);
251 goto out;
252 }
253
254 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
255 protocol = hp->nexthdr;
256 off += ipv6_optlen(hp);
257 }
258 /* if we get here - protocol now should be TCP/UDP */
259 #endif
260 }
261
262 if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
263 WARN_ON_ONCE(1);
264 skb_checksum_help(skb);
265 goto out;
266 }
267
268 /* enable L4 csum */
269 offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
270
271 /* Set offset to IP header (snap).
272 * We don't support tunneling so no need to take care of inner header.
273 * Size is in words.
274 */
275 offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
276
277 /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
278 if (skb->protocol == htons(ETH_P_IP) && amsdu) {
279 ip_hdr(skb)->check = 0;
280 offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
281 }
282
283 /* reset UDP/TCP header csum */
284 if (protocol == IPPROTO_TCP)
285 tcp_hdr(skb)->check = 0;
286 else
287 udp_hdr(skb)->check = 0;
288
289 out:
290 #endif
291 mh_len /= 2;
292 offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
293
294 if (amsdu)
295 offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
296 else if (ieee80211_hdrlen(hdr->frame_control) % 4)
297 /* padding is inserted later in transport */
298 offload_assist |= BIT(TX_CMD_OFFLD_PAD);
299
300 return cpu_to_le32(offload_assist);
301 }
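/* Illustrative example (assumed frame) of the offload_assist value built
 * above: a checksummed IPv4/TCP QoS-data frame with a 26-byte MAC header
 * and no A-MSDU gets L4_EN set, the IP header offset field set to 4 words
 * (the 8-byte SNAP), MH_SIZE = 26 / 2 = 13, and the PAD bit set since the
 * header length is not a multiple of 4.
 */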
302
303 static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
304 struct ieee80211_vif *vif,
305 struct ieee80211_tx_info *info,
306 unsigned long *basic_rates,
307 u8 *band)
308 {
309 u32 link_id = u32_get_bits(info->control.flags,
310 IEEE80211_TX_CTRL_MLO_LINK);
311
312 *basic_rates = vif->bss_conf.basic_rates;
313 *band = info->band;
314
315 if (link_id == IEEE80211_LINK_UNSPECIFIED &&
316 ieee80211_vif_is_mld(vif)) {
317 /* shouldn't do this when >1 link is active */
318 WARN_ON(hweight16(vif->active_links) != 1);
319 link_id = __ffs(vif->active_links);
320 }
321
322 if (link_id < IEEE80211_LINK_UNSPECIFIED) {
323 struct ieee80211_bss_conf *link_conf;
324
325 rcu_read_lock();
326 link_conf = rcu_dereference(vif->link_conf[link_id]);
327 if (link_conf) {
328 *basic_rates = link_conf->basic_rates;
329 if (link_conf->chanreq.oper.chan)
330 *band = link_conf->chanreq.oper.chan->band;
331 }
332 rcu_read_unlock();
333 }
334 }
335
336 u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
337 struct ieee80211_tx_info *info,
338 struct ieee80211_vif *vif)
339 {
340 struct ieee80211_supported_band *sband;
341 u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
342 unsigned long basic_rates;
343 u8 band, rate;
344 u32 i;
345
346 iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
347
348 sband = mld->hw->wiphy->bands[band];
349 for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
350 u16 hw = sband->bitrates[i].hw_value;
351
352 if (hw >= IWL_FIRST_OFDM_RATE) {
353 if (lowest_ofdm > hw)
354 lowest_ofdm = hw;
355 } else if (lowest_cck > hw) {
356 lowest_cck = hw;
357 }
358 }
359
360 if (band == NL80211_BAND_2GHZ && !vif->p2p &&
361 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
362 !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
363 if (lowest_cck != IWL_RATE_COUNT)
364 rate = lowest_cck;
365 else if (lowest_ofdm != IWL_RATE_COUNT)
366 rate = lowest_ofdm;
367 else
368 rate = IWL_FIRST_CCK_RATE;
369 } else if (lowest_ofdm != IWL_RATE_COUNT) {
370 rate = lowest_ofdm;
371 } else {
372 rate = IWL_FIRST_OFDM_RATE;
373 }
374
375 return rate;
376 }
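/* Note on iwl_mld_get_lowest_rate(): on 5/6 GHz, or when CCK is disallowed,
 * this reduces to the lowest basic OFDM rate, falling back to
 * IWL_FIRST_OFDM_RATE when the basic rate set is empty.
 */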
377
378 static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
379 struct ieee80211_tx_info *info,
380 int rate_idx)
381 {
382 u32 rate_flags = 0;
383 u8 rate_plcp;
384
385 /* if the rate isn't a well known legacy rate, take the lowest one */
386 if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
387 rate_idx = iwl_mld_get_lowest_rate(mld, info,
388 info->control.vif);
389
390 WARN_ON_ONCE(rate_idx < 0);
391
392 /* Set CCK or OFDM flag */
393 if (rate_idx <= IWL_LAST_CCK_RATE)
394 rate_flags |= RATE_MCS_MOD_TYPE_CCK;
395 else
396 rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
397
398 /* Legacy rates are indexed:
399 * 0 - 3 for CCK and 0 - 7 for OFDM
400 */
401 rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
402 rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
403
404 return (u32)rate_plcp | rate_flags;
405 }
406
407 static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
408 struct ieee80211_tx_info *info,
409 struct ieee80211_sta *sta, __le16 fc)
410 {
411 if (sta && ieee80211_is_data(fc)) {
412 struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
413
414 return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
415 }
416
417 return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
418 }
419
420 static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
421 struct ieee80211_tx_info *info,
422 struct ieee80211_sta *sta,
423 __le16 fc)
424 {
425 struct ieee80211_tx_rate *rate = &info->control.rates[0];
426 u32 result;
427
428 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
429 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
430 u8 nss = ieee80211_rate_get_vht_nss(rate);
431
432 result = RATE_MCS_MOD_TYPE_VHT;
433 result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
434 result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
435
436 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
437 result |= RATE_MCS_SGI_MSK;
438
439 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
440 result |= RATE_MCS_CHAN_WIDTH_40;
441 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
442 result |= RATE_MCS_CHAN_WIDTH_80;
443 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
444 result |= RATE_MCS_CHAN_WIDTH_160;
445 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
446 /* only MCS 0-15 are supported */
447 u8 mcs = rate->idx & 7;
448 u8 nss = rate->idx > 7;
449
450 result = RATE_MCS_MOD_TYPE_HT;
451 result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
452 result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
453
454 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
455 result |= RATE_MCS_SGI_MSK;
456 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
457 result |= RATE_MCS_CHAN_WIDTH_40;
458 if (info->flags & IEEE80211_TX_CTL_LDPC)
459 result |= RATE_MCS_LDPC_MSK;
460 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
461 result |= RATE_MCS_STBC_MSK;
462 } else {
463 result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
464 }
465
466 if (info->control.antennas)
467 result |= u32_encode_bits(info->control.antennas,
468 RATE_MCS_ANT_AB_MSK);
469 else
470 result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
471
472 return result;
473 }
474
475 static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
476 struct ieee80211_tx_info *info,
477 struct ieee80211_sta *sta, __le16 fc)
478 {
479 u32 rate;
480
481 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
482 rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
483 else
484 rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
485 iwl_mld_get_tx_ant(mld, info, sta, fc);
486
487 return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3);
488 }
489
490 static void
491 iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd,
492 struct sk_buff *skb, bool amsdu)
493 {
494 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
495 struct ieee80211_hdr *hdr = (void *)skb->data;
496 struct ieee80211_vif *vif;
497
498 /* Copy MAC header from skb into command buffer */
499 memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
500
501 if (!amsdu || !skb_is_gso(skb))
502 return;
503
504 /* As described in IEEE Std 802.11-2020, table 9-30 (Address
505 * field contents), A-MSDU address 3 should contain the BSSID
506 * address.
507 *
508 * In TSO, the skb header address 3 contains the original address 3 to
509 * correctly create all the A-MSDU subframe headers from it.
510 * Override now the address 3 in the command header with the BSSID.
511 *
512 * Note: we fill in the MLD address, but the firmware will do the
513 * necessary translation to link address after encryption.
514 */
515 vif = info->control.vif;
516 switch (vif->type) {
517 case NL80211_IFTYPE_STATION:
518 ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
519 break;
520 case NL80211_IFTYPE_AP:
521 ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
522 break;
523 default:
524 break;
525 }
526 }
527
528 static void
529 iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
530 struct iwl_device_tx_cmd *dev_tx_cmd,
531 struct ieee80211_sta *sta)
532 {
533 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
534 struct ieee80211_hdr *hdr = (void *)skb->data;
535 struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
536 NULL;
537 struct iwl_tx_cmd *tx_cmd;
538 bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
539 (*ieee80211_get_qos_ctl(hdr) &
540 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
541 __le32 rate_n_flags = 0;
542 u16 flags = 0;
543
544 dev_tx_cmd->hdr.cmd = TX_CMD;
545
546 if (!info->control.hw_key)
547 flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
548
549 /* For data and mgmt packets rate info comes from the fw.
550 * Only set rate/antenna for injected frames with fixed rate, or
551 * when no sta is given.
552 */
553 if (unlikely(!sta ||
554 info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
555 flags |= IWL_TX_FLAGS_CMD_RATE;
556 rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
557 hdr->frame_control);
558 } else if (!ieee80211_is_data(hdr->frame_control) ||
559 (mld_sta &&
560 mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
561 /* These are important frames */
562 flags |= IWL_TX_FLAGS_HIGH_PRI;
563 }
564
565 tx_cmd = (void *)dev_tx_cmd->payload;
566
567 iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);
568
569 tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);
570
571 /* Total # bytes to be transmitted */
572 tx_cmd->len = cpu_to_le16((u16)skb->len);
573
574 tx_cmd->flags = cpu_to_le16(flags);
575
576 tx_cmd->rate_n_flags = rate_n_flags;
577 }
578
579 /* The caller of this must check that info->control.vif is not NULL */
580 static struct iwl_mld_link *
581 iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
582 {
583 struct iwl_mld_vif *mld_vif =
584 iwl_mld_vif_from_mac80211(info->control.vif);
585 u32 link_id = u32_get_bits(info->control.flags,
586 IEEE80211_TX_CTRL_MLO_LINK);
587
588 if (link_id == IEEE80211_LINK_UNSPECIFIED) {
589 if (info->control.vif->active_links)
590 link_id = ffs(info->control.vif->active_links) - 1;
591 else
592 link_id = 0;
593 }
594
595 return rcu_dereference(mld_vif->link[link_id]);
596 }
597
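/* Resolve the FW TX queue for a frame: frames with a station TXQ use that
 * queue directly; otherwise AP/IBSS traffic is routed to the link's
 * bcast/mcast station queues, P2P-device and off-channel station mgmt
 * frames go to the aux station queue (only while a matching ROC is
 * active), and monitor frames go to the monitor station queue.
 * IWL_MLD_INVALID_DROP_TX tells the caller to drop the frame;
 * IWL_MLD_INVALID_QUEUE means no queue could be found.
 */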
598 static int
599 iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
600 struct sk_buff *skb)
601 {
602 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
603 struct ieee80211_hdr *hdr = (void *)skb->data;
604 __le16 fc = hdr->frame_control;
605 struct iwl_mld_vif *mld_vif;
606 struct iwl_mld_link *link;
607
608 if (txq && txq->sta)
609 return iwl_mld_txq_from_mac80211(txq)->fw_id;
610
611 if (!info->control.vif)
612 return IWL_MLD_INVALID_QUEUE;
613
614 switch (info->control.vif->type) {
615 case NL80211_IFTYPE_AP:
616 case NL80211_IFTYPE_ADHOC:
617 link = iwl_mld_get_link_from_tx_info(info);
618
619 if (WARN_ON(!link))
620 break;
621
622 /* ucast disassociate/deauth frames without a station might
623 * happen, especially with reason 7 ("Class 3 frame received
624 * from nonassociated STA").
625 */
626 if (ieee80211_is_mgmt(fc) &&
627 (!ieee80211_is_bufferable_mmpdu(skb) ||
628 ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
629 return link->bcast_sta.queue_id;
630
631 if (is_multicast_ether_addr(hdr->addr1) &&
632 !ieee80211_has_order(fc))
633 return link->mcast_sta.queue_id;
634
635 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
636 "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
637 return link->bcast_sta.queue_id;
638 case NL80211_IFTYPE_P2P_DEVICE:
639 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
640
641 if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC &&
642 mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) {
643 IWL_DEBUG_DROP(mld,
644 "Drop tx outside ROC with activity %d\n",
645 mld_vif->roc_activity);
646 return IWL_MLD_INVALID_DROP_TX;
647 }
648
649 WARN_ON(!ieee80211_is_mgmt(fc));
650
651 return mld_vif->aux_sta.queue_id;
652 case NL80211_IFTYPE_MONITOR:
653 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
654 return mld_vif->deflink.mon_sta.queue_id;
655 case NL80211_IFTYPE_STATION:
656 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
657
658 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
659 IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n");
660 return IWL_MLD_INVALID_DROP_TX;
661 }
662
663 if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) {
664 IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
665 return IWL_MLD_INVALID_DROP_TX;
666 }
667
668 WARN_ON(!ieee80211_is_mgmt(fc));
669 return mld_vif->aux_sta.queue_id;
670 default:
671 WARN_ONCE(1, "Unsupported vif type\n");
672 break;
673 }
674
675 return IWL_MLD_INVALID_QUEUE;
676 }
677
678 static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
679 struct sk_buff *skb)
680 {
681 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
682 struct iwl_mld_link *mld_link =
683 &iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
684 struct iwl_probe_resp_data *resp_data;
685 u8 *pos;
686
687 if (!info->control.vif->p2p)
688 return;
689
690 rcu_read_lock();
691
692 resp_data = rcu_dereference(mld_link->probe_resp_data);
693 if (!resp_data)
694 goto out;
695
696 if (!resp_data->notif.noa_active)
697 goto out;
698
699 if (skb_tailroom(skb) < resp_data->noa_len) {
700 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
701 IWL_ERR(mld,
702 "Failed to reallocate probe resp\n");
703 goto out;
704 }
705 }
706
707 pos = skb_put(skb, resp_data->noa_len);
708
709 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
710 /* Set length of IE body (not including ID and length itself) */
711 *pos++ = resp_data->noa_len - 2;
712 *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
713 *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
714 *pos++ = WLAN_OUI_WFA & 0xff;
715 *pos++ = WLAN_OUI_TYPE_WFA_P2P;
716
717 memcpy(pos, &resp_data->notif.noa_attr,
718 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
719
720 out:
721 rcu_read_unlock();
722 }
723
724 /* This function must be called with BHs disabled */
725 static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
726 struct ieee80211_txq *txq)
727 {
728 struct ieee80211_hdr *hdr = (void *)skb->data;
729 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
730 struct ieee80211_sta *sta = txq ? txq->sta : NULL;
731 struct iwl_device_tx_cmd *dev_tx_cmd;
732 int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
733 u8 tid = IWL_MAX_TID_COUNT;
734
735 if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
736 queue == IWL_MLD_INVALID_DROP_TX)
737 return -1;
738
739 if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
740 return -1;
741
742 dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
743 if (unlikely(!dev_tx_cmd))
744 return -1;
745
746 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
747 if (IWL_MLD_NON_TRANSMITTING_AP)
748 return -1;
749
750 iwl_mld_probe_resp_set_noa(mld, skb);
751 }
752
753 iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
754
755 if (ieee80211_is_data(hdr->frame_control)) {
756 if (ieee80211_is_data_qos(hdr->frame_control))
757 tid = ieee80211_get_tid(hdr);
758 else
759 tid = IWL_TID_NON_QOS;
760 }
761
762 IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
763 tid, queue, skb->len);
764
765 /* From now on, we cannot access info->control */
766 memset(&info->status, 0, sizeof(info->status));
767 memset(info->driver_data, 0, sizeof(info->driver_data));
768
769 info->driver_data[1] = dev_tx_cmd;
770
771 if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
772 goto err;
773
774 /* Update low-latency counter when a packet is queued instead
775 * of after TX; this makes sense for early low-latency detection
776 */
777 if (sta)
778 iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
779
780 return 0;
781
782 err:
783 iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
784 IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
785 return -1;
786 }
787
788 #ifdef CONFIG_INET
789
790 /* This function handles the segmentation of a large TSO packet into multiple
791 * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
792 * constraints.
793 */
794 static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
795 struct ieee80211_sta *sta,
796 struct sk_buff_head *mpdus_skbs)
797 {
798 struct ieee80211_hdr *hdr = (void *)skb->data;
799 netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
800 unsigned int mss = skb_shinfo(skb)->gso_size;
801 unsigned int num_subframes, tcp_payload_len, subf_len;
802 u16 snap_ip_tcp, pad, max_tid_amsdu_len;
803 u8 tid;
804
805 snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
806
807 if (!ieee80211_is_data_qos(hdr->frame_control) ||
808 !sta->cur->max_rc_amsdu_len)
809 return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
810
811 /* Do not build AMSDU for IPv6 with extension headers.
812 * Ask stack to segment and checksum the generated MPDUs for us.
813 */
814 if (skb->protocol == htons(ETH_P_IPV6) &&
815 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
816 IPPROTO_TCP) {
817 netdev_flags &= ~NETIF_F_CSUM_MASK;
818 return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
819 }
820
821 tid = ieee80211_get_tid(hdr);
822 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
823 return -EINVAL;
824
825 max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
826 if (!max_tid_amsdu_len)
827 return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
828
829 /* Sub frame header + SNAP + IP header + TCP header + MSS */
830 subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
831 pad = (4 - subf_len) & 0x3;
832
833 /* If we have N subframes in the A-MSDU, then the A-MSDU's size is
834 * N * subf_len + (N - 1) * pad.
835 */
836 num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
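/* Illustrative numbers (assumed, not mandated anywhere): with mss = 1460
 * and snap_ip_tcp = 8 + 20 + 20 = 48, subf_len = 14 + 48 + 1460 = 1522 and
 * pad = 2, so a max_tid_amsdu_len of 7935 yields
 * num_subframes = (7935 + 2) / (1522 + 2) = 5.
 */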
837
838 if (sta->max_amsdu_subframes &&
839 num_subframes > sta->max_amsdu_subframes)
840 num_subframes = sta->max_amsdu_subframes;
841
842 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
843 tcp_hdrlen(skb) + skb->data_len;
844
845 /* Make sure we have enough TBs for the A-MSDU:
846 * 2 for each subframe
847 * 1 more for each fragment
848 * 1 more for the potential data in the header
849 */
850 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
851 mld->trans->info.max_skb_frags)
852 num_subframes = 1;
853
854 if (num_subframes > 1)
855 *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
856
857 /* This skb fits in one single A-MSDU */
858 if (tcp_payload_len <= num_subframes * mss) {
859 __skb_queue_tail(mpdus_skbs, skb);
860 return 0;
861 }
862
863 /* Trick the segmentation function to make it create SKBs that can fit
864 * into one A-MSDU.
865 */
866 return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
867 }
868
869 /* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
870 * large packets when necessary and transmitting each segment as an MPDU.
871 */
872 static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
873 struct ieee80211_txq *txq)
874 {
875 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
876 struct sk_buff *orig_skb = skb;
877 struct sk_buff_head mpdus_skbs;
878 unsigned int payload_len;
879 int ret;
880
881 if (WARN_ON(!txq || !txq->sta))
882 return -1;
883
884 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
885 tcp_hdrlen(skb) + skb->data_len;
886
887 if (payload_len <= skb_shinfo(skb)->gso_size)
888 return iwl_mld_tx_mpdu(mld, skb, txq);
889
890 if (!info->control.vif)
891 return -1;
892
893 __skb_queue_head_init(&mpdus_skbs);
894
895 ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
896 if (ret)
897 return ret;
898
899 WARN_ON(skb_queue_empty(&mpdus_skbs));
900
901 while (!skb_queue_empty(&mpdus_skbs)) {
902 skb = __skb_dequeue(&mpdus_skbs);
903
904 ret = iwl_mld_tx_mpdu(mld, skb, txq);
905 if (!ret)
906 continue;
907
908 /* Free skbs created as part of TSO logic that have not yet
909 * been dequeued
910 */
911 __skb_queue_purge(&mpdus_skbs);
912
913 /* skb here is not necessarily the same as the skb that entered
914 * this function, so free it explicitly.
915 */
916 if (skb == orig_skb)
917 ieee80211_free_txskb(mld->hw, skb);
918 else
919 kfree_skb(skb);
920
921 /* there was an error, but we consumed the skb one way or
922 * another, so return 0
923 */
924 return 0;
925 }
926
927 return 0;
928 }
929 #else
930 static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
931 struct ieee80211_txq *txq)
932 {
933 /* Impossible to get TSO without CONFIG_INET */
934 WARN_ON(1);
935
936 return -1;
937 }
938 #endif /* CONFIG_INET */
939
940 void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
941 struct ieee80211_txq *txq)
942 {
943 if (skb_is_gso(skb)) {
944 if (!iwl_mld_tx_tso(mld, skb, txq))
945 return;
946 goto err;
947 }
948
949 if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
950 return;
951
952 err:
953 ieee80211_free_txskb(mld->hw, skb);
954 }
955
956 void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
957 {
958 struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
959 struct sk_buff *skb = NULL;
960 u8 zero_addr[ETH_ALEN] = {};
961
962 /*
963 * No need for threads to wait here; they can leave all the work to
964 * the first taker.
965 *
966 * mld_txq->tx_request logic:
967 *
968 * If 0, no one is currently TXing, set to 1 to indicate current thread
969 * will now start TX and other threads should quit.
970 *
971 * If 1, another thread is currently TXing, set to 2 to indicate to
972 * that thread that there was another request. Since that request may
973 * have raced with the check whether the queue is empty, the TXing
974 * thread should check the queue's status one more time before leaving.
975 * This check is done in order to not leave any TX hanging in the queue
976 * until the next TX invocation (which may not even happen).
977 *
978 * If 2, another thread is currently TXing, and it will already double
979 * check the queue, so do nothing.
980 */
981 if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
982 return;
983
984 rcu_read_lock();
985 do {
986 while (likely(!mld_txq->status.stop_full) &&
987 (skb = ieee80211_tx_dequeue(mld->hw, txq)))
988 iwl_mld_tx_skb(mld, skb, txq);
989 } while (atomic_dec_return(&mld_txq->tx_request));
990
991 IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
992 txq->sta ? txq->sta->addr : zero_addr, txq->tid);
993
994 rcu_read_unlock();
995 }
996
997 static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld,
998 __le32 rate_n_flags_fw,
999 struct ieee80211_tx_info *info)
1000 {
1001 enum nl80211_band band = info->band;
1002 struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
1003 u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw,
1004 mld->fw_rates_ver_3);
1005 u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
1006 u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
1007 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1008
1009 if (sgi)
1010 tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
1011
1012 switch (chan_width) {
1013 case RATE_MCS_CHAN_WIDTH_20:
1014 break;
1015 case RATE_MCS_CHAN_WIDTH_40:
1016 tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1017 break;
1018 case RATE_MCS_CHAN_WIDTH_80:
1019 tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1020 break;
1021 case RATE_MCS_CHAN_WIDTH_160:
1022 tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1023 break;
1024 default:
1025 break;
1026 }
1027
1028 switch (format) {
1029 case RATE_MCS_MOD_TYPE_HT:
1030 tx_rate->flags |= IEEE80211_TX_RC_MCS;
1031 tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
1032 break;
1033 case RATE_MCS_MOD_TYPE_VHT:
1034 ieee80211_rate_set_vht(tx_rate,
1035 rate_n_flags & RATE_MCS_CODE_MSK,
1036 u32_get_bits(rate_n_flags,
1037 RATE_MCS_NSS_MSK) + 1);
1038 tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
1039 break;
1040 case RATE_MCS_MOD_TYPE_HE:
1041 case RATE_MCS_MOD_TYPE_EHT:
1042 /* mac80211 cannot do this without ieee80211_tx_status_ext()
1043 * but it only matters for radiotap
1044 */
1045 tx_rate->idx = 0;
1046 break;
1047 default:
1048 tx_rate->idx =
1049 iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1050 band);
1051 break;
1052 }
1053 }
1054
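/* Handle a single-frame TX response: reclaim the frame(s) up to the
 * reported SSN, translate the FW status and rate into mac80211 TX status,
 * and on failure toggle the TX antenna for mgmt frames or for stations
 * that are not yet authorized.
 */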
1055 void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
1056 struct iwl_rx_packet *pkt)
1057 {
1058 struct iwl_tx_resp *tx_resp = (void *)pkt->data;
1059 int txq_id = le16_to_cpu(tx_resp->tx_queue);
1060 struct agg_tx_status *agg_status = &tx_resp->status;
1061 u32 status = le16_to_cpu(agg_status->status);
1062 u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1063 size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
1064 int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
1065 int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
1066 struct ieee80211_link_sta *link_sta;
1067 struct iwl_mld_sta *mld_sta;
1068 u16 ssn;
1069 struct sk_buff_head skbs;
1070 u8 skb_freed = 0;
1071 bool mgmt = false;
1072 bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
1073
1074 if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
1075 "Invalid tx_resp notif frame_count (%d)\n",
1076 tx_resp->frame_count))
1077 return;
1078
1079 /* validate the size of the variable part of the notif */
1080 if (IWL_FW_CHECK(mld, notif_size != pkt_len,
1081 "Invalid tx_resp notif size (expected=%zu got=%u)\n",
1082 notif_size, pkt_len))
1083 return;
1084
1085 ssn = le32_to_cpup((__le32 *)agg_status +
1086 tx_resp->frame_count) & 0xFFFF;
1087
1088 __skb_queue_head_init(&skbs);
1089
1090 /* we can free frames up to, but not including, ssn % q.n_bd */
1091 iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
1092
1093 while (!skb_queue_empty(&skbs)) {
1094 struct sk_buff *skb = __skb_dequeue(&skbs);
1095 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1096 struct ieee80211_hdr *hdr = (void *)skb->data;
1097
1098 skb_freed++;
1099
1100 iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1101
1102 memset(&info->status, 0, sizeof(info->status));
1103
1104 info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1105
1106 /* inform mac80211 about what happened with the frame */
1107 switch (status & TX_STATUS_MSK) {
1108 case TX_STATUS_SUCCESS:
1109 case TX_STATUS_DIRECT_DONE:
1110 info->flags |= IEEE80211_TX_STAT_ACK;
1111 break;
1112 default:
1113 break;
1114 }
1115
1116 /* If we are freeing multiple frames, mark all the frames
1117 * but the first one as acked, since they were acknowledged
1118 * before
1119 */
1120 if (skb_freed > 1)
1121 info->flags |= IEEE80211_TX_STAT_ACK;
1122
1123 if (tx_failure) {
1124 enum iwl_fw_ini_time_point tp =
1125 IWL_FW_INI_TIME_POINT_TX_FAILED;
1126
1127 if (ieee80211_is_action(hdr->frame_control))
1128 tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1129 else if (ieee80211_is_mgmt(hdr->frame_control))
1130 mgmt = true;
1131
1132 iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
1133 }
1134
1135 iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info);
1136
1137 if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
1138 ieee80211_tx_status_skb(mld->hw, skb);
1139 }
1140
1141 IWL_DEBUG_TX_REPLY(mld,
1142 "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
1143 txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
1144 tx_resp->failure_frame);
1145
1146 if (tx_failure && mgmt)
1147 iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
1148
1149 if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1150 "Got invalid sta_id (%d)\n", sta_id))
1151 return;
1152
1153 rcu_read_lock();
1154
1155 link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1156 if (!link_sta) {
1157 /* This can happen if the TX cmd was sent before pre_rcu_remove
1158 * but the TX response was received after
1159 */
1160 IWL_DEBUG_TX_REPLY(mld,
1161 "Got valid sta_id (%d) but sta is NULL\n",
1162 sta_id);
1163 goto out;
1164 }
1165
1166 if (IS_ERR(link_sta))
1167 goto out;
1168
1169 mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
1170
1171 if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
1172 iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
1173
1174 if (tid < IWL_MAX_TID_COUNT)
1175 iwl_mld_count_mpdu_tx(link_sta, 1);
1176
1177 out:
1178 rcu_read_unlock();
1179 }
1180
1181 static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
1182 bool in_flush)
1183 {
1184 struct sk_buff_head reclaimed_skbs;
1185
1186 __skb_queue_head_init(&reclaimed_skbs);
1187
1188 iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
1189
1190 while (!skb_queue_empty(&reclaimed_skbs)) {
1191 struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
1192 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193
1194 iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1195
1196 memset(&info->status, 0, sizeof(info->status));
1197
1198 /* The packet was transmitted successfully; failures come as single
1199 * frames because, before failing a frame, the firmware transmits
1200 * it without aggregation at least once.
1201 */
1202 if (!in_flush)
1203 info->flags |= IEEE80211_TX_STAT_ACK;
1204 else
1205 info->flags &= ~IEEE80211_TX_STAT_ACK;
1206
1207 ieee80211_tx_status_skb(mld->hw, skb);
1208 }
1209 }
1210
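/* Flush all TIDs of the given FW station and reclaim the frames of every
 * queue the firmware reports as flushed.
 */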
1211 int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
1212 {
1213 struct iwl_tx_path_flush_cmd_rsp *rsp;
1214 struct iwl_tx_path_flush_cmd flush_cmd = {
1215 .sta_id = cpu_to_le32(fw_sta_id),
1216 .tid_mask = cpu_to_le16(0xffff),
1217 };
1218 struct iwl_host_cmd cmd = {
1219 .id = TXPATH_FLUSH,
1220 .len = { sizeof(flush_cmd), },
1221 .data = { &flush_cmd, },
1222 .flags = CMD_WANT_SKB,
1223 };
1224 int ret, num_flushed_queues;
1225 u32 resp_len;
1226
1227 IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
1228 fw_sta_id, 0xffff);
1229
1230 ret = iwl_mld_send_cmd(mld, &cmd);
1231 if (ret) {
1232 IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
1233 return ret;
1234 }
1235
1236 resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1237 if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
1238 "Invalid TXPATH_FLUSH response len: %d\n",
1239 resp_len)) {
1240 ret = -EIO;
1241 goto free_rsp;
1242 }
1243
1244 rsp = (void *)cmd.resp_pkt->data;
1245
1246 if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
1247 "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
1248 le16_to_cpu(rsp->sta_id))) {
1249 ret = -EIO;
1250 goto free_rsp;
1251 }
1252
1253 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
1254 if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
1255 "num_flushed_queues %d\n", num_flushed_queues)) {
1256 ret = -EIO;
1257 goto free_rsp;
1258 }
1259
1260 for (int i = 0; i < num_flushed_queues; i++) {
1261 struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
1262 int read_after = le16_to_cpu(queue_info->read_after_flush);
1263 int txq_id = le16_to_cpu(queue_info->queue_num);
1264
1265 if (IWL_FW_CHECK(mld,
1266 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1267 "Invalid txq id %d\n", txq_id))
1268 continue;
1269
1270 IWL_DEBUG_TX_QUEUES(mld,
1271 "tid %d txq_id %d read-before %d read-after %d\n",
1272 le16_to_cpu(queue_info->tid), txq_id,
1273 le16_to_cpu(queue_info->read_before_flush),
1274 read_after);
1275
1276 iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
1277 }
1278
1279 free_rsp:
1280 iwl_free_resp(&cmd);
1281 return ret;
1282 }
1283
1284 int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
1285 {
1286 struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
1287 int ret;
1288
1289 lockdep_assert_wiphy(mld->wiphy);
1290
1291 if (likely(mld_txq->status.allocated))
1292 return 0;
1293
1294 ret = iwl_mld_add_txq(mld, txq);
1295
1296 spin_lock_bh(&mld->add_txqs_lock);
1297 if (!list_empty(&mld_txq->list))
1298 list_del_init(&mld_txq->list);
1299 spin_unlock_bh(&mld->add_txqs_lock);
1300
1301 return ret;
1302 }
1303
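/* Re-point all allocated TXQs of a station from old_sta_mask to
 * new_sta_mask by sending SCD_QUEUE_CONFIG_CMD (modify) for each TID.
 */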
1304 int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
1305 struct ieee80211_sta *sta,
1306 u32 old_sta_mask, u32 new_sta_mask)
1307 {
1308 struct iwl_scd_queue_cfg_cmd cmd = {
1309 .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
1310 .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
1311 .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
1312 };
1313
1314 lockdep_assert_wiphy(mld->wiphy);
1315
1316 for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
1317 struct ieee80211_txq *txq =
1318 sta->txq[tid != IWL_MAX_TID_COUNT ?
1319 tid : IEEE80211_NUM_TIDS];
1320 struct iwl_mld_txq *mld_txq =
1321 iwl_mld_txq_from_mac80211(txq);
1322 int ret;
1323
1324 if (!mld_txq->status.allocated)
1325 continue;
1326
1327 if (tid == IWL_MAX_TID_COUNT)
1328 cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
1329 else
1330 cmd.u.modify.tid = cpu_to_le32(tid);
1331
1332 ret = iwl_mld_send_cmd_pdu(mld,
1333 WIDE_ID(DATA_PATH_GROUP,
1334 SCD_QUEUE_CONFIG_CMD),
1335 &cmd);
1336 if (ret)
1337 return ret;
1338 }
1339
1340 return 0;
1341 }
1342
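/* Handle a compressed BlockAck notification: reclaim the acknowledged
 * frames on each reported queue and update the per-link-sta TX MPDU
 * counters.
 */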
1343 void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
1344 struct iwl_rx_packet *pkt)
1345 {
1346 struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
1347 u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1348 u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
1349 u8 sta_id = ba_res->sta_id;
1350 struct ieee80211_link_sta *link_sta;
1351
1352 if (!tfd_cnt)
1353 return;
1354
1355 if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
1356 "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
1357 tfd_cnt, pkt_len))
1358 return;
1359
1360 IWL_DEBUG_TX_REPLY(mld,
1361 "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
1362 sta_id, le32_to_cpu(ba_res->flags),
1363 le16_to_cpu(ba_res->txed),
1364 le16_to_cpu(ba_res->done));
1365
1366 for (int i = 0; i < tfd_cnt; i++) {
1367 struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
1368 int txq_id = le16_to_cpu(ba_tfd->q_num);
1369 int index = le16_to_cpu(ba_tfd->tfd_index);
1370
1371 if (IWL_FW_CHECK(mld,
1372 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1373 "Invalid txq id %d\n", txq_id))
1374 continue;
1375
1376 iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
1377 }
1378
1379 if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1380 "Got invalid sta_id (%d)\n", sta_id))
1381 return;
1382
1383 rcu_read_lock();
1384
1385 link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1386 if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
1387 "Got valid sta_id (%d) but link_sta is NULL\n",
1388 sta_id))
1389 goto out;
1390
1391 iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
1392 out:
1393 rcu_read_unlock();
1394 }
1395