// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024 - 2025 Intel Corporation
 */
#include <net/ip.h>

#include "tx.h"
#include "sta.h"
#include "hcmd.h"
#include "iwl-utils.h"
#include "iface.h"

#include "fw/dbg.h"

#include "fw/api/tx.h"
#include "fw/api/rs.h"
#include "fw/api/txq.h"
#include "fw/api/datapath.h"
#include "fw/api/time-event.h"

#define MAX_ANT_NUM 2

/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
 * the *index* used for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
{
	u8 index = last_idx;

	for (int i = 0; i < MAX_ANT_NUM; i++) {
		index = (index + 1) % MAX_ANT_NUM;
		if (valid & BIT(index))
			return index;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);

	return last_idx;
}
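
/* Example (illustrative values): with valid = 0x3 (both antennas usable),
 * successive calls alternate 0 -> 1 -> 0 -> ...; with valid = 0x2, any
 * last_idx yields index 1. If valid is 0, the WARN_ONCE above fires and the
 * previous index is kept.
 */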

void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
{
	*ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
}

static int
iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct ieee80211_sta *sta = txq->sta;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;
	int max_size = IWL_DEFAULT_QUEUE_SIZE;

	lockdep_assert_wiphy(mld->wiphy);

	for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
		if (link_sta->eht_cap.has_eht) {
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
			break;
		}

		if (link_sta->he_cap.has_he)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	return max_size;
}
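
/* Queue sizing precedence in the loop above: EHT wins over HE (hence the
 * break), and a pure HT/VHT connection falls back to IWL_DEFAULT_QUEUE_SIZE.
 * After seeing HE the loop keeps scanning in case another active link is
 * EHT-capable.
 */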

static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
	u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
	/* We can't know when the station is asleep or awake, so we
	 * must disable the queue hang detection.
	 */
	unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
				IWL_WATCHDOG_DISABLED :
				mld->trans->trans_cfg->base_params->wd_timeout;
	int queue, size;

	lockdep_assert_wiphy(mld->wiphy);

	if (tid == IWL_MGMT_TID)
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mld->trans->cfg->min_txq_size);
	else
		size = iwl_mld_get_queue_size(mld, txq);

	queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
				    watchdog_timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mld,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, fw_sta_mask, tid);
	return queue;
}

static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	int id;

	lockdep_assert_wiphy(mld->wiphy);

	/* This will also send the SCD_QUEUE_CONFIG_CMD */
	id = iwl_mld_allocate_txq(mld, txq);
	if (id < 0)
		return id;

	mld_txq->fw_id = id;
	mld_txq->status.allocated = true;

	rcu_assign_pointer(mld->fw_id_to_txq[id], txq);

	return 0;
}

void iwl_mld_add_txq_list(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	while (!list_empty(&mld->txqs_to_add)) {
		struct ieee80211_txq *txq;
		struct iwl_mld_txq *mld_txq =
			list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
					 list);
		int failed;

		txq = container_of((void *)mld_txq, struct ieee80211_txq,
				   drv_priv);

		failed = iwl_mld_add_txq(mld, txq);

		local_bh_disable();
		spin_lock(&mld->add_txqs_lock);
		list_del_init(&mld_txq->list);
		spin_unlock(&mld->add_txqs_lock);
		/* If the queue allocation failed, we can't transmit. Leave the
		 * frames on the txq; maybe the next attempt to allocate the
		 * queue will succeed.
		 */
		if (!failed)
			iwl_mld_tx_from_txq(mld, txq);
		local_bh_enable();
	}
}

void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   add_txqs_wk);

	/* will reschedule to run after restart */
	if (mld->fw_status.in_hw_restart)
		return;

	iwl_mld_add_txq_list(mld);
}

void
iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
{
	struct iwl_scd_queue_cfg_cmd remove_cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
		.u.remove.tid = cpu_to_le32(tid),
		.u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
	};

	iwl_mld_send_cmd_pdu(mld,
			     WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
			     &remove_cmd);

	iwl_trans_txq_free(mld->trans, queue_id);
}

void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	u32 sta_msk, tid;

	lockdep_assert_wiphy(mld->wiphy);

	spin_lock_bh(&mld->add_txqs_lock);
	if (!list_empty(&mld_txq->list))
		list_del_init(&mld_txq->list);
	spin_unlock_bh(&mld->add_txqs_lock);

	if (!mld_txq->status.allocated ||
	    WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
		return;

	sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);

	tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
					       txq->tid;

	iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);

	RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
	mld_txq->status.allocated = false;
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static __le32
iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
	u8 protocol = 0;

	/* Do not compute checksum if already computed */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */

	/* TBD: do we also need to check
	 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
	 * the devices we support have this flag?
	 */
	if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/* Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

out:
#endif
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	if (amsdu)
		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
		/* padding is inserted later in transport */
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	return cpu_to_le32(offload_assist);
}
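
/* Illustrative result (values derived from the logic above): a 26-byte QoS
 * data MPDU carrying a TCP/IPv4 A-MSDU yields L4_EN set, IP_HDR = 4 (the
 * 8-byte SNAP header, in words), L3_EN set, MH_SIZE = 13 (26 / 2) and the
 * AMSDU bit; a non-A-MSDU frame whose header length is not a multiple of 4
 * gets the PAD bit instead.
 */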

static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
					     struct ieee80211_vif *vif,
					     struct ieee80211_tx_info *info,
					     unsigned long *basic_rates,
					     u8 *band)
{
	u32 link_id = u32_get_bits(info->control.flags,
				   IEEE80211_TX_CTRL_MLO_LINK);

	*basic_rates = vif->bss_conf.basic_rates;
	*band = info->band;

	if (link_id == IEEE80211_LINK_UNSPECIFIED &&
	    ieee80211_vif_is_mld(vif)) {
		/* shouldn't do this when >1 link is active */
		WARN_ON(hweight16(vif->active_links) != 1);
		link_id = __ffs(vif->active_links);
	}

	if (link_id < IEEE80211_LINK_UNSPECIFIED) {
		struct ieee80211_bss_conf *link_conf;

		rcu_read_lock();
		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (link_conf) {
			*basic_rates = link_conf->basic_rates;
			if (link_conf->chanreq.oper.chan)
				*band = link_conf->chanreq.oper.chan->band;
		}
		rcu_read_unlock();
	}
}

u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_vif *vif)
{
	struct ieee80211_supported_band *sband;
	u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
	unsigned long basic_rates;
	u8 band, rate;
	u32 i;

	iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);

	sband = mld->hw->wiphy->bands[band];
	for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
		u16 hw = sband->bitrates[i].hw_value;

		if (hw >= IWL_FIRST_OFDM_RATE) {
			if (lowest_ofdm > hw)
				lowest_ofdm = hw;
		} else if (lowest_cck > hw) {
			lowest_cck = hw;
		}
	}

	if (band == NL80211_BAND_2GHZ && !vif->p2p &&
	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
	    !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
		if (lowest_cck != IWL_RATE_COUNT)
			rate = lowest_cck;
		else if (lowest_ofdm != IWL_RATE_COUNT)
			rate = lowest_ofdm;
		else
			rate = IWL_FIRST_CCK_RATE;
	} else if (lowest_ofdm != IWL_RATE_COUNT) {
		rate = lowest_ofdm;
	} else {
		rate = IWL_FIRST_OFDM_RATE;
	}

	return rate;
}

static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
					   struct ieee80211_tx_info *info,
					   int rate_idx)
{
	u32 rate_flags = 0;
	u8 rate_plcp;

	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = iwl_mld_get_lowest_rate(mld, info,
						   info->control.vif);

	WARN_ON_ONCE(rate_idx < 0);

	/* Set CCK or OFDM flag */
	if (rate_idx <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;
	else
		rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;

	/* Legacy rates are indexed:
	 * 0 - 3 for CCK and 0 - 7 for OFDM
	 */
	rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
		     rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);

	return (u32)rate_plcp | rate_flags;
}
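
/* Mapping sketch (following the 0-3 CCK / 0-7 OFDM indexing noted above): a
 * CCK index keeps its value and gets RATE_MCS_CCK_MSK, while an OFDM index is
 * rebased to 0 by subtracting IWL_FIRST_OFDM_RATE and gets
 * RATE_MCS_LEGACY_OFDM_MSK.
 */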

static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);

		return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
}

static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_sta *sta,
				      __le16 fc)
{
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	u32 result;

	/* we only care about legacy/HT/VHT so far, so we can
	 * build in v1 and use iwl_new_rate_from_v1()
	 * FIXME: in newer devices we only support the new rates, build
	 * the rate_n_flags in the new format here instead of using v1 and
	 * converting it.
	 */

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
		u8 nss = ieee80211_rate_get_vht_nss(rate);

		result = RATE_MCS_VHT_MSK_V1;
		result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK_V1;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);

		result = iwl_new_rate_from_v1(result);
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		result = RATE_MCS_HT_MSK_V1;
		result |= u32_encode_bits(rate->idx,
					  RATE_HT_MCS_RATE_CODE_MSK_V1 |
					  RATE_HT_MCS_NSS_MSK_V1);
		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK_V1;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
		if (info->flags & IEEE80211_TX_CTL_LDPC)
			result |= RATE_MCS_LDPC_MSK_V1;
		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
			result |= RATE_MCS_STBC_MSK;

		result = iwl_new_rate_from_v1(result);
	} else {
		result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
	}

	if (info->control.antennas)
		result |= u32_encode_bits(info->control.antennas,
					  RATE_MCS_ANT_AB_MSK);
	else
		result |= iwl_mld_get_tx_ant(mld, info, sta, fc);

	return result;
}

static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
		return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);

	return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
		iwl_mld_get_tx_ant(mld, info, sta, fc);
}

static void
iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
			struct sk_buff *skb, bool amsdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_vif *vif;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));

	if (!amsdu || !skb_is_gso(skb))
		return;

	/* As described in IEEE Std 802.11-2020, table 9-30 (Address
	 * field contents), A-MSDU address 3 should contain the BSSID
	 * address.
	 *
	 * In TSO, the skb header address 3 contains the original address 3 to
	 * correctly create all the A-MSDU subframes headers from it.
	 * Override now the address 3 in the command header with the BSSID.
	 *
	 * Note: we fill in the MLD address, but the firmware will do the
	 * necessary translation to link address after encryption.
	 */
	vif = info->control.vif;
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
		break;
	case NL80211_IFTYPE_AP:
		ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
		break;
	default:
		break;
	}
}

static void
iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_tx_cmd,
		    struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
					    NULL;
	struct iwl_tx_cmd_gen3 *tx_cmd;
	bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		     (*ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	u32 rate_n_flags = 0;
	u16 flags = 0;

	dev_tx_cmd->hdr.cmd = TX_CMD;

	if (!info->control.hw_key)
		flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

	/* For data and mgmt packets rate info comes from the fw.
	 * Only set rate/antenna for injected frames with fixed rate, or
	 * when no sta is given.
	 */
	if (unlikely(!sta ||
		     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
		flags |= IWL_TX_FLAGS_CMD_RATE;
		rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
							   hdr->frame_control);
	} else if (!ieee80211_is_data(hdr->frame_control) ||
		   (mld_sta &&
		    mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
		/* These are important frames */
		flags |= IWL_TX_FLAGS_HIGH_PRI;
	}

	tx_cmd = (void *)dev_tx_cmd->payload;

	iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);

	tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);

	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);

	tx_cmd->flags = cpu_to_le16(flags);

	tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
}

/* The caller must ensure that info->control.vif is not NULL */
static struct iwl_mld_link *
iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
{
	struct iwl_mld_vif *mld_vif =
		iwl_mld_vif_from_mac80211(info->control.vif);
	u32 link_id = u32_get_bits(info->control.flags,
				   IEEE80211_TX_CTRL_MLO_LINK);

	if (link_id == IEEE80211_LINK_UNSPECIFIED) {
		if (info->control.vif->active_links)
			link_id = ffs(info->control.vif->active_links) - 1;
		else
			link_id = 0;
	}

	return rcu_dereference(mld_vif->link[link_id]);
}

static int
iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
			struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_link *link;

	if (txq && txq->sta)
		return iwl_mld_txq_from_mac80211(txq)->fw_id;

	if (!info->control.vif)
		return IWL_MLD_INVALID_QUEUE;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		link = iwl_mld_get_link_from_tx_info(info);

		if (WARN_ON(!link))
			break;

		/* ucast disassociate/deauth frames without a station might
		 * happen, especially with reason 7 ("Class 3 frame received
		 * from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(skb) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return link->bcast_sta.queue_id;

		if (is_multicast_ether_addr(hdr->addr1) &&
		    !ieee80211_has_order(fc))
			return link->mcast_sta.queue_id;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
		return link->bcast_sta.queue_id;
	case NL80211_IFTYPE_P2P_DEVICE:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
			IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
			return IWL_MLD_INVALID_DROP_TX;
		}

		WARN_ON(!ieee80211_is_mgmt(fc));

		return mld_vif->deflink.aux_sta.queue_id;
	default:
		/* TODO: consider monitor (task=monitor) */
		WARN_ONCE(1, "Unsupported vif type\n");
		break;
	}

	return IWL_MLD_INVALID_QUEUE;
}

static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mld_link *mld_link =
		&iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
	struct iwl_probe_resp_data *resp_data;
	u8 *pos;

	if (!info->control.vif->p2p)
		return;

	rcu_read_lock();

	resp_data = rcu_dereference(mld_link->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mld,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}
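
/* Resulting layout appended to the probe response (per the writes above):
 *
 *   [EID 221][len][WFA OUI, 3 bytes][OUI type: P2P][NoA attribute ...]
 *
 * where len = noa_len - 2, i.e. everything after the ID and length octets.
 */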

/* This function must be called with BHs disabled */
static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
			   struct ieee80211_txq *txq)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txq ? txq->sta : NULL;
	struct iwl_device_tx_cmd *dev_tx_cmd;
	int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
	u8 tid = IWL_MAX_TID_COUNT;

	if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
	    queue == IWL_MLD_INVALID_DROP_TX)
		return -1;

	if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
		return -1;

	dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
	if (unlikely(!dev_tx_cmd))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
		if (IWL_MLD_NON_TRANSMITTING_AP)
			return -1;

		iwl_mld_probe_resp_set_noa(mld, skb);
	}

	iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);

	if (ieee80211_is_data(hdr->frame_control)) {
		if (ieee80211_is_data_qos(hdr->frame_control))
			tid = ieee80211_get_tid(hdr);
		else
			tid = IWL_TID_NON_QOS;
	}

	IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
		     tid, queue, skb->len);

	/* From now on, we cannot access info->control */
	memset(&info->status, 0, sizeof(info->status));
	memset(info->driver_data, 0, sizeof(info->driver_data));

	info->driver_data[1] = dev_tx_cmd;

	if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
		goto err;

	/* Update the low-latency counter when a packet is queued instead
	 * of after TX; this enables early low-latency detection.
	 */
	if (sta)
		iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);

	return 0;

err:
	iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
	IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
	return -1;
}

#ifdef CONFIG_INET

/* This function handles the segmentation of a large TSO packet into multiple
 * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
 * constraints.
 */
static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
				  struct ieee80211_sta *sta,
				  struct sk_buff_head *mpdus_skbs)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len;
	u16 snap_ip_tcp, pad, max_tid_amsdu_len;
	u8 tid;

	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !sta->cur->max_rc_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Do not build AMSDU for IPv6 with extension headers.
	 * Ask stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
	if (!max_tid_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/* If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/* Make sure we have enough TBs for the A-MSDU:
	 * 2 for each subframe
	 * 1 more for each fragment
	 * 1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mld->trans->max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (tcp_payload_len <= num_subframes * mss) {
		__skb_queue_tail(mpdus_skbs, skb);
		return 0;
	}

	/* Trick the segmentation function to make it create SKBs that can fit
	 * into one A-MSDU.
	 */
	return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
}
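
/* Worked example (illustrative numbers only): with mss = 1460, a 20-byte IPv4
 * header and a 32-byte TCP header, snap_ip_tcp = 8 + 20 + 32 = 60, so
 * subf_len = 14 + 60 + 1460 = 1534 and pad = 2. With max_tid_amsdu_len = 7935
 * this gives num_subframes = (7935 + 2) / (1534 + 2) = 5 subframes per A-MSDU.
 */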

/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
 * large packets when necessary and transmitting each segment as an MPDU.
 */
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff *orig_skb = skb;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON(!txq || !txq->sta))
		return -1;

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mld_tx_mpdu(mld, skb, txq);

	if (!info->control.vif)
		return -1;

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
	if (ret)
		return ret;

	WARN_ON(skb_queue_empty(&mpdus_skbs));

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mld_tx_mpdu(mld, skb, txq);
		if (!ret)
			continue;

		/* Free skbs created as part of TSO logic that have not yet
		 * been dequeued
		 */
		__skb_queue_purge(&mpdus_skbs);

		/* skb here is not necessarily the same as the skb that
		 * entered this method, so free it explicitly.
		 */
		if (skb == orig_skb)
			ieee80211_free_txskb(mld->hw, skb);
		else
			kfree_skb(skb);

		/* there was an error, but we consumed the skb one way or
		 * another, so return 0
		 */
		return 0;
	}

	return 0;
}
#else
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
		    struct ieee80211_txq *txq)
{
	if (skb_is_gso(skb)) {
		if (!iwl_mld_tx_tso(mld, skb, txq))
			return;
		goto err;
	}

	if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
		return;

err:
	ieee80211_free_txskb(mld->hw, skb);
}

void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;
	u8 zero_addr[ETH_ALEN] = {};

	/*
	 * No need for other threads to wait here; the first taker can do
	 * all the work.
	 *
	 * mld_txq->tx_request logic:
	 *
	 * If 0, no one is currently TXing, set to 1 to indicate current thread
	 * will now start TX and other threads should quit.
	 *
	 * If 1, another thread is currently TXing, set to 2 to indicate to
	 * that thread that there was another request. Since that request may
	 * have raced with the check whether the queue is empty, the TXing
	 * thread should check the queue's status one more time before leaving.
	 * This check is done in order to not leave any TX hanging in the queue
	 * until the next TX invocation (which may not even happen).
	 *
	 * If 2, another thread is currently TXing, and it will already double
	 * check the queue, so do nothing.
	 */
	if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
		return;

	rcu_read_lock();
	do {
		while (likely(!mld_txq->status.stop_full) &&
		       (skb = ieee80211_tx_dequeue(mld->hw, txq)))
			iwl_mld_tx_skb(mld, skb, txq);
	} while (atomic_dec_return(&mld_txq->tx_request));

	IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
		     txq->sta ? txq->sta->addr : zero_addr, txq->tid);

	rcu_read_unlock();
}
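
/* Concrete race example for the scheme above: thread A is inside the dequeue
 * loop when thread B calls in; B bumps tx_request from 1 to 2 and returns.
 * A's atomic_dec_return() then yields 1 (non-zero), so A runs the loop once
 * more and drains whatever B enqueued before dropping tx_request to 0.
 */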

static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
				      struct ieee80211_tx_info *info)
{
	enum nl80211_band band = info->band;
	struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
	u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
	u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	if (sgi)
		tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;

	switch (chan_width) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	default:
		break;
	}

	switch (format) {
	case RATE_MCS_HT_MSK:
		tx_rate->flags |= IEEE80211_TX_RC_MCS;
		tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
		break;
	case RATE_MCS_VHT_MSK:
		ieee80211_rate_set_vht(tx_rate,
				       rate_n_flags & RATE_MCS_CODE_MSK,
				       FIELD_GET(RATE_MCS_NSS_MSK,
						 rate_n_flags) + 1);
		tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	case RATE_MCS_HE_MSK:
		/* mac80211 cannot do this without ieee80211_tx_status_ext()
		 * but it only matters for radiotap
		 */
		tx_rate->idx = 0;
		break;
	default:
		tx_rate->idx =
			iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
							      band);
		break;
	}
}
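
/* This is the inverse of the rate building on the TX path: the firmware
 * rate_n_flags reported in the TX response is unpacked back into the
 * mac80211 ieee80211_tx_rate used for status reporting. HE rates are not
 * representable in struct ieee80211_tx_rate, so only a placeholder index is
 * set; full reporting would need ieee80211_tx_status_ext().
 */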

void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_tx_resp *tx_resp = (void *)pkt->data;
	int txq_id = le16_to_cpu(tx_resp->tx_queue);
	struct agg_tx_status *agg_status = &tx_resp->status;
	u32 status = le16_to_cpu(agg_status->status);
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
	int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	u16 ssn;
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	bool mgmt = false;
	bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;

	if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
			 "Invalid tx_resp notif frame_count (%d)\n",
			 tx_resp->frame_count))
		return;

	/* validate the size of the variable part of the notif */
	if (IWL_FW_CHECK(mld, notif_size != pkt_len,
			 "Invalid tx_resp notif size (expected=%zu got=%u)\n",
			 notif_size, pkt_len))
		return;

	ssn = le32_to_cpup((__le32 *)agg_status +
			   tx_resp->frame_count) & 0xFFFF;

	__skb_queue_head_init(&skbs);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *)skb->data;

		skb_freed++;

		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		default:
			break;
		}

		/* If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		if (tx_failure) {
			enum iwl_fw_ini_time_point tp =
				IWL_FW_INI_TIME_POINT_TX_FAILED;

			if (ieee80211_is_action(hdr->frame_control))
				tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
			else if (ieee80211_is_mgmt(hdr->frame_control))
				mgmt = true;

			iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
		}

		iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
					  info);

		if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
			ieee80211_tx_status_skb(mld->hw, skb);
	}

	IWL_DEBUG_TX_REPLY(mld,
			   "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
			   txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame);

	if (tx_failure && mgmt)
		iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);

	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
			 "Got invalid sta_id (%d)\n", sta_id))
		return;

	rcu_read_lock();

	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (!link_sta) {
		/* This can happen if the TX cmd was sent before pre_rcu_remove
		 * but the TX response was received after
		 */
		IWL_DEBUG_TX_REPLY(mld,
				   "Got valid sta_id (%d) but sta is NULL\n",
				   sta_id);
		goto out;
	}

	if (IS_ERR(link_sta))
		goto out;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);

	if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
		iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);

	if (tid < IWL_MAX_TID_COUNT)
		iwl_mld_count_mpdu_tx(link_sta, 1);

out:
	rcu_read_unlock();
}

static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
				   bool in_flush)
{
	struct sk_buff_head reclaimed_skbs;

	__skb_queue_head_init(&reclaimed_skbs);

	iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		if (!in_flush)
			info->flags |= IEEE80211_TX_STAT_ACK;
		else
			info->flags &= ~IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_skb(mld->hw, skb);
	}
}
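
/* Reclaim semantics, as used above: on the BA-notif path (in_flush == false)
 * every reclaimed frame is reported as acked, since failures are retried
 * without aggregation and show up in a TX response instead; on the flush path
 * (in_flush == true) frames are reported without the ACK flag.
 */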

int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
{
	struct iwl_tx_path_flush_cmd_rsp *rsp;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(fw_sta_id),
		.tid_mask = cpu_to_le16(0xffff),
	};
	struct iwl_host_cmd cmd = {
		.id = TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, num_flushed_queues;
	u32 resp_len;

	IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
			    fw_sta_id, 0xffff);

	ret = iwl_mld_send_cmd(mld, &cmd);
	if (ret) {
		IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
		return ret;
	}

	resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
			 "Invalid TXPATH_FLUSH response len: %d\n",
			 resp_len)) {
		ret = -EIO;
		goto free_rsp;
	}

	rsp = (void *)cmd.resp_pkt->data;

	if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
			 "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
			 le16_to_cpu(rsp->sta_id))) {
		ret = -EIO;
		goto free_rsp;
	}

	num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
	if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
			 "num_flushed_queues %d\n", num_flushed_queues)) {
		ret = -EIO;
		goto free_rsp;
	}

	for (int i = 0; i < num_flushed_queues; i++) {
		struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
		int read_after = le16_to_cpu(queue_info->read_after_flush);
		int txq_id = le16_to_cpu(queue_info->queue_num);

		if (IWL_FW_CHECK(mld,
				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
				 "Invalid txq id %d\n", txq_id))
			continue;

		IWL_DEBUG_TX_QUEUES(mld,
				    "tid %d txq_id %d read-before %d read-after %d\n",
				    le16_to_cpu(queue_info->tid), txq_id,
				    le16_to_cpu(queue_info->read_before_flush),
				    read_after);

		iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
	}

free_rsp:
	iwl_free_resp(&cmd);
	return ret;
}

int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	if (likely(mld_txq->status.allocated))
		return 0;

	ret = iwl_mld_add_txq(mld, txq);

	spin_lock_bh(&mld->add_txqs_lock);
	if (!list_empty(&mld_txq->list))
		list_del_init(&mld_txq->list);
	spin_unlock_bh(&mld->add_txqs_lock);

	return ret;
}

int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
			    struct ieee80211_sta *sta,
			    u32 old_sta_mask, u32 new_sta_mask)
{
	struct iwl_scd_queue_cfg_cmd cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
		.u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
		.u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
	};

	lockdep_assert_wiphy(mld->wiphy);

	for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
		struct ieee80211_txq *txq =
			sta->txq[tid != IWL_MAX_TID_COUNT ?
					tid : IEEE80211_NUM_TIDS];
		struct iwl_mld_txq *mld_txq =
			iwl_mld_txq_from_mac80211(txq);
		int ret;

		if (!mld_txq->status.allocated)
			continue;

		if (tid == IWL_MAX_TID_COUNT)
			cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
		else
			cmd.u.modify.tid = cpu_to_le32(tid);

		ret = iwl_mld_send_cmd_pdu(mld,
					   WIDE_ID(DATA_PATH_GROUP,
						   SCD_QUEUE_CONFIG_CMD),
					   &cmd);
		if (ret)
			return ret;
	}

	return 0;
}
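
/* Note on the loop above: the data TIDs map to sta->txq[0..IWL_MAX_TID_COUNT-1],
 * while the management queue lives at sta->txq[IEEE80211_NUM_TIDS] and is
 * reported to the firmware as IWL_MGMT_TID.
 */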

void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt)
{
	struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
	u8 sta_id = ba_res->sta_id;
	struct ieee80211_link_sta *link_sta;

	if (!tfd_cnt)
		return;

	if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
			 "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
			 tfd_cnt, pkt_len))
		return;

	IWL_DEBUG_TX_REPLY(mld,
			   "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
			   sta_id, le32_to_cpu(ba_res->flags),
			   le16_to_cpu(ba_res->txed),
			   le16_to_cpu(ba_res->done));

	for (int i = 0; i < tfd_cnt; i++) {
		struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
		int txq_id = le16_to_cpu(ba_tfd->q_num);
		int index = le16_to_cpu(ba_tfd->tfd_index);

		if (IWL_FW_CHECK(mld,
				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
				 "Invalid txq id %d\n", txq_id))
			continue;

		iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
	}

	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
			 "Got invalid sta_id (%d)\n", sta_id))
		return;

	rcu_read_lock();

	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
			 "Got valid sta_id (%d) but link_sta is NULL\n",
			 sta_id))
		goto out;

	iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
out:
	rcu_read_unlock();
}