1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <linux/skbuff.h>
9 #if defined(__FreeBSD__)
10 #include <net/ieee80211_radiotap.h>
11 #endif
12 #include "iwl-trans.h"
13 #include "mvm.h"
14 #include "fw-api.h"
15 #include "time-sync.h"
16
/*
 * iwl_mvm_check_pn - replay (PN) check for hw-decrypted unicast data
 *
 * Compares the CCMP/GCMP packet number carried in the frame's extended
 * IV against the last PN recorded for this station/key/TID on this RX
 * queue, and records the new PN when the frame is accepted.
 *
 * Returns 0 when the frame may be passed up (or needs no check here),
 * -1 when it must be dropped.
 */
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	int res;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_DEBUG_DROP(mvm,
			       "expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* ext IV follows the 802.11 header; key index is in the top two
	 * bits of its fourth octet
	 */
	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn */
	/* assemble most-significant-octet first so memcmp() compares PNs
	 * numerically
	 */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
	if (res < 0)
		return -1;
	/* an equal PN is only acceptable for subframes of the same A-MSDU */
	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
92
/*
 * iwl_mvm_create_skb - Adds the rxb to a new skb
 *
 * Copies the 802.11 header (plus crypto IV and, when the frame is small
 * enough, the whole frame) into the skb head and attaches any remainder
 * as a page fragment stolen from the RX buffer. Also downgrades or
 * adjusts CHECKSUM_COMPLETE where the hardware computed it over the
 * wrong span.
 *
 * Returns 0 on success, -EINVAL if the reported lengths are inconsistent.
 */
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
			      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* the descriptor reports the MIC/CRC length in 2-byte units */
	u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
				     IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;

	/* firmware may insert 2 bytes of padding after the IV */
	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		len -= 2;
		pad_len = 2;
	}

	/*
	 * For non monitor interface strip the bytes the RADA might not have
	 * removed (it might be disabled, e.g. for mgmt frames). As a monitor
	 * interface cannot exist with other interfaces, this removal is safe
	 * and sufficient, in monitor mode there's no decryption being done.
	 */
	if (len > mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS))
		len -= mic_crc_len;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len :
					       hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;

	/* inconsistent lengths reported - bail out */
	if (unlikely(headlen < hdrlen))
		return -EINVAL;

	/* skb_put_data() appends without moving existing data, so the
	 * current data + len is exactly where the header will land
	 */
	skb_set_mac_header(skb, skb->len);
	skb_put_data(skb, hdr, hdrlen);
	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

	/*
	 * If we did CHECKSUM_COMPLETE, the hardware only does it right for
	 * certain cases and starts the checksum after the SNAP. Check if
	 * this is the case - it's easier to just bail out to CHECKSUM_NONE
	 * in the cases the hardware didn't handle, since it's rare to see
	 * such packets, even though the hardware did calculate the checksum
	 * in this case, just starting after the MAC header instead.
	 *
	 * Starting from Bz hardware, it calculates starting directly after
	 * the MAC header, so that matches mac80211's expectation.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		struct {
			u8 hdr[6];
			__be16 type;
		} __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);

		if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
			     !ether_addr_equal(shdr->hdr, rfc1042_header) ||
			     (shdr->type != htons(ETH_P_IP) &&
			      shdr->type != htons(ETH_P_ARP) &&
			      shdr->type != htons(ETH_P_IPV6) &&
			      shdr->type != htons(ETH_P_8021Q) &&
			      shdr->type != htons(ETH_P_PAE) &&
			      shdr->type != htons(ETH_P_TDLS))))
			skb->ip_summed = CHECKSUM_NONE;
		else if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
			/* mac80211 assumes full CSUM including SNAP header */
			skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
	}

	/* whatever didn't fit in the head goes in as a page fragment */
	fraglen = len - headlen;

	if (fraglen) {
		int offset = (u8 *)hdr + headlen + pad_len -
			     (u8 *)rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	return 0;
}
194
/* put a TLV on the skb and return the data pointer
 *
 * Also pads the length up to a multiple of 4 and zeroes out the data part
 */
199 static void *
iwl_mvm_radiotap_put_tlv(struct sk_buff * skb,u16 type,u16 len)200 iwl_mvm_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
201 {
202 struct ieee80211_radiotap_tlv *tlv;
203
204 tlv = skb_put(skb, sizeof(*tlv));
205 tlv->type = cpu_to_le16(type);
206 tlv->len = cpu_to_le16(len);
207 return skb_put_zero(skb, ALIGN(len, 4));
208 }
209
iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm * mvm,struct sk_buff * skb)210 static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
211 struct sk_buff *skb)
212 {
213 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
214 struct ieee80211_radiotap_vendor_content *radiotap;
215 const u16 vendor_data_len = sizeof(mvm->cur_aid);
216
217 if (!mvm->cur_aid)
218 return;
219
220 radiotap = iwl_mvm_radiotap_put_tlv(skb,
221 IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
222 sizeof(*radiotap) + vendor_data_len);
223
224 /* Intel OUI */
225 radiotap->oui[0] = 0xf6;
226 radiotap->oui[1] = 0x54;
227 radiotap->oui[2] = 0x25;
228 /* radiotap sniffer config sub-namespace */
229 radiotap->oui_subtype = 1;
230 radiotap->vendor_type = 0;
231
232 /* fill the data now */
233 memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
234
235 rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
236 }
237
238 /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm * mvm,struct napi_struct * napi,struct sk_buff * skb,int queue,struct ieee80211_sta * sta)239 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
240 struct napi_struct *napi,
241 struct sk_buff *skb, int queue,
242 struct ieee80211_sta *sta)
243 {
244 if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
245 kfree_skb(skb);
246 return;
247 }
248
249 ieee80211_rx_napi(mvm->hw, sta, skb, napi);
250 }
251
/*
 * iwl_mvm_used_average_energy - maybe override beacon RSSI with the average
 *
 * For beacons received on a known MAC/link, report the stored average
 * beacon energy (mvm_vif->deflink.average_beacon_energy) as the signal
 * instead of the instantaneous per-frame reading.
 *
 * Returns true if rx_status->signal was overridden.
 */
static bool iwl_mvm_used_average_energy(struct iwl_mvm *mvm,
					struct iwl_rx_mpdu_desc *desc,
					struct ieee80211_hdr *hdr,
					struct ieee80211_rx_status *rx_status)
{
	struct iwl_mvm_vif *mvm_vif;
	struct ieee80211_vif *vif;
	u32 id;

	if (unlikely(!hdr || !desc))
		return false;

	/* only beacons are subject to the average-energy override */
	if (likely(!ieee80211_is_beacon(hdr->frame_control)))
		return false;

	/* for the link conf lookup */
	guard(rcu)();	/* scope-based: holds the RCU read lock until return */

	/* MAC or link ID depending on FW, but driver has them equal */
	id = u8_get_bits(desc->mac_phy_band,
			 IWL_RX_MPDU_MAC_PHY_BAND_MAC_MASK);

	/* >= means AUX MAC/link ID, no energy correction needed then */
	if (id >= ARRAY_SIZE(mvm->vif_id_to_mac))
		return false;

	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
	if (!vif)
		return false;

	mvm_vif = iwl_mvm_vif_from_mac80211(vif);

	/*
	 * If we know the MAC by MAC or link ID then the frame was
	 * received for the link, so by filtering it means it was
	 * from the AP the link is connected to.
	 */

	/* skip also in case we don't have it (yet) */
	if (!mvm_vif->deflink.average_beacon_energy)
		return false;

	IWL_DEBUG_STATS(mvm, "energy override by average %d\n",
			mvm_vif->deflink.average_beacon_energy);
	rx_status->signal = -mvm_vif->deflink.average_beacon_energy;
	return true;
}
299
/* fill rx_status signal/chain info from the per-chain energy readings */
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct iwl_rx_mpdu_desc *desc,
					struct ieee80211_hdr *hdr,
					struct ieee80211_rx_status *rx_status,
					u32 rate_n_flags, int energy_a,
					int energy_b)
{
	int best;

	/* a zero reading means no measurement on that chain */
	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = energy_b ? -energy_b : S8_MIN;
	best = energy_a > energy_b ? energy_a : energy_b;

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
			energy_a, energy_b, best);

	/* beacon signal may be replaced by the stored average */
	if (iwl_mvm_used_average_energy(mvm, desc, hdr, rx_status))
		return;

	rx_status->signal = best;
	rx_status->chains = u32_get_bits(rate_n_flags, RATE_MCS_ANT_AB_MSK);
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
}
324
/*
 * iwl_mvm_rx_mgmt_prot - apply firmware beacon-protection results
 *
 * Beacons can be filtered by the firmware, so replay detection for
 * protected beacons has to rely on the firmware's MIC/replay verdict.
 * Frames that fail are reported to userspace via cfg80211 before being
 * dropped.
 *
 * Returns 0 to accept the frame, -1 to drop it.
 */
static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
				struct ieee80211_hdr *hdr,
				struct iwl_rx_mpdu_desc *desc,
				u32 status,
				struct ieee80211_rx_status *stats)
{
	struct wireless_dev *wdev;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_mvm_vif *mvmvif;
	u8 keyid;
	struct ieee80211_key_conf *key;
	u32 len = le16_to_cpu(desc->mpdu_len);
	const u8 *frame = (void *)hdr;
	const u8 *mmie;

	if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/*
	 * For non-beacon, we don't really care. But beacons may
	 * be filtered out, and we thus need the firmware's replay
	 * detection, otherwise beacons the firmware previously
	 * filtered could be replayed, or something like that, and
	 * it can filter a lot - though usually only if nothing has
	 * changed.
	 */
	if (!ieee80211_is_beacon(hdr->frame_control))
		return 0;

	/* without a station we can't reach the keys - drop */
	if (!sta)
		return -1;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	/* key mismatch - will also report !MIC_OK but we shouldn't count it */
	if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
		goto report;

	/* good cases */
	if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
		   !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	}

	/*
	 * both keys will have the same cipher and MIC length, use
	 * whichever one is available
	 */
	key = rcu_dereference(mvmvif->bcn_prot.keys[0]);
	if (!key) {
		key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
		if (!key)
			goto report;
	}

	/* frame too short to even carry the MMIE */
	if (len < key->icv_len)
		goto report;

	/* get the real key ID */
	mmie = frame + (len - key->icv_len);

	/* the position of the key_id in ieee80211_mmie_16 is the same */
	keyid = le16_to_cpu(((const struct ieee80211_mmie *)mmie)->key_id);

	/* and if that's the other key, look it up */
	if (keyid != key->keyidx) {
		/*
		 * shouldn't happen since firmware checked, but be safe
		 * in case the MIC length is wrong too, for example
		 */
		if (keyid != 6 && keyid != 7)
			return -1;
		key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
		if (!key)
			goto report;
	}

	/* Report status to mac80211 */
	if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
		ieee80211_key_mic_failure(key);
	else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
		ieee80211_key_replay(key);
report:
	/* tell userspace about the unprotected/bad frame, then drop it */
	wdev = ieee80211_vif_to_wdev(mvmsta->vif);
	if (wdev->netdev)
		cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);

	return -1;
}
416
/*
 * iwl_mvm_rx_crypto - translate firmware decryption status to mac80211 flags
 *
 * Inspects the security status in desc->status, sets the matching
 * RX_FLAG_* bits in @stats and returns the crypto header (IV) length
 * through @crypt_len.
 *
 * Returns 0 to keep processing the frame, -1 to drop it.
 */
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			     struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats, u16 phy_info,
			     struct iwl_rx_mpdu_desc *desc,
			     u32 pkt_flags, int queue, u8 *crypt_len)
{
	u32 status = le32_to_cpu(desc->status);

	/*
	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
	 * (where we don't have the keys).
	 * We limit this to aggregation because in TKIP this is a valid
	 * scenario, since we may not have the (correct) TTAK (phase 1
	 * key) in the firmware.
	 */
	if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) {
		IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n");
		return -1;
	}

	/* unprotected mgmt frames go through beacon-protection handling */
	if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
		     !ieee80211_has_protected(hdr->frame_control)))
		return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */
#if defined(__FreeBSD__)
	/* XXX-BZ do similar to rx.c for now as these are plenty. */
	if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_ENC_ERR)
		return (0);
#endif

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
			IWL_DEBUG_DROP(mvm,
				       "Dropping packet, bad MIC (CCM/GCM)\n");
			return -1;
		}

		stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		if (mvm->trans->mac_cfg->gen2 &&
		    !(status & RX_MPDU_RES_STATUS_MIC_OK))
			stats->flag |= RX_FLAG_MMIC_ERROR;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		/* TKIP shares the ICV handling below with WEP */
		fallthrough;
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
				IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;

		if (pkt_flags & FH_RSCSR_RADA_EN) {
			stats->flag |= RX_FLAG_ICV_STRIPPED;
			if (mvm->trans->mac_cfg->gen2)
				stats->flag |= RX_FLAG_MMIC_STRIPPED;
		}

		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
		break;
	default:
		/*
		 * Sometimes we can get frames that were not decrypted
		 * because the firmware didn't have the keys yet. This can
		 * happen after connection where we can get multicast frames
		 * before the GTK is installed.
		 * Silently drop those frames.
		 * Also drop un-decrypted frames in monitor mode.
		 */
		if (!is_multicast_ether_addr(hdr->addr1) &&
		    !mvm->monitor_on && net_ratelimit())
#if defined(__linux__)
			IWL_WARN(mvm, "Unhandled alg: 0x%x\n", status);
#elif defined(__FreeBSD__)
			IWL_WARN(mvm, "%s: Unhandled alg: 0x%x\n", __func__, status);
#endif
	}

	return 0;
}
526
/*
 * iwl_mvm_rx_csum - set the skb checksum state from RX metadata
 *
 * AX210 and later report a raw checksum, which maps to
 * CHECKSUM_COMPLETE; older devices only flag TCP/UDP and IP header
 * checksum validity, which maps to CHECKSUM_UNNECESSARY when the vif
 * has RXCSUM enabled.
 */
static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_packet *pkt)
{
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;

	if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
			u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);

			/* hw gives the one's-complement sum; invert and
			 * store it in the internal representation
			 */
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = csum_unfold(~(__force __sum16)hwsum);
		}
	} else {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_vif *mvmvif;
		u16 flags = le16_to_cpu(desc->l3l4_flags);
		u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
				  IWL_RX_L3_PROTO_POS);

		mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

		/* IPv6 has no L3 header checksum, so only require the
		 * IP-header-OK bit for non-IPv6 protocols
		 */
		if (mvmvif->features & NETIF_F_RXCSUM &&
		    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
		    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
558
559 /*
560 * returns true if a packet is a duplicate or invalid tid and should be dropped.
561 * Updates AMSDU PN tracking info
562 */
static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
			   struct ieee80211_rx_status *rx_status,
			   struct ieee80211_hdr *hdr,
			   struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (WARN_ON_ONCE(!mvm_sta->dup_data))
		return false;

	/* dup state is tracked per station and per RX queue */
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_any_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return false;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		/* frame has qos control */
		tid = ieee80211_get_tid(hdr);
		if (tid >= IWL_MAX_TID_COUNT)
			return true;
	} else {
		/* non-QoS data shares a dedicated slot past the real TIDs */
		tid = IWL_MAX_TID_COUNT;
	}

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info &
		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	/* retry of an already-seen seq_ctrl/subframe index => duplicate */
	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	/* Allow same PN as the first subframe for following sub frames */
	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;

	/* remember this frame for the next comparison */
	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}
622
/*
 * iwl_mvm_release_frames - release buffered frames up to @nssn
 *
 * Walks the reorder buffer from head_sn up to (but not including) @nssn,
 * passing any stored frames for each SN to mac80211, then advances
 * head_sn to @nssn. Caller must hold reorder_buf->lock.
 */
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_baid_data *baid_data,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	/* this queue's slice of the shared entries array */
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/*
		 * Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}
658
iwl_mvm_del_ba(struct iwl_mvm * mvm,int queue,struct iwl_mvm_delba_data * data)659 static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
660 struct iwl_mvm_delba_data *data)
661 {
662 struct iwl_mvm_baid_data *ba_data;
663 struct ieee80211_sta *sta;
664 struct iwl_mvm_reorder_buffer *reorder_buf;
665 u8 baid = data->baid;
666 u32 sta_id;
667
668 if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
669 return;
670
671 rcu_read_lock();
672
673 ba_data = rcu_dereference(mvm->baid_map[baid]);
674 if (WARN_ON_ONCE(!ba_data))
675 goto out;
676
677 /* pick any STA ID to find the pointer */
678 sta_id = ffs(ba_data->sta_mask) - 1;
679 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
680 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
681 goto out;
682
683 reorder_buf = &ba_data->reorder_buf[queue];
684
685 /* release all frames that are in the reorder buffer to the stack */
686 spin_lock_bh(&reorder_buf->lock);
687 iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
688 ieee80211_sn_add(reorder_buf->head_sn,
689 ba_data->buf_size));
690 spin_unlock_bh(&reorder_buf->lock);
691
692 out:
693 rcu_read_unlock();
694 }
695
/*
 * iwl_mvm_release_frames_from_notif - handle a frame-release notification
 *
 * Looks up the BA session for @baid and releases the frames buffered on
 * @queue up to @nssn.
 */
static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	struct iwl_mvm_baid_data *ba_data;
	u32 sta_id;

	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (IWL_FW_CHECK(mvm,
			 baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mvm->baid_map),
			 "invalid BAID from FW: %d\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (!ba_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID %d but not allocated, invalid frame release!\n",
			     baid);
		goto out;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, napi, ba_data,
			       reorder_buf, nssn);
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}
740
/*
 * iwl_mvm_rx_queue_notif - handle an internal RX queue sync notification
 *
 * Validates the notification's size and (for sync notifications) its
 * cookie, dispatches on the internal type, and - when synchronous -
 * marks this queue as done, waking the sync waiter once all queues
 * have responded.
 */
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
			    struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;
	u32 len = iwl_rx_packet_payload_len(pkt);

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif),
		      "invalid notification size %d (%d)",
		      len, (int)(sizeof(*notif) + sizeof(*internal_notif))))
		return;
	/* from here on, len is the size of the type-specific payload only */
	len -= sizeof(*notif) + sizeof(*internal_notif);

	if (WARN_ONCE(internal_notif->sync &&
		      mvm->queue_sync_cookie != internal_notif->cookie,
		      "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
		      internal_notif->cookie, mvm->queue_sync_cookie, queue))
		return;

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		WARN_ONCE(len, "invalid empty notification size %d", len);
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data),
			      "invalid delba notification size %d (%d)",
			      len, (int)sizeof(struct iwl_mvm_delba_data)))
			break;
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}

	if (internal_notif->sync) {
		/* the last queue to clear its bit wakes the waiter */
		WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state),
			  "queue sync: queue %d responded a second time!\n",
			  queue);
		if (READ_ONCE(mvm->queue_sync_state) == 0)
			wake_up(&mvm->rx_sync_waitq);
	}
}
787
/*
 * Returns true if the MPDU was buffered/dropped, false if it should be passed
 * to upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
#if defined(__linux__)
	u8 tid = ieee80211_get_tid(hdr);
#elif defined(__FreeBSD__)
	u8 tid;
#endif
	struct iwl_mvm_reorder_buf_entry *entries;
	u32 sta_mask;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	/* no reordering is done here for 9000-family devices */
	if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000)
		return false;

	/*
	 * This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	/* no sta yet */
	if (WARN_ONCE(IS_ERR_OR_NULL(sta),
		      "Got valid BAID without a valid station assigned\n"))
		return false;

	/* not a data packet or a bar */
	if (!ieee80211_is_back_req(hdr->frame_control) &&
	    (!ieee80211_is_data_qos(hdr->frame_control) ||
	     is_multicast_ether_addr(hdr->addr1)))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder);
		return false;
	}

#if defined(__FreeBSD__)
	tid = ieee80211_get_tid(hdr);
#endif
	/* cross-check the frame's sta/tid against the BA session's */
	sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);

	if (IWL_FW_CHECK(mvm,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	spin_lock_bh(&buffer->lock);

	/* ignore frames belonging to the session's "old SN" window until
	 * the buffer becomes valid
	 */
	if (!buffer->valid) {
		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
			spin_unlock_bh(&buffer->lock);
			return false;
		}
		buffer->valid = true;
	}

	/* drop any duplicated packets */
	if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE))
		goto drop;

	/* drop any outdated packets */
	if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;

		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/*
	 * release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);

		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 * If this is the first frame that is stored in the buffer, the head_sn
	 * may be outdated. Update it based on the last NSSN to make sure it
	 * will be released when the frame release notification arrives.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
				       buffer, nssn);
	else if (buffer->num_stored == 1)
		buffer->head_sn = nssn;

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}
951
/*
 * Note that traffic arrived for an active BA session: refresh the
 * session's last-RX timestamp (rate-limited) so the session timeout
 * logic does not tear it down while frames are still flowing.
 */
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
				    u32 reorder_data, u8 baid)
{
	struct iwl_mvm_baid_data *ba_data;
	unsigned long now = jiffies;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (!ba_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder_data);
	} else if (ba_data->timeout) {
		timeout = ba_data->timeout;
		/*
		 * Do not update last rx all the time to avoid cache bouncing
		 * between the rx queues.
		 * Update it every timeout. Worst case is the session will
		 * expire after ~ 2 * timeout, which doesn't matter that much.
		 */
		if (time_before(ba_data->last_rx + TU_TO_JIFFIES(timeout), now))
			/* Update is atomic */
			ba_data->last_rx = now;
	}

	rcu_read_unlock();
}
986
/* Reverse the byte order of a MAC address in place. */
static void iwl_mvm_flip_address(u8 *addr)
{
	int lo, hi;

	for (lo = 0, hi = ETH_ALEN - 1; lo < hi; lo++, hi--) {
		u8 tmp = addr[lo];

		addr[lo] = addr[hi];
		addr[hi] = tmp;
	}
}
996
/*
 * PHY metadata collected for a received PPDU, used to build the
 * radiotap HE/EHT/U-SIG elements.
 */
struct iwl_mvm_rx_phy_data {
	/* selects how the raw data words below are interpreted */
	enum iwl_rx_phy_info_type info_type;
	/* raw PHY data words from the firmware; meaning depends on info_type */
	__le32 d0, d1, d2, d3, eht_d4, d5;
	__le16 d4;
	/*
	 * true when the PHY data accompanies an MPDU; when false, the
	 * decoders read rx_vec (no-data/RX-vector notification) instead
	 * of d0..d5
	 */
	bool with_data;
	bool first_subframe;	/* first subframe of an A-MPDU */
	/* raw RX vector words (e.g. USIG-A), used when !with_data */
	__le32 rx_vec[4];

	u32 rate_n_flags;	/* RATE_MCS_* encoded rate/flags */
	u32 gp2_on_air_rise;	/* presumably GP2 time at on-air rise — not used in this chunk */
	u16 phy_info;		/* IWL_RX_MPDU_PHY_* flags (AMPDU, TSF_OVERLOAD, ...) */
	u8 energy_a, energy_b;	/* per-chain energy; semantics not visible here */
	u8 channel;
};
1011
/*
 * Decode the HE-MU "extended" PHY data into the radiotap HE-MU element:
 * the per-content-channel RU allocation bytes and the center-26-tone RU
 * values. Each content channel is filled only if the firmware reported
 * its SIG-B CRC as OK; channel 2 additionally requires > 20 MHz bandwidth.
 */
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
				     struct iwl_mvm_rx_phy_data *phy_data,
				     struct ieee80211_radiotap_he_mu *he_mu)
{
	u32 phy_data2 = le32_to_cpu(phy_data->d2);
	u32 phy_data3 = le32_to_cpu(phy_data->d3);
	u16 phy_data4 = le16_to_cpu(phy_data->d4);
	u32 rate_n_flags = phy_data->rate_n_flags;

	/* content channel 1 */
	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);

		he_mu->flags1 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);

		/* the four RU bytes alternate between PHY data words 2 and 3 */
		he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
					     phy_data2);
		he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
					     phy_data3);
		he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
					     phy_data2);
		he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
					     phy_data3);
	}

	/* content channel 2 only exists for bandwidths above 20 MHz */
	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
	    (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);

		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);

		he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
					     phy_data2);
		he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
					     phy_data3);
		he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
					     phy_data2);
		he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
					     phy_data3);
	}
}
1062
/*
 * Translate the firmware's HE RU allocation index into the mac80211
 * rx_status RU size/bandwidth and the radiotap HE (and optionally
 * HE-MU) bandwidth fields. @he_mu may be NULL; it is only written when
 * non-NULL.
 */
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
			       struct ieee80211_radiotap_he *he,
			       struct ieee80211_radiotap_he_mu *he_mu,
			       struct ieee80211_rx_status *rx_status)
{
	/*
	 * Unfortunately, we have to leave the mac80211 data
	 * incorrect for the case that we receive an HE-MU
	 * transmission and *don't* have the HE phy data (due
	 * to the bits being used for TSF). This shouldn't
	 * happen though as management frames where we need
	 * the TSF/timers are not transmitted in HE-MU.
	 */
	u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	u8 offs = 0;

	rx_status->bw = RATE_INFO_BW_HE_RU;

	he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);

	/*
	 * Map the RU allocation index to an RU size class plus the offset
	 * of this RU within that class (802.11ax RU index encoding).
	 */
	switch (ru) {
	case 0 ... 36:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}
	he->data2 |= le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
	he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
				 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
	if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
		he->data2 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);

/*
 * Verify at build time that the radiotap bandwidth encodings match the
 * firmware's RATE_MCS_CHAN_WIDTH values, so the field can be copied
 * directly below.
 */
#define CHECK_BW(bw) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
	CHECK_BW(20);
	CHECK_BW(40);
	CHECK_BW(80);
	CHECK_BW(160);

	if (he_mu)
		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
	else if (he_type == RATE_MCS_HE_TYPE_TRIG)
		he->data6 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
1144
/*
 * Fill the radiotap HE (and, for MU types, HE-MU) elements from the
 * decoded PHY data. Non-HE info types return without touching the
 * output. NOTE(review): @he_mu is only dereferenced in the HE_MU /
 * HE_MU_EXT cases below — callers presumably guarantee it is non-NULL
 * for those types; confirm at the call site.
 */
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
				       struct iwl_mvm_rx_phy_data *phy_data,
				       struct ieee80211_radiotap_he *he,
				       struct ieee80211_radiotap_he_mu *he_mu,
				       struct ieee80211_rx_status *rx_status,
				       int queue)
{
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_NONE:
	case IWL_RX_PHY_INFO_TYPE_CCK:
	case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
	case IWL_RX_PHY_INFO_TYPE_HT:
	case IWL_RX_PHY_INFO_TYPE_VHT_SU:
	case IWL_RX_PHY_INFO_TYPE_VHT_MU:
	case IWL_RX_PHY_INFO_TYPE_EHT_MU:
	case IWL_RX_PHY_INFO_TYPE_EHT_TB:
	case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
		/* not an HE PPDU - nothing to do here */
		return;
	case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
		/* TB-ext carries four spatial reuse fields, then falls
		 * through to the common HE decoding below
		 */
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_HE_TB:
		/* HE common */
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
		he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
					      IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
		/* UL/DL is not reported for trigger-based PPDUs */
		if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
		    phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
			he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
			he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
								    IWL_RX_PHY_DATA0_HE_UPLINK),
						      IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
		}
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
					      IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
					      IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
					      IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1,
							    IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
					      IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
		he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
					      IEEE80211_RADIOTAP_HE_DATA6_TXOP);
		he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_DOPPLER),
					      IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
		break;
	}

	/* spatial reuse - reported for SU/MU (TB uses the fields above) */
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
					      IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
		break;
	default:
		/* nothing here */
		break;
	}

	/* type-specific fields */
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
		he_mu->flags1 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
		he_mu->flags1 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
		he_mu->flags2 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
		iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
		he_mu->flags2 |=
			le16_encode_bits(le32_get_bits(phy_data->d1,
						       IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
		he_mu->flags2 |=
			le16_encode_bits(le32_get_bits(phy_data->d1,
						       IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_TB:
	case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
		iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
		break;
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
					      IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
		break;
	default:
		/* nothing */
		break;
	}
}
1280
/*
 * Extract the field selected by @dec_bits from the le32 @value and
 * re-encode it under the @enc_bits mask (result is still le32).
 */
#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
	le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)

/*
 * Decode a field from @in_value into the radiotap U-SIG element:
 * mark the @enc_bits as known in ->mask and store the value in ->value.
 */
#define IWL_MVM_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
	typeof(enc_bits) _enc_bits = enc_bits; \
	typeof(usig) _usig = usig; \
	(_usig)->mask |= cpu_to_le32(_enc_bits); \
	(_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
} while (0)

/*
 * Copy one RU allocation subfield from the firmware PHY data word
 * @fw_data (field @fw_ru) into radiotap EHT data word @rt_data
 * (content-channel slot @rt_ru), marking it known.
 */
#define __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
	eht->data[(rt_data)] |= \
		(cpu_to_le32 \
		 (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
		 LE32_DEC_ENC(data ## fw_data, \
			      IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
			      IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))

/* extra expansion layer so the token-pasted lookup macros below resolve */
#define _IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
	__IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)

/*
 * Which radiotap EHT data word holds the RU allocation subfield for
 * content channel / index tokens of the form <cc>_<x>_<y>.
 */
#define IEEE80211_RADIOTAP_RU_DATA_1_1_1	1
#define IEEE80211_RADIOTAP_RU_DATA_2_1_1	2
#define IEEE80211_RADIOTAP_RU_DATA_1_1_2	2
#define IEEE80211_RADIOTAP_RU_DATA_2_1_2	2
#define IEEE80211_RADIOTAP_RU_DATA_1_2_1	3
#define IEEE80211_RADIOTAP_RU_DATA_2_2_1	3
#define IEEE80211_RADIOTAP_RU_DATA_1_2_2	3
#define IEEE80211_RADIOTAP_RU_DATA_2_2_2	4

/*
 * Which firmware PHY data word (dN) carries each hardware-labelled RU
 * allocation value (A1..D2, see the channel layout comment in
 * iwl_mvm_decode_eht_ext_mu()).
 */
#define IWL_RX_RU_DATA_A1			2
#define IWL_RX_RU_DATA_A2			2
#define IWL_RX_RU_DATA_B1			2
#define IWL_RX_RU_DATA_B2			4
#define IWL_RX_RU_DATA_C1			3
#define IWL_RX_RU_DATA_C2			3
#define IWL_RX_RU_DATA_D1			4
#define IWL_RX_RU_DATA_D2			4

/* encode RU @fw_ru into radiotap slot @rt_ru via the two tables above */
#define IWL_MVM_ENC_EHT_RU(rt_ru, fw_ru)				\
	_IWL_MVM_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru,	\
			    rt_ru,					\
			    IWL_RX_RU_DATA_ ## fw_ru,			\
			    fw_ru)
1325
/*
 * Decode the EHT-MU specific PHY information into the radiotap EHT and
 * U-SIG elements. With MPDU data present, fields come from the PHY data
 * words (d1..d5); for a no-data notification they come from the raw
 * USIG-A words in rx_vec instead.
 */
static void iwl_mvm_decode_eht_ext_mu(struct iwl_mvm *mvm,
				      struct iwl_mvm_rx_phy_data *phy_data,
				      struct ieee80211_rx_status *rx_status,
				      struct ieee80211_radiotap_eht *eht,
				      struct ieee80211_radiotap_eht_usig *usig)
{
	if (phy_data->with_data) {
		__le32 data1 = phy_data->d1;
		__le32 data2 = phy_data->d2;
		__le32 data3 = phy_data->d3;
		__le32 data4 = phy_data->eht_d4;
		__le32 data5 = phy_data->d5;
		u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data4,
					    IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
		IWL_MVM_ENC_USIG_VALUE_MASK
			(usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
			 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);

		/* the firmware reports a single user's STA ID here */
		eht->user_info[0] |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
			LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
				     IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);

		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
		eht->data[7] |= LE32_DEC_ENC
			(data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
			 IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);

		/*
		 * Hardware labels the content channels/RU allocation values
		 * as follows:
		 *           Content Channel 1		Content Channel 2
		 *   20 MHz: A1
		 *   40 MHz: A1				B1
		 *   80 MHz: A1 C1			B1 D1
		 *  160 MHz: A1 C1 A2 C2		B1 D1 B2 D2
		 *  320 MHz: A1 C1 A2 C2 A3 C3 A4 C4	B1 D1 B2 D2 B3 D3 B4 D4
		 *
		 * However firmware can only give us A1-D2, so the higher
		 * frequencies are missing.
		 */

		/* fallthroughs accumulate the RUs valid at each bandwidth */
		switch (phy_bw) {
		case RATE_MCS_CHAN_WIDTH_320:
			/* additional values are missing in RX metadata */
		case RATE_MCS_CHAN_WIDTH_160:
			/* content channel 1 */
			IWL_MVM_ENC_EHT_RU(1_2_1, A2);
			IWL_MVM_ENC_EHT_RU(1_2_2, C2);
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_2_1, B2);
			IWL_MVM_ENC_EHT_RU(2_2_2, D2);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_80:
			/* content channel 1 */
			IWL_MVM_ENC_EHT_RU(1_1_2, C1);
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_1_2, D1);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_40:
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_1_1, B1);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_20:
			IWL_MVM_ENC_EHT_RU(1_1_1, A1);
			break;
		}
	} else {
		/* no MPDU data - decode straight from the USIG-A words */
		__le32 usig_a1 = phy_data->rx_vec[0];
		__le32 usig_a2 = phy_data->rx_vec[1];

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_VALIDATE,
					    IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PPDU_TYPE,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_SIG_MCS,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
		IWL_MVM_ENC_USIG_VALUE_MASK
			(usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
			 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_CRC_OK,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
	}
}
1435
/*
 * Decode the EHT trigger-based (TB) specific PHY information into the
 * radiotap U-SIG element. With MPDU data present, fields come from PHY
 * data word d5; for a no-data notification they come from the raw
 * USIG-A words in rx_vec instead.
 */
static void iwl_mvm_decode_eht_ext_tb(struct iwl_mvm *mvm,
				      struct iwl_mvm_rx_phy_data *phy_data,
				      struct ieee80211_rx_status *rx_status,
				      struct ieee80211_radiotap_eht *eht,
				      struct ieee80211_radiotap_eht_usig *usig)
{
	if (phy_data->with_data) {
		__le32 data5 = phy_data->d5;

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
	} else {
		/* no MPDU data - decode straight from the USIG-A words */
		__le32 usig_a1 = phy_data->rx_vec[0];
		__le32 usig_a2 = phy_data->rx_vec[1];

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PPDU_TYPE,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_CRC_OK,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
	}
}
1482
iwl_mvm_decode_eht_ru(struct iwl_mvm * mvm,struct ieee80211_rx_status * rx_status,struct ieee80211_radiotap_eht * eht)1483 static void iwl_mvm_decode_eht_ru(struct iwl_mvm *mvm,
1484 struct ieee80211_rx_status *rx_status,
1485 struct ieee80211_radiotap_eht *eht)
1486 {
1487 u32 ru = le32_get_bits(eht->data[8],
1488 IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
1489 enum nl80211_eht_ru_alloc nl_ru;
1490
1491 /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
1492 * in an EHT variant User Info field
1493 */
1494
1495 switch (ru) {
1496 case 0 ... 36:
1497 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
1498 break;
1499 case 37 ... 52:
1500 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
1501 break;
1502 case 53 ... 60:
1503 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
1504 break;
1505 case 61 ... 64:
1506 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
1507 break;
1508 case 65 ... 66:
1509 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
1510 break;
1511 case 67:
1512 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
1513 break;
1514 case 68:
1515 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
1516 break;
1517 case 69:
1518 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
1519 break;
1520 case 70 ... 81:
1521 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
1522 break;
1523 case 82 ... 89:
1524 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
1525 break;
1526 case 90 ... 93:
1527 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
1528 break;
1529 case 94 ... 95:
1530 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
1531 break;
1532 case 96 ... 99:
1533 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
1534 break;
1535 case 100 ... 103:
1536 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
1537 break;
1538 case 104:
1539 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
1540 break;
1541 case 105 ... 106:
1542 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
1543 break;
1544 default:
1545 return;
1546 }
1547
1548 rx_status->bw = RATE_INFO_BW_EHT_RU;
1549 rx_status->eht.ru = nl_ru;
1550 }
1551
/*
 * Decode EHT PHY information common to all EHT info types into the
 * radiotap EHT and U-SIG elements, then dispatch to the TB- or
 * MU-specific decoder. Source of each field is either the PHY data
 * words (with_data) or the raw USIG-A words in rx_vec.
 */
static void iwl_mvm_decode_eht_phy_data(struct iwl_mvm *mvm,
					struct iwl_mvm_rx_phy_data *phy_data,
					struct ieee80211_rx_status *rx_status,
					struct ieee80211_radiotap_eht *eht,
					struct ieee80211_radiotap_eht_usig *usig)

{
	__le32 data0 = phy_data->d0;
	__le32 data1 = phy_data->d1;
	__le32 usig_a1 = phy_data->rx_vec[0];
	u8 info_type = phy_data->info_type;

	/* Not in EHT range */
	if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
	    info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
		return;

	usig->common |= cpu_to_le32
		(IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
		 IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
	if (phy_data->with_data) {
		usig->common |= LE32_DEC_ENC(data0,
					     IWL_RX_PHY_DATA0_EHT_UPLINK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
		usig->common |= LE32_DEC_ENC(data0,
					     IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
	} else {
		usig->common |= LE32_DEC_ENC(usig_a1,
					     IWL_RX_USIG_A1_UL_FLAG,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
		usig->common |= LE32_DEC_ENC(usig_a1,
					     IWL_RX_USIG_A1_BSS_COLOR,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
	}

	/* validate-bits result is only reported by capable firmware */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT)) {
		usig->common |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
		usig->common |=
			LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
	}

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
	eht->data[0] |= LE32_DEC_ENC(data0,
				     IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
				     IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);

	/* All RU allocating size/index is in TB format */
	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
	eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
	eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
	eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);

	/* derives rx_status RU size from the data[8] fields just stored */
	iwl_mvm_decode_eht_ru(mvm, rx_status, eht);

	/* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
	 * which is on only in case of monitor mode so no need to check monitor
	 * mode
	 */
	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
	eht->data[1] |=
		le32_encode_bits(mvm->monitor_p80,
				 IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);

	usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
	if (phy_data->with_data)
		usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
	else
		usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
				     IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
				     IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
				     IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);

	/* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */

	if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
		usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);

	usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
	usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);

	/*
	 * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
	 *	 IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
	 */

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
	eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
				     IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);

	/* finally, dispatch to the PPDU-type-specific decoder */
	if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
	    info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
		iwl_mvm_decode_eht_ext_tb(mvm, phy_data, rx_status, eht, usig);

	if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
	    info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
		iwl_mvm_decode_eht_ext_mu(mvm, phy_data, rx_status, eht, usig);
}
1668
/*
 * Build the radiotap EHT and U-SIG TLVs for a received EHT PPDU and
 * fill the related mac80211 rx_status fields (bandwidth, GI, A-MPDU
 * EOF details). Runs on the RX path; monitor-only PHY data is decoded
 * when IWL_RX_MPDU_PHY_TSF_OVERLOAD is set.
 */
static void iwl_mvm_rx_eht(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct iwl_mvm_rx_phy_data *phy_data,
			   int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);

	struct ieee80211_radiotap_eht *eht;
	struct ieee80211_radiotap_eht_usig *usig;
	size_t eht_len = sizeof(*eht);

	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	/* EHT and HE have the same values for LTF */
	u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
	u16 phy_info = phy_data->phy_info;
	u32 bw;

	/* u32 for 1 user_info */
	if (phy_data->with_data)
		eht_len += sizeof(u32);

	eht = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);

	usig = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
					sizeof(*usig));
	rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
	usig->common |=
		cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);

	/* specific handling for 320MHz */
	bw = FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags);
	if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
		/* the BW320 slot distinguishes 320MHz-1 from 320MHz-2 */
		bw += FIELD_GET(IWL_RX_PHY_DATA0_EHT_BW320_SLOT,
				le32_to_cpu(phy_data->d0));

	usig->common |= cpu_to_le32
		(FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));

	/* report the AMPDU-EOF bit on single frames */
	if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
	    (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* PHY data is only valid when TSF bits were repurposed (monitor) */
	if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		iwl_mvm_decode_eht_phy_data(mvm, phy_data, rx_status, eht, usig);

/* verify at build time that radiotap and firmware HE-type encodings agree */
#define CHECK_TYPE(F)						\
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))

	CHECK_TYPE(SU);
	CHECK_TYPE(EXT_SU);
	CHECK_TYPE(MU);
	CHECK_TYPE(TRIG);

	/*
	 * Map the GI/LTF code to guard interval and LTF size; the
	 * trigger-based (TB) variant uses a different mapping.
	 */
	switch (FIELD_GET(RATE_MCS_HE_GI_LTF_MSK, rate_n_flags)) {
	case 0:
		if (he_type == RATE_MCS_HE_TYPE_TRIG) {
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
		} else {
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		}
		break;
	case 1:
		rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		break;
	case 2:
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
		else
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
		break;
	case 3:
		if (he_type != RATE_MCS_HE_TYPE_TRIG) {
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
		}
		break;
	default:
		/* nothing here */
		break;
	}

	if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
		eht->data[0] |= cpu_to_le32
			(FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
				    ltf) |
			 FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
				    rx_status->eht.gi));
	}


	if (!phy_data->with_data) {
		/* no MPDU: NSS/beamforming come from the RX vector */
		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
					  IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
		eht->data[7] |=
			le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
						       RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
					 IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
		if (rate_n_flags & RATE_MCS_BF_MSK)
			eht->data[7] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
	} else {
		/* with MPDU: fill the single user-info entry from rate_n_flags */
		eht->user_info[0] |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);

		if (rate_n_flags & RATE_MCS_BF_MSK)
			eht->user_info[0] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);

		if (rate_n_flags & RATE_MCS_LDPC_MSK)
			eht->user_info[0] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);

		eht->user_info[0] |= cpu_to_le32
			(FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
				    FIELD_GET(RATE_VHT_MCS_RATE_CODE_MSK,
					      rate_n_flags)) |
			 FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
				    FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags)));
	}
}
1811
/*
 * Add HE (and, for MU PPDUs, HE-MU) radiotap headers to the skb and fill
 * the rx_status HE fields (GI, LTF size, RU allocation, A-MPDU EOF bit)
 * from rate_n_flags and the firmware-provided PHY data.
 */
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct iwl_mvm_rx_phy_data *phy_data,
			  int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_radiotap_he_mu *he_mu = NULL;
	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	u8 ltf;
	/* template with the "known" bits we can always fill from rate_n_flags */
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
	};
	/* template for the MU-specific radiotap part (SIG-B / bandwidth info) */
	static const struct ieee80211_radiotap_he_mu mu_known = {
		.flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
		.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
	};
	u16 phy_info = phy_data->phy_info;

	/* the HE radiotap header goes first into the skb (before any payload) */
	he = skb_put_data(skb, &known, sizeof(known));
	rx_status->flag |= RX_FLAG_RADIOTAP_HE;

	if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
	    phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
		he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
		rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
	}

	/* report the AMPDU-EOF bit on single frames */
	if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* TSF overload means d0..d2 carry PHY data instead of a timestamp */
	if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
					   queue);

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
	    (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		/*
		 * NOTE(review): this HE path tests the EHT delimiter-EOF bit
		 * while the single-frame case above tests the HE one --
		 * presumably both defines name the same d0 bit; confirm
		 * against the fw-api definitions.
		 */
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* extended-range SU on a 106-tone RU is reported as an RU bandwidth */
	if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
	    rate_n_flags & RATE_MCS_HE_106T_MSK) {
		rx_status->bw = RATE_INFO_BW_HE_RU;
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
	}

	/* actually data is filled in mac80211 */
	if (he_type == RATE_MCS_HE_TYPE_SU ||
	    he_type == RATE_MCS_HE_TYPE_EXT_SU)
		he->data1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);

/* compile-time proof that fw HE-type values match the radiotap encoding */
#define CHECK_TYPE(F)						\
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=	\
		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))

	CHECK_TYPE(SU);
	CHECK_TYPE(EXT_SU);
	CHECK_TYPE(MU);
	CHECK_TYPE(TRIG);

	he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);

	if (rate_n_flags & RATE_MCS_BF_MSK)
		he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);

	/*
	 * Map the firmware GI/LTF code to nl80211 GI and radiotap LTF size;
	 * TB (trigger-based) PPDUs use a different mapping than SU/MU.
	 */
	switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
		RATE_MCS_HE_GI_LTF_POS) {
	case 0:
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		if (he_type == RATE_MCS_HE_TYPE_MU)
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		else
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
		break;
	case 1:
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		break;
	case 2:
		if (he_type == RATE_MCS_HE_TYPE_TRIG) {
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		} else {
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		}
		break;
	case 3:
		rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		break;
	case 4:
		rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		break;
	default:
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
	}

	he->data5 |= le16_encode_bits(ltf,
				      IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
}
1938
iwl_mvm_decode_lsig(struct sk_buff * skb,struct iwl_mvm_rx_phy_data * phy_data)1939 static void iwl_mvm_decode_lsig(struct sk_buff *skb,
1940 struct iwl_mvm_rx_phy_data *phy_data)
1941 {
1942 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1943 struct ieee80211_radiotap_lsig *lsig;
1944
1945 switch (phy_data->info_type) {
1946 case IWL_RX_PHY_INFO_TYPE_HT:
1947 case IWL_RX_PHY_INFO_TYPE_VHT_SU:
1948 case IWL_RX_PHY_INFO_TYPE_VHT_MU:
1949 case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
1950 case IWL_RX_PHY_INFO_TYPE_HE_SU:
1951 case IWL_RX_PHY_INFO_TYPE_HE_MU:
1952 case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
1953 case IWL_RX_PHY_INFO_TYPE_HE_TB:
1954 case IWL_RX_PHY_INFO_TYPE_EHT_MU:
1955 case IWL_RX_PHY_INFO_TYPE_EHT_TB:
1956 case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
1957 case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
1958 lsig = skb_put(skb, sizeof(*lsig));
1959 lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
1960 lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1,
1961 IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
1962 IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
1963 rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
1964 break;
1965 default:
1966 break;
1967 }
1968 }
1969
/*
 * Iterator context for iwl_mvm_rx_get_sta_block_tx(): the caller seeds
 * all_sta_unblocked = true and the iterator clears it if any station on
 * @vif still has TX disabled.
 */
struct iwl_rx_sta_csa {
	bool all_sta_unblocked;	/* cleared if a station on @vif has disable_tx set */
	struct ieee80211_vif *vif;	/* only stations on this vif are considered */
};
1974
iwl_mvm_rx_get_sta_block_tx(void * data,struct ieee80211_sta * sta)1975 static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
1976 {
1977 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1978 struct iwl_rx_sta_csa *rx_sta_csa = data;
1979
1980 if (mvmsta->vif != rx_sta_csa->vif)
1981 return;
1982
1983 if (mvmsta->disable_tx)
1984 rx_sta_csa->all_sta_unblocked = false;
1985 }
1986
1987 /*
1988 * Note: requires also rx_status->band to be prefilled, as well
1989 * as phy_data (apart from phy_data->info_type)
1990 * Note: desc/hdr may be NULL
1991 */
static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
				   struct iwl_rx_mpdu_desc *desc,
				   struct ieee80211_hdr *hdr,
				   struct sk_buff *skb,
				   struct iwl_mvm_rx_phy_data *phy_data,
				   int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	u32 rate_n_flags = phy_data->rate_n_flags;
	u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	bool is_sgi;

	phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;

	/* with TSF overload, d1 carries the PHY info type instead of TSF bits */
	if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		phy_data->info_type =
			le32_get_bits(phy_data->d1,
				      IWL_RX_PHY_DATA1_INFO_TYPE_MASK);

	/* This may be overridden by iwl_mvm_rx_he() to HE_RU */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->bw = RATE_INFO_BW_160;
		break;
	case RATE_MCS_CHAN_WIDTH_320:
		rx_status->bw = RATE_INFO_BW_320;
		break;
	}

	/* must be before L-SIG data */
	if (format == RATE_MCS_MOD_TYPE_HE)
		iwl_mvm_rx_he(mvm, skb, phy_data, queue);

	iwl_mvm_decode_lsig(skb, phy_data);

	rx_status->device_timestamp = phy_data->gp2_on_air_rise;

	/* when PTP RX timestamping is enabled in monitor mode, report an
	 * adjusted 64-bit radiotap timestamp instead of the raw GP2 time
	 */
	if (mvm->rx_ts_ptp && mvm->monitor_on) {
		u64 adj_time =
			iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC);

		rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
		rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
		rx_status->flag &= ~RX_FLAG_MACTIME;
	}

	rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
							 rx_status->band);
	iwl_mvm_get_signal_strength(mvm, desc, hdr, rx_status, rate_n_flags,
				    phy_data->energy_a, phy_data->energy_b);

	/* using TLV format and must be after all fixed len fields */
	if (format == RATE_MCS_MOD_TYPE_EHT)
		iwl_mvm_rx_eht(mvm, skb, phy_data, queue);

	if (unlikely(mvm->monitor_on))
		iwl_mvm_add_rtap_sniffer_config(mvm, skb);

	/* HE encodes short GI in the GI/LTF code, others use the SGI flag */
	is_sgi = format == RATE_MCS_MOD_TYPE_HE ?
		iwl_he_is_sgi(rate_n_flags) :
		rate_n_flags & RATE_MCS_SGI_MSK;

	if (!(format == RATE_MCS_MOD_TYPE_CCK) && is_sgi)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;

	/* first switch: encoding and encoding-specific flags */
	switch (format) {
	case RATE_MCS_MOD_TYPE_VHT:
		rx_status->encoding = RX_ENC_VHT;
		break;
	case RATE_MCS_MOD_TYPE_HE:
		rx_status->encoding = RX_ENC_HE;
		rx_status->he_dcm =
			!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
		break;
	case RATE_MCS_MOD_TYPE_EHT:
		rx_status->encoding = RX_ENC_EHT;
		break;
	}

	/* second switch: rate index / NSS per modulation family */
	switch (format) {
	case RATE_MCS_MOD_TYPE_HT:
		rx_status->encoding = RX_ENC_HT;
		rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		break;
	case RATE_MCS_MOD_TYPE_VHT:
	case RATE_MCS_MOD_TYPE_HE:
	case RATE_MCS_MOD_TYPE_EHT:
		rx_status->nss =
			u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		break;
	default: {
		/* legacy (CCK/OFDM) rates need a band-dependent index lookup */
		int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
								 rx_status->band);

		rx_status->rate_idx = rate;

		/* clamp and log bogus rate values reported by the firmware */
		if ((rate < 0 || rate > 0xFF)) {
			rx_status->rate_idx = 0;
			if (net_ratelimit())
				IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
					rate_n_flags, rx_status->band);
		}

		break;
		}
	}
}
2114
/*
 * Multi-queue RX handler for a received MPDU: parses the RX descriptor
 * (v1 or v3 layout depending on device family), validates lengths,
 * performs crypto/PN and duplicate checks, fills the mac80211 RX status
 * (radiotap included), handles A-MPDU/A-MSDU bookkeeping and reordering,
 * and finally hands the frame to mac80211.
 */
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 len;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	u8 crypt_len = 0;
	size_t desc_size;
	struct iwl_mvm_rx_phy_data phy_data = {};
	u32 format;

	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return;

	/* AX210+ devices use the larger v3 descriptor layout */
	if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		desc_size = sizeof(*desc);
	else
		desc_size = IWL_RX_DESC_SIZE_V1;

	if (unlikely(pkt_len < desc_size)) {
		IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n");
		return;
	}

	/* pull PHY info out of whichever descriptor version we got */
	if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		phy_data.rate_n_flags =
			iwl_mvm_v3_rate_from_fw(desc->v3.rate_n_flags,
						mvm->fw_rates_ver);
		phy_data.channel = desc->v3.channel;
		phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
		phy_data.energy_a = desc->v3.energy_a;
		phy_data.energy_b = desc->v3.energy_b;

		phy_data.d0 = desc->v3.phy_data0;
		phy_data.d1 = desc->v3.phy_data1;
		phy_data.d2 = desc->v3.phy_data2;
		phy_data.d3 = desc->v3.phy_data3;
		phy_data.eht_d4 = desc->phy_eht_data4;
		phy_data.d5 = desc->v3.phy_data5;
	} else {
		phy_data.rate_n_flags =
			iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags,
						mvm->fw_rates_ver);
		phy_data.channel = desc->v1.channel;
		phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
		phy_data.energy_a = desc->v1.energy_a;
		phy_data.energy_b = desc->v1.energy_b;

		phy_data.d0 = desc->v1.phy_data0;
		phy_data.d1 = desc->v1.phy_data1;
		phy_data.d2 = desc->v1.phy_data2;
		phy_data.d3 = desc->v1.phy_data3;
	}

	format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	len = le16_to_cpu(desc->mpdu_len);

	/* the MPDU must fit inside the notification payload */
	if (unlikely(len + desc_size > pkt_len)) {
		IWL_DEBUG_DROP(mvm, "FW lied about packet len\n");
		return;
	}

	phy_data.with_data = true;
	phy_data.phy_info = le16_to_cpu(desc->phy_info);
	phy_data.d4 = desc->phy_data4;

	hdr = (void *)(pkt->data + desc_size);
	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		/*
		 * If the device inserted padding it means that (it thought)
		 * the 802.11 header wasn't a multiple of 4 bytes long. In
		 * this case, reserve two bytes at the start of the SKB to
		 * align the payload properly in case we end up copying it.
		 */
		skb_reserve(skb, 2);
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
	    !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
			     le32_to_cpu(desc->status));
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	/* set the preamble flag if appropriate */
	if (format == RATE_MCS_MOD_TYPE_CCK &&
	    phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* without TSF overload, the descriptor carries a real on-air TSF */
	if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
		u64 tsf_on_air_rise;

		if (mvm->trans->mac_cfg->device_family >=
		    IWL_DEVICE_FAMILY_AX210)
			tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
		else
			tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);

		rx_status->mactime = tsf_on_air_rise;
		/* TSF as indicated by the firmware is at INA time */
		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
	}

	/* derive the band from the descriptor if supported, else the channel */
	if (iwl_mvm_is_band_in_rx_supported(mvm)) {
		u8 band = u8_get_bits(desc->mac_phy_band,
				      IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);

		rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
	} else {
		rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
			NL80211_BAND_2GHZ;
	}

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		bool toggle_bit;

		toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		/*
		 * Toggle is switched whenever new aggregation starts. Make
		 * sure ampdu_reference is never 0 so we can later use it to
		 * see if the frame was really part of an A-MPDU or not.
		 */
		if (toggle_bit != mvm->ampdu_toggle) {
			mvm->ampdu_ref++;
			if (mvm->ampdu_ref == 0)
				mvm->ampdu_ref++;
			mvm->ampdu_toggle = toggle_bit;
			phy_data.first_subframe = true;
		}
		rx_status->ampdu_reference = mvm->ampdu_ref;
	}

	rcu_read_lock();

	/* look up the station: by firmware station ID if it matched one,
	 * otherwise by transmitter address (unicast only)
	 */
	if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
		u8 sta_id = le32_get_bits(desc->status,
					  IWL_RX_MPDU_STATUS_STA_ID);

		if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
			struct ieee80211_link_sta *link_sta;

			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			if (IS_ERR(sta))
				sta = NULL;
			link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);

			if (sta && sta->valid_links && link_sta) {
				rx_status->link_valid = 1;
				rx_status->link_id = link_sta->link_id;
			}
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/*
		 * This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
			      le32_to_cpu(pkt->len_n_flags), queue,
			      &crypt_len)) {
		kfree_skb(skb);
		goto out;
	}

	iwl_mvm_rx_fill_status(mvm, desc, hdr, skb, &phy_data, queue);

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_vif *tx_blocked_vif =
			rcu_dereference(mvm->csa_tx_blocked_vif);
		u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
			       IWL_RX_MPDU_REORDER_BAID_MASK) >>
			       IWL_RX_MPDU_REORDER_BAID_SHIFT);
		struct iwl_fw_dbg_trigger_tlv *trig;
		struct ieee80211_vif *vif = mvmsta->vif;

		/* kick the traffic-condition-monitor work if it's due */
		if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
		    !is_multicast_ether_addr(hdr->addr1) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
			schedule_delayed_work(&mvm->tcm.work, 0);

		/*
		 * We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
			struct iwl_mvm_vif *mvmvif =
				iwl_mvm_vif_from_mac80211(tx_blocked_vif);
			struct iwl_rx_sta_csa rx_sta_csa = {
				.all_sta_unblocked = true,
				.vif = tx_blocked_vif,
			};

			if (mvmvif->csa_target_freq == rx_status->freq)
				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
								 false);
			ieee80211_iterate_stations_atomic(mvm->hw,
							  iwl_mvm_rx_get_sta_block_tx,
							  &rx_sta_csa);

			if (rx_sta_csa.all_sta_unblocked) {
				RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
				/* Unblock BCAST / MCAST station */
				iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
				cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
			}
		}

		rs_update_last_rssi(mvm, mvmsta, rx_status);

		/* optional fw-debug collection when beacon RSSI drops
		 * below the trigger threshold
		 */
		trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
					     ieee80211_vif_to_wdev(vif),
					     FW_DBG_TRIGGER_RSSI);

		if (trig && ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			s32 rssi;

			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			if (rx_status->signal < rssi)
				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
#if defined(__linux__)
							NULL);
#elif defined(__FreeBSD__)
							"");
#endif
		}

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(mvm, sta, skb, pkt);

		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
			IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n",
				       le16_to_cpu(hdr->seq_ctrl));
			kfree_skb(skb);
			goto out;
		}

		/*
		 * Our hardware de-aggregates AMSDUs but copies the mac header
		 * as it to the de-aggregated MPDUs. We need to turn off the
		 * AMSDU bit in the QoS control ourselves.
		 * In addition, HW reverses addr3 and addr4 - reverse it back.
		 */
		if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			if (mvm->trans->mac_cfg->device_family ==
			    IWL_DEVICE_FAMILY_9000) {
				iwl_mvm_flip_address(hdr->addr3);

				if (ieee80211_has_a4(hdr->frame_control))
					iwl_mvm_flip_address(hdr->addr4);
			}
		}
		/* feed the reorder buffer bookkeeping for this BAID */
		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
			u32 reorder_data = le32_to_cpu(desc->reorder_data);

			iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
		}
	}

	/* management stuff on default queue */
	if (!queue) {
		if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
			      ieee80211_is_probe_resp(hdr->frame_control)) &&
			     mvm->sched_scan_pass_all ==
			     SCHED_SCAN_PASS_ALL_ENABLED))
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

		if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
			     ieee80211_is_probe_resp(hdr->frame_control)))
			rx_status->boottime_ns = ktime_get_boottime_ns();
	}

	if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
		kfree_skb(skb);
		goto out;
	}

	/* deliver unless the reorder buffer consumed it, or it was a
	 * time-sync frame or MEI-filtered scan result
	 */
	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
	    likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
	    likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
		if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
		    (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
			rx_status->flag |= RX_FLAG_AMSDU_MORE;

		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
	}
out:
	rcu_read_unlock();
}
2440
/*
 * Handle an RX_NO_DATA notification (a PPDU with no data, e.g. sounding
 * NDP or unmatched MU/TB PPDU): build a 0-length-PSDU radiotap frame for
 * monitor mode and pass it to mac80211.
 */
void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
				struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
	u32 rssi;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	struct iwl_mvm_rx_phy_data phy_data;
	u32 format;

	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return;

	/* the base (pre-v3) struct must always fit */
	if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(struct iwl_rx_no_data)))
		return;

	rssi = le32_to_cpu(desc->rssi);
	phy_data.d0 = desc->phy_info[0];
	phy_data.d1 = desc->phy_info[1];
	/* no-data notifications always carry PHY data in d0/d1 */
	phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
	phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
	phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
	phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
	phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
	phy_data.with_data = false;
	phy_data.rx_vec[0] = desc->rx_vec[0];
	phy_data.rx_vec[1] = desc->rx_vec[1];

	phy_data.rate_n_flags = iwl_mvm_v3_rate_from_fw(desc->rate,
							mvm->fw_rates_ver);

	format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	/* v3 of the notification adds rx_vec[2]/[3] (needed for EHT) */
	if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
				    RX_NO_DATA_NOTIF, 0) >= 3) {
		if (unlikely(iwl_rx_packet_payload_len(pkt) <
		    sizeof(struct iwl_rx_no_data_ver_3)))
		/* invalid len for ver 3 */
			return;
		phy_data.rx_vec[2] = desc->rx_vec[2];
		phy_data.rx_vec[3] = desc->rx_vec[3];
	} else {
		if (format == RATE_MCS_MOD_TYPE_EHT)
			/* no support for EHT before version 3 API */
			return;
	}

	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/* 0-length PSDU */
	rx_status->flag |= RX_FLAG_NO_PSDU;

	/* mark as failed PLCP on any errors to skip checks in mac80211 */
	if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
	    RX_NO_DATA_INFO_ERR_NONE)
		rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;

	/* translate the firmware no-data type into the radiotap PSDU type */
	switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
	case RX_NO_DATA_INFO_TYPE_NDP:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
		break;
	case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
	case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
		break;
	default:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
		break;
	}

	rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
		NL80211_BAND_2GHZ;

	iwl_mvm_rx_fill_status(mvm, NULL, NULL, skb, &phy_data, queue);

	/* no more radio tap info should be put after this point.
	 *
	 * We mark it as mac header, for upper layers to know where
	 * all radio tap header ends.
	 *
	 * Since data doesn't move data while putting data on skb and that is
	 * the only way we use, data + len is the next place that hdr would be put
	 */
	skb_set_mac_header(skb, skb->len);

	/*
	 * Override the nss from the rx_vec since the rate_n_flags has
	 * only 2 bits for the nss which gives a max of 4 ss but there
	 * may be up to 8 spatial streams.
	 */
	switch (format) {
	case RATE_MCS_MOD_TYPE_VHT:
		rx_status->nss =
			le32_get_bits(desc->rx_vec[0],
				      RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
		break;
	case RATE_MCS_MOD_TYPE_HE:
		rx_status->nss =
			le32_get_bits(desc->rx_vec[0],
				      RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
		break;
	case RATE_MCS_MOD_TYPE_EHT:
		/* rx_vec[2] is only valid here because EHT implies ver >= 3 */
		rx_status->nss =
			le32_get_bits(desc->rx_vec[2],
				      RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
	}

	rcu_read_lock();
	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
	rcu_read_unlock();
}
2566
iwl_mvm_rx_frame_release(struct iwl_mvm * mvm,struct napi_struct * napi,struct iwl_rx_cmd_buffer * rxb,int queue)2567 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
2568 struct iwl_rx_cmd_buffer *rxb, int queue)
2569 {
2570 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2571 struct iwl_frame_release *release = (void *)pkt->data;
2572
2573 if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
2574 return;
2575
2576 iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
2577 le16_to_cpu(release->nssn),
2578 queue);
2579 }
2580
/*
 * BAR frame release notification handler: validate the BAID/STA/TID
 * against the driver's BAID mapping, then release reorder-buffered
 * frames up to the NSSN carried in the BAR.
 */
void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mvm_baid_data *baid_data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	unsigned int baid, nssn, sta_id, tid;

	if (IWL_FW_CHECK(mvm, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %d (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	/* unpack the notification fields */
	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mvm->baid_map)))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID %d but not allocated, invalid BAR release!\n",
			      baid);
		goto out;
	}

	/*
	 * NOTE(review): '>' allows sta_id == IWL_STATION_COUNT_MAX through
	 * to the sta_mask check -- presumably intentional since the mask
	 * test rejects invalid IDs anyway; confirm the bound is inclusive.
	 */
	if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX ||
		 !(baid_data->sta_mask & BIT(sta_id)),
		 "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
		 baid, baid_data->sta_mask, baid_data->tid, sta_id,
		 tid))
		goto out;

	IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue);
out:
	rcu_read_unlock();
}
2631
iwl_mvm_rx_beacon_filter_notif(struct iwl_mvm * mvm,struct iwl_rx_cmd_buffer * rxb)2632 void iwl_mvm_rx_beacon_filter_notif(struct iwl_mvm *mvm,
2633 struct iwl_rx_cmd_buffer *rxb)
2634 {
2635 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2636 /* MAC or link ID in v1/v2, but driver has the IDs equal */
2637 struct iwl_beacon_filter_notif *notif = (void *)pkt->data;
2638 u32 id = le32_to_cpu(notif->link_id);
2639 struct iwl_mvm_vif *mvm_vif;
2640 struct ieee80211_vif *vif;
2641
2642 /* >= means AUX MAC/link ID, no energy correction needed then */
2643 if (IWL_FW_CHECK(mvm, id >= ARRAY_SIZE(mvm->vif_id_to_mac),
2644 "invalid link ID %d\n", id))
2645 return;
2646
2647 vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
2648 if (!vif)
2649 return;
2650
2651 mvm_vif = iwl_mvm_vif_from_mac80211(vif);
2652
2653 mvm_vif->deflink.average_beacon_energy =
2654 le32_to_cpu(notif->average_energy);
2655 }
2656