1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024-2025 Intel Corporation
4 */
5
6 #include <net/mac80211.h>
7 #include <kunit/static_stub.h>
8
9 #include "mld.h"
10 #include "sta.h"
11 #include "agg.h"
12 #include "rx.h"
13 #include "hcmd.h"
14 #include "iface.h"
15 #include "time_sync.h"
16 #include "fw/dbg.h"
17 #include "fw/api/rx.h"
18
19  /* relevant PHY data fields extracted from iwl_rx_mpdu_desc (or, when with_data is false, from the raw RX vectors) */
20 struct iwl_mld_rx_phy_data {
21 enum iwl_rx_phy_info_type info_type;
22 __le32 data0;
23 __le32 data1;
24 __le32 data2;
25 __le32 data3;
26 __le32 eht_data4;
27 __le32 data5;
28 __le16 data4;
29 bool first_subframe;
30 bool with_data;
31 __le32 rx_vec[4];
32 u32 rate_n_flags;
33 u32 gp2_on_air_rise;
34 u16 phy_info;
35 u8 energy_a, energy_b;
36 u8 channel;
37 };
38
39 static void
40  iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
41 struct iwl_mld_rx_phy_data *phy_data)
42 {
43 phy_data->phy_info = le16_to_cpu(desc->phy_info);
44 phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
45 phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
46 phy_data->channel = desc->v3.channel;
47 phy_data->energy_a = desc->v3.energy_a;
48 phy_data->energy_b = desc->v3.energy_b;
49 phy_data->data0 = desc->v3.phy_data0;
50 phy_data->data1 = desc->v3.phy_data1;
51 phy_data->data2 = desc->v3.phy_data2;
52 phy_data->data3 = desc->v3.phy_data3;
53 phy_data->data4 = desc->phy_data4;
54 phy_data->eht_data4 = desc->phy_eht_data4;
55 phy_data->data5 = desc->v3.phy_data5;
56 phy_data->with_data = true;
57 }
58
59  static inline int iwl_mld_check_pn(struct iwl_mld *mld, struct sk_buff *skb,
60 int queue, struct ieee80211_sta *sta)
61 {
62 struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
63 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
64 struct iwl_mld_sta *mld_sta;
65 struct iwl_mld_ptk_pn *ptk_pn;
66 int res;
67 u8 tid, keyidx;
68 u8 pn[IEEE80211_CCMP_PN_LEN];
69 u8 *extiv;
70
71 /* multicast and non-data only arrives on default queue; avoid checking
72 * for default queue - we don't want to replicate all the logic that's
73 * necessary for checking the PN on fragmented frames, leave that
74 * to mac80211
75 */
76 if (queue == 0 || !ieee80211_is_data(hdr->frame_control) ||
77 is_multicast_ether_addr(hdr->addr1))
78 return 0;
79
80 if (!(stats->flag & RX_FLAG_DECRYPTED))
81 return 0;
82
83 /* if we are here - this for sure is either CCMP or GCMP */
84 if (!sta) {
85 IWL_DEBUG_DROP(mld,
86 "expected hw-decrypted unicast frame for station\n");
87 return -1;
88 }
89
90 mld_sta = iwl_mld_sta_from_mac80211(sta);
91
92 extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
93 keyidx = extiv[3] >> 6;
94
95 ptk_pn = rcu_dereference(mld_sta->ptk_pn[keyidx]);
96 if (!ptk_pn)
97 return -1;
98
99 if (ieee80211_is_data_qos(hdr->frame_control))
100 tid = ieee80211_get_tid(hdr);
101 else
102 tid = 0;
103
104 /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
105 if (tid >= IWL_MAX_TID_COUNT)
106 return -1;
107
108 	/* load the PN big-endian (PN5..PN0) from the CCMP/GCMP extended IV */
109 pn[0] = extiv[7];
110 pn[1] = extiv[6];
111 pn[2] = extiv[5];
112 pn[3] = extiv[4];
113 pn[4] = extiv[1];
114 pn[5] = extiv[0];
115
116 res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
117 if (res < 0)
118 return -1;
119 if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
120 return -1;
121
122 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
123 stats->flag |= RX_FLAG_PN_VALIDATED;
124
125 return 0;
126 }
127
128 /* iwl_mld_pass_packet_to_mac80211 - passes the packet to mac80211 */
129 void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
130 struct napi_struct *napi,
131 struct sk_buff *skb, int queue,
132 struct ieee80211_sta *sta)
133 {
134 KUNIT_STATIC_STUB_REDIRECT(iwl_mld_pass_packet_to_mac80211,
135 mld, napi, skb, queue, sta);
136
137 if (unlikely(iwl_mld_check_pn(mld, skb, queue, sta))) {
138 kfree_skb(skb);
139 return;
140 }
141
142 ieee80211_rx_napi(mld->hw, sta, skb, napi);
143 }
144 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_pass_packet_to_mac80211);
145
146 static void iwl_mld_fill_signal(struct iwl_mld *mld,
147 struct ieee80211_rx_status *rx_status,
148 struct iwl_mld_rx_phy_data *phy_data)
149 {
150 u32 rate_n_flags = phy_data->rate_n_flags;
151 int energy_a = phy_data->energy_a;
152 int energy_b = phy_data->energy_b;
153 int max_energy;
154
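	/* energy is reported as a positive magnitude per chain; 0 means the
	 * chain was not used, which we report as S8_MIN (otherwise the
	 * signal is simply the negated value, in dBm)
	 */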
155 energy_a = energy_a ? -energy_a : S8_MIN;
156 energy_b = energy_b ? -energy_b : S8_MIN;
157 max_energy = max(energy_a, energy_b);
158
159 IWL_DEBUG_STATS(mld, "energy in A %d B %d, and max %d\n",
160 energy_a, energy_b, max_energy);
161
162 rx_status->signal = max_energy;
163 rx_status->chains =
164 (rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
165 rx_status->chain_signal[0] = energy_a;
166 rx_status->chain_signal[1] = energy_b;
167 }
168
169 static void
170 iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data,
171 struct ieee80211_radiotap_he *he,
172 struct ieee80211_radiotap_he_mu *he_mu,
173 struct ieee80211_rx_status *rx_status)
174 {
175 /* Unfortunately, we have to leave the mac80211 data
176 * incorrect for the case that we receive an HE-MU
177 * transmission and *don't* have the HE phy data (due
178 * to the bits being used for TSF). This shouldn't
179 * happen though as management frames where we need
180 	 * the TSF/timers are not transmitted in HE-MU.
181 */
182 u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
183 u32 rate_n_flags = phy_data->rate_n_flags;
184 u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
185 u8 offs = 0;
186
187 rx_status->bw = RATE_INFO_BW_HE_RU;
188
189 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
190
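	/* RU allocation indices follow the HE numbering mirrored below:
	 * 0-36 are 26-tone RUs, 37-52 are 52-tone, 53-60 are 106-tone,
	 * 61-64 are 242-tone, 65-66 are 484-tone, 67 is 996-tone and
	 * 68 is 2x996-tone; the offset is relative to the first index
	 * of each range.
	 */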
191 switch (ru) {
192 case 0 ... 36:
193 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
194 offs = ru;
195 break;
196 case 37 ... 52:
197 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
198 offs = ru - 37;
199 break;
200 case 53 ... 60:
201 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
202 offs = ru - 53;
203 break;
204 case 61 ... 64:
205 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
206 offs = ru - 61;
207 break;
208 case 65 ... 66:
209 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
210 offs = ru - 65;
211 break;
212 case 67:
213 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
214 break;
215 case 68:
216 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
217 break;
218 }
219 he->data2 |= le16_encode_bits(offs,
220 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
221 he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
222 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
223 if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
224 he->data2 |=
225 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
226
227 #define CHECK_BW(bw) \
228 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
229 RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
230 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
231 RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
232 CHECK_BW(20);
233 CHECK_BW(40);
234 CHECK_BW(80);
235 CHECK_BW(160);
236
237 if (he_mu)
238 he_mu->flags2 |=
239 le16_encode_bits(u32_get_bits(rate_n_flags,
240 RATE_MCS_CHAN_WIDTH_MSK),
241 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
242 else if (he_type == RATE_MCS_HE_TYPE_TRIG)
243 he->data6 |=
244 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
245 le16_encode_bits(u32_get_bits(rate_n_flags,
246 RATE_MCS_CHAN_WIDTH_MSK),
247 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
248 }
249
250 static void
251 iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data,
252 struct ieee80211_radiotap_he_mu *he_mu)
253 {
254 u32 phy_data2 = le32_to_cpu(phy_data->data2);
255 u32 phy_data3 = le32_to_cpu(phy_data->data3);
256 u16 phy_data4 = le16_to_cpu(phy_data->data4);
257 u32 rate_n_flags = phy_data->rate_n_flags;
258
259 if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) {
260 he_mu->flags1 |=
261 cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
262 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
263
264 he_mu->flags1 |=
265 le16_encode_bits(u32_get_bits(phy_data4,
266 IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU),
267 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
268
269 he_mu->ru_ch1[0] = u32_get_bits(phy_data2,
270 IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0);
271 he_mu->ru_ch1[1] = u32_get_bits(phy_data3,
272 IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1);
273 he_mu->ru_ch1[2] = u32_get_bits(phy_data2,
274 IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2);
275 he_mu->ru_ch1[3] = u32_get_bits(phy_data3,
276 IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3);
277 }
278
279 if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) &&
280 (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
281 he_mu->flags1 |=
282 cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
283 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
284
285 he_mu->flags2 |=
286 le16_encode_bits(u32_get_bits(phy_data4,
287 IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU),
288 IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
289
290 he_mu->ru_ch2[0] = u32_get_bits(phy_data2,
291 IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0);
292 he_mu->ru_ch2[1] = u32_get_bits(phy_data3,
293 IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1);
294 he_mu->ru_ch2[2] = u32_get_bits(phy_data2,
295 IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2);
296 he_mu->ru_ch2[3] = u32_get_bits(phy_data3,
297 IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3);
298 }
299 }
300
301 static void
302 iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data,
303 struct ieee80211_radiotap_he *he,
304 struct ieee80211_radiotap_he_mu *he_mu,
305 struct ieee80211_rx_status *rx_status,
306 int queue)
307 {
308 switch (phy_data->info_type) {
309 case IWL_RX_PHY_INFO_TYPE_NONE:
310 case IWL_RX_PHY_INFO_TYPE_CCK:
311 case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
312 case IWL_RX_PHY_INFO_TYPE_HT:
313 case IWL_RX_PHY_INFO_TYPE_VHT_SU:
314 case IWL_RX_PHY_INFO_TYPE_VHT_MU:
315 case IWL_RX_PHY_INFO_TYPE_EHT_MU:
316 case IWL_RX_PHY_INFO_TYPE_EHT_TB:
317 case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
318 case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
319 return;
320 case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
321 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
322 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
323 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
324 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
325 he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
326 IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
327 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
328 he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
329 IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
330 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
331 he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
332 IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
333 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
334 he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
335 IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
336 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
337 fallthrough;
338 case IWL_RX_PHY_INFO_TYPE_HE_SU:
339 case IWL_RX_PHY_INFO_TYPE_HE_MU:
340 case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
341 case IWL_RX_PHY_INFO_TYPE_HE_TB:
342 /* HE common */
343 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
344 IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
345 IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
346 he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
347 IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
348 IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
349 IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
350 he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
351 IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
352 IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
353 if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
354 phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
355 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
356 he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
357 IWL_RX_PHY_DATA0_HE_UPLINK),
358 IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
359 }
360 he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
361 IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
362 IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
363 he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
364 IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
365 IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
366 he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
367 IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
368 IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
369 he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1,
370 IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
371 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
372 he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
373 IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
374 IEEE80211_RADIOTAP_HE_DATA6_TXOP);
375 he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
376 IWL_RX_PHY_DATA0_HE_DOPPLER),
377 IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
378 break;
379 }
380
381 switch (phy_data->info_type) {
382 case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
383 case IWL_RX_PHY_INFO_TYPE_HE_MU:
384 case IWL_RX_PHY_INFO_TYPE_HE_SU:
385 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
386 he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0,
387 IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
388 IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
389 break;
390 default:
391 /* nothing here */
392 break;
393 }
394
395 switch (phy_data->info_type) {
396 case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
397 he_mu->flags1 |=
398 le16_encode_bits(le16_get_bits(phy_data->data4,
399 IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
400 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
401 he_mu->flags1 |=
402 le16_encode_bits(le16_get_bits(phy_data->data4,
403 IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
404 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
405 he_mu->flags2 |=
406 le16_encode_bits(le16_get_bits(phy_data->data4,
407 IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
408 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
409 iwl_mld_decode_he_mu_ext(phy_data, he_mu);
410 fallthrough;
411 case IWL_RX_PHY_INFO_TYPE_HE_MU:
412 he_mu->flags2 |=
413 le16_encode_bits(le32_get_bits(phy_data->data1,
414 IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
415 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
416 he_mu->flags2 |=
417 le16_encode_bits(le32_get_bits(phy_data->data1,
418 IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
419 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
420 fallthrough;
421 case IWL_RX_PHY_INFO_TYPE_HE_TB:
422 case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
423 iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
424 break;
425 case IWL_RX_PHY_INFO_TYPE_HE_SU:
426 he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
427 he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
428 IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
429 IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
430 break;
431 default:
432 /* nothing */
433 break;
434 }
435 }
436
437 static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb,
438 struct iwl_mld_rx_phy_data *phy_data,
439 int queue)
440 {
441 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
442 struct ieee80211_radiotap_he *he = NULL;
443 struct ieee80211_radiotap_he_mu *he_mu = NULL;
444 u32 rate_n_flags = phy_data->rate_n_flags;
445 u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
446 u8 ltf;
447 static const struct ieee80211_radiotap_he known = {
448 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
449 IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
450 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
451 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
452 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
453 IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
454 };
455 static const struct ieee80211_radiotap_he_mu mu_known = {
456 .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
457 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
458 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
459 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
460 .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
461 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
462 };
463 u16 phy_info = phy_data->phy_info;
464
465 he = skb_put_data(skb, &known, sizeof(known));
466 rx_status->flag |= RX_FLAG_RADIOTAP_HE;
467
468 if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
469 phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
470 he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
471 rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
472 }
473
474 /* report the AMPDU-EOF bit on single frames */
475 if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
476 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
477 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
478 if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
479 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
480 }
481
482 if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
483 iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status,
484 queue);
485
486 	/* update aggregation data for the monitor interface's sake on the default queue */
487 if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
488 (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
489 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
490 if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
491 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
492 }
493
494 if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
495 rate_n_flags & RATE_MCS_HE_106T_MSK) {
496 rx_status->bw = RATE_INFO_BW_HE_RU;
497 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
498 }
499
500 /* actually data is filled in mac80211 */
501 if (he_type == RATE_MCS_HE_TYPE_SU ||
502 he_type == RATE_MCS_HE_TYPE_EXT_SU)
503 he->data1 |=
504 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
505
506 #define CHECK_TYPE(F) \
507 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
508 (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
509
510 CHECK_TYPE(SU);
511 CHECK_TYPE(EXT_SU);
512 CHECK_TYPE(MU);
513 CHECK_TYPE(TRIG);
514
515 he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
516
517 if (rate_n_flags & RATE_MCS_BF_MSK)
518 he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
519
520 switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
521 RATE_MCS_HE_GI_LTF_POS) {
522 case 0:
523 if (he_type == RATE_MCS_HE_TYPE_TRIG)
524 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
525 else
526 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
527 if (he_type == RATE_MCS_HE_TYPE_MU)
528 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
529 else
530 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
531 break;
532 case 1:
533 if (he_type == RATE_MCS_HE_TYPE_TRIG)
534 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
535 else
536 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
537 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
538 break;
539 case 2:
540 if (he_type == RATE_MCS_HE_TYPE_TRIG) {
541 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
542 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
543 } else {
544 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
545 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
546 }
547 break;
548 case 3:
549 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
550 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
551 break;
552 case 4:
553 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
554 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
555 break;
556 default:
557 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
558 }
559
560 he->data5 |= le16_encode_bits(ltf,
561 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
562 }
563
564 static void iwl_mld_decode_lsig(struct sk_buff *skb,
565 struct iwl_mld_rx_phy_data *phy_data)
566 {
567 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
568 struct ieee80211_radiotap_lsig *lsig;
569
570 switch (phy_data->info_type) {
571 case IWL_RX_PHY_INFO_TYPE_HT:
572 case IWL_RX_PHY_INFO_TYPE_VHT_SU:
573 case IWL_RX_PHY_INFO_TYPE_VHT_MU:
574 case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
575 case IWL_RX_PHY_INFO_TYPE_HE_SU:
576 case IWL_RX_PHY_INFO_TYPE_HE_MU:
577 case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
578 case IWL_RX_PHY_INFO_TYPE_HE_TB:
579 case IWL_RX_PHY_INFO_TYPE_EHT_MU:
580 case IWL_RX_PHY_INFO_TYPE_EHT_TB:
581 case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
582 case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
583 lsig = skb_put(skb, sizeof(*lsig));
584 lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
585 lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1,
586 IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
587 IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
588 rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
589 break;
590 default:
591 break;
592 }
593 }
594
595 /* Put a TLV on the skb and return the data pointer
596  *
597  * Also pad the length to a multiple of 4 and zero out the data part
598 */
599 static void *
600 iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
601 {
602 struct ieee80211_radiotap_tlv *tlv;
603
604 tlv = skb_put(skb, sizeof(*tlv));
605 tlv->type = cpu_to_le16(type);
606 tlv->len = cpu_to_le16(len);
607 return skb_put_zero(skb, ALIGN(len, 4));
608 }
609
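/* Helpers for building radiotap fields: LE32_DEC_ENC() extracts a field from a
 * little-endian value and re-encodes it into another little-endian field;
 * IWL_MLD_ENC_USIG_VALUE_MASK() does the same and also marks the target field
 * as known in the USIG element's mask.
 */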
610 #define LE32_DEC_ENC(value, dec_bits, enc_bits) \
611 le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
612
613 #define IWL_MLD_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
614 typeof(enc_bits) _enc_bits = enc_bits; \
615 typeof(usig) _usig = usig; \
616 (_usig)->mask |= cpu_to_le32(_enc_bits); \
617 (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
618 } while (0)
619
620 #define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
621 eht->data[(rt_data)] |= \
622 (cpu_to_le32 \
623 (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
624 LE32_DEC_ENC(data ## fw_data, \
625 IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
626 IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))
627
628 #define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
629 __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)
630
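/* The IEEE80211_RADIOTAP_RU_DATA_* values below map a (content channel, RU)
 * pair to the radiotap EHT data[] word that carries it, and IWL_RX_RU_DATA_*
 * maps the firmware's A1-D2 labels to the phy data word (data2-data4) that
 * holds the corresponding RU allocation value.
 */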
631 #define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1
632 #define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2
633 #define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2
634 #define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2
635 #define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3
636 #define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3
637 #define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3
638 #define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4
639
640 #define IWL_RX_RU_DATA_A1 2
641 #define IWL_RX_RU_DATA_A2 2
642 #define IWL_RX_RU_DATA_B1 2
643 #define IWL_RX_RU_DATA_B2 4
644 #define IWL_RX_RU_DATA_C1 3
645 #define IWL_RX_RU_DATA_C2 3
646 #define IWL_RX_RU_DATA_D1 4
647 #define IWL_RX_RU_DATA_D2 4
648
649 #define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \
650 _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \
651 rt_ru, \
652 IWL_RX_RU_DATA_ ## fw_ru, \
653 fw_ru)
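/* As an illustration (not compiled here), IWL_MLD_ENC_EHT_RU(1_1_1, A1)
 * expands roughly to:
 *	eht->data[1] |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN) |
 *			LE32_DEC_ENC(data2,
 *				     IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1,
 *				     IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1);
 */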
654
655 static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld,
656 struct iwl_mld_rx_phy_data *phy_data,
657 struct ieee80211_rx_status *rx_status,
658 struct ieee80211_radiotap_eht *eht,
659 struct ieee80211_radiotap_eht_usig *usig)
660 {
661 if (phy_data->with_data) {
662 __le32 data1 = phy_data->data1;
663 __le32 data2 = phy_data->data2;
664 __le32 data3 = phy_data->data3;
665 __le32 data4 = phy_data->eht_data4;
666 __le32 data5 = phy_data->data5;
667 u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
668
669 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
670 IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
671 IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
672 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
673 IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
674 IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
675 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4,
676 IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
677 IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
678 IWL_MLD_ENC_USIG_VALUE_MASK
679 (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
680 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
681
682 eht->user_info[0] |=
683 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
684 LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
685 IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);
686
687 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
688 eht->data[7] |= LE32_DEC_ENC
689 (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
690 IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
691
692 /*
693 * Hardware labels the content channels/RU allocation values
694 * as follows:
695 * Content Channel 1 Content Channel 2
696 * 20 MHz: A1
697 * 40 MHz: A1 B1
698 * 80 MHz: A1 C1 B1 D1
699 * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2
700 * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4
701 *
702 * However firmware can only give us A1-D2, so the higher
703 * frequencies are missing.
704 */
705
706 switch (phy_bw) {
707 case RATE_MCS_CHAN_WIDTH_320:
708 /* additional values are missing in RX metadata */
709 fallthrough;
710 case RATE_MCS_CHAN_WIDTH_160:
711 /* content channel 1 */
712 IWL_MLD_ENC_EHT_RU(1_2_1, A2);
713 IWL_MLD_ENC_EHT_RU(1_2_2, C2);
714 /* content channel 2 */
715 IWL_MLD_ENC_EHT_RU(2_2_1, B2);
716 IWL_MLD_ENC_EHT_RU(2_2_2, D2);
717 fallthrough;
718 case RATE_MCS_CHAN_WIDTH_80:
719 /* content channel 1 */
720 IWL_MLD_ENC_EHT_RU(1_1_2, C1);
721 /* content channel 2 */
722 IWL_MLD_ENC_EHT_RU(2_1_2, D1);
723 fallthrough;
724 case RATE_MCS_CHAN_WIDTH_40:
725 /* content channel 2 */
726 IWL_MLD_ENC_EHT_RU(2_1_1, B1);
727 fallthrough;
728 case RATE_MCS_CHAN_WIDTH_20:
729 IWL_MLD_ENC_EHT_RU(1_1_1, A1);
730 break;
731 }
732 } else {
733 __le32 usig_a1 = phy_data->rx_vec[0];
734 __le32 usig_a2 = phy_data->rx_vec[1];
735
736 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
737 IWL_RX_USIG_A1_DISREGARD,
738 IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
739 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
740 IWL_RX_USIG_A1_VALIDATE,
741 IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
742 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
743 IWL_RX_USIG_A2_EHT_PPDU_TYPE,
744 IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
745 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
746 IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
747 IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
748 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
749 IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
750 IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
751 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
752 IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
753 IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
754 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
755 IWL_RX_USIG_A2_EHT_SIG_MCS,
756 IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
757 IWL_MLD_ENC_USIG_VALUE_MASK
758 (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
759 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
760 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
761 IWL_RX_USIG_A2_EHT_CRC_OK,
762 IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
763 }
764 }
765
766 static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld,
767 struct iwl_mld_rx_phy_data *phy_data,
768 struct ieee80211_rx_status *rx_status,
769 struct ieee80211_radiotap_eht *eht,
770 struct ieee80211_radiotap_eht_usig *usig)
771 {
772 if (phy_data->with_data) {
773 __le32 data5 = phy_data->data5;
774
775 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
776 IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
777 IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
778 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
779 IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
780 IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
781
782 IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
783 IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
784 IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
785 } else {
786 __le32 usig_a1 = phy_data->rx_vec[0];
787 __le32 usig_a2 = phy_data->rx_vec[1];
788
789 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
790 IWL_RX_USIG_A1_DISREGARD,
791 IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
792 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
793 IWL_RX_USIG_A2_EHT_PPDU_TYPE,
794 IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
795 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
796 IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
797 IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
798 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
799 IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
800 IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
801 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
802 IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
803 IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
804 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
805 IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
806 IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
807 IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
808 IWL_RX_USIG_A2_EHT_CRC_OK,
809 IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
810 }
811 }
812
813 static void iwl_mld_decode_eht_ru(struct iwl_mld *mld,
814 struct ieee80211_rx_status *rx_status,
815 struct ieee80211_radiotap_eht *eht)
816 {
817 u32 ru = le32_get_bits(eht->data[8],
818 IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
819 enum nl80211_eht_ru_alloc nl_ru;
820
821 /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
822 * in an EHT variant User Info field
823 */
824
825 switch (ru) {
826 case 0 ... 36:
827 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
828 break;
829 case 37 ... 52:
830 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
831 break;
832 case 53 ... 60:
833 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
834 break;
835 case 61 ... 64:
836 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
837 break;
838 case 65 ... 66:
839 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
840 break;
841 case 67:
842 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
843 break;
844 case 68:
845 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
846 break;
847 case 69:
848 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
849 break;
850 case 70 ... 81:
851 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
852 break;
853 case 82 ... 89:
854 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
855 break;
856 case 90 ... 93:
857 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
858 break;
859 case 94 ... 95:
860 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
861 break;
862 case 96 ... 99:
863 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
864 break;
865 case 100 ... 103:
866 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
867 break;
868 case 104:
869 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
870 break;
871 case 105 ... 106:
872 nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
873 break;
874 default:
875 return;
876 }
877
878 rx_status->bw = RATE_INFO_BW_EHT_RU;
879 rx_status->eht.ru = nl_ru;
880 }
881
882 static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld,
883 struct iwl_mld_rx_phy_data *phy_data,
884 struct ieee80211_rx_status *rx_status,
885 struct ieee80211_radiotap_eht *eht,
886 struct ieee80211_radiotap_eht_usig *usig)
887
888 {
889 __le32 data0 = phy_data->data0;
890 __le32 data1 = phy_data->data1;
891 __le32 usig_a1 = phy_data->rx_vec[0];
892 u8 info_type = phy_data->info_type;
893
894 /* Not in EHT range */
895 if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
896 info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
897 return;
898
899 usig->common |= cpu_to_le32
900 (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
901 IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
902 if (phy_data->with_data) {
903 usig->common |= LE32_DEC_ENC(data0,
904 IWL_RX_PHY_DATA0_EHT_UPLINK,
905 IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
906 usig->common |= LE32_DEC_ENC(data0,
907 IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
908 IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
909 } else {
910 usig->common |= LE32_DEC_ENC(usig_a1,
911 IWL_RX_USIG_A1_UL_FLAG,
912 IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
913 usig->common |= LE32_DEC_ENC(usig_a1,
914 IWL_RX_USIG_A1_BSS_COLOR,
915 IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
916 }
917
918 usig->common |=
919 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
920 usig->common |=
921 LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
922 IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
923
924 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
925 eht->data[0] |= LE32_DEC_ENC(data0,
926 IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
927 IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
928
929 	/* All RU allocation size/index values are in TB format */
930 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
931 eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
932 IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
933 eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
934 IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
935 eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
936 IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
937
938 iwl_mld_decode_eht_ru(mld, rx_status, eht);
939
940 	/* We only get here if IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, which is
941 	 * only the case in monitor mode, so there is no need to check for
942 	 * monitor mode here
943 */
944 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
945 eht->data[1] |=
946 le32_encode_bits(mld->monitor.p80,
947 IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);
948
949 usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
950 if (phy_data->with_data)
951 usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
952 IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
953 else
954 usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
955 IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
956
957 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
958 eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
959 IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
960
961 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
962 eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
963 IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
964
965 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
966 eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
967 IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
968
969 /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */
970
971 if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
972 usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
973
974 usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
975 usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
976 IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);
977
978 /*
979 * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
980 * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
981 */
982
983 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
984 eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
985 IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
986
987 if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
988 info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
989 iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig);
990
991 if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
992 info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
993 iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig);
994 }
995
996 static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
997 struct iwl_mld_rx_phy_data *phy_data,
998 int queue)
999 {
1000 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1001 struct ieee80211_radiotap_eht *eht;
1002 struct ieee80211_radiotap_eht_usig *usig;
1003 size_t eht_len = sizeof(*eht);
1004
1005 u32 rate_n_flags = phy_data->rate_n_flags;
1006 u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
1007 /* EHT and HE have the same values for LTF */
1008 u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
1009 u16 phy_info = phy_data->phy_info;
1010 u32 bw;
1011
1012 /* u32 for 1 user_info */
1013 if (phy_data->with_data)
1014 eht_len += sizeof(u32);
1015
1016 eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
1017
1018 usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
1019 sizeof(*usig));
1020 rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
1021 usig->common |=
1022 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);
1023
1024 /* specific handling for 320MHz */
1025 bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK);
1026 if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
1027 bw += le32_get_bits(phy_data->data0,
1028 IWL_RX_PHY_DATA0_EHT_BW320_SLOT);
1029
1030 usig->common |= cpu_to_le32
1031 (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));
1032
1033 /* report the AMPDU-EOF bit on single frames */
1034 if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1035 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1036 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1037 if (phy_data->data0 &
1038 cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
1039 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1040 }
1041
1042 if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
1043 iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig);
1044
1045 #define CHECK_TYPE(F) \
1046 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
1047 (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1048
1049 CHECK_TYPE(SU);
1050 CHECK_TYPE(EXT_SU);
1051 CHECK_TYPE(MU);
1052 CHECK_TYPE(TRIG);
1053
1054 switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) {
1055 case 0:
1056 if (he_type == RATE_MCS_HE_TYPE_TRIG) {
1057 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
1058 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
1059 } else {
1060 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
1061 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
1062 }
1063 break;
1064 case 1:
1065 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
1066 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
1067 break;
1068 case 2:
1069 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
1070 if (he_type == RATE_MCS_HE_TYPE_TRIG)
1071 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
1072 else
1073 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
1074 break;
1075 case 3:
1076 if (he_type != RATE_MCS_HE_TYPE_TRIG) {
1077 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
1078 rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
1079 }
1080 break;
1081 default:
1082 /* nothing here */
1083 break;
1084 }
1085
1086 if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
1087 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
1088 eht->data[0] |= cpu_to_le32
1089 (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
1090 ltf) |
1091 FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
1092 rx_status->eht.gi));
1093 }
1094
1095 if (!phy_data->with_data) {
1096 eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
1097 IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
1098 eht->data[7] |=
1099 le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
1100 RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
1101 IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
1102 if (rate_n_flags & RATE_MCS_BF_MSK)
1103 eht->data[7] |=
1104 cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
1105 } else {
1106 eht->user_info[0] |=
1107 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
1108 IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
1109 IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
1110 IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
1111 IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);
1112
1113 if (rate_n_flags & RATE_MCS_BF_MSK)
1114 eht->user_info[0] |=
1115 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
1116
1117 if (rate_n_flags & RATE_MCS_LDPC_MSK)
1118 eht->user_info[0] |=
1119 cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
1120
1121 eht->user_info[0] |= cpu_to_le32
1122 (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
1123 u32_get_bits(rate_n_flags,
1124 RATE_VHT_MCS_RATE_CODE_MSK)) |
1125 FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
1126 u32_get_bits(rate_n_flags,
1127 RATE_MCS_NSS_MSK)));
1128 }
1129 }
1130
1131 #ifdef CONFIG_IWLWIFI_DEBUGFS
1132 static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
1133 struct sk_buff *skb)
1134 {
1135 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1136 struct ieee80211_radiotap_vendor_content *radiotap;
1137 const u16 vendor_data_len = sizeof(mld->monitor.cur_aid);
1138
1139 if (!mld->monitor.cur_aid)
1140 return;
1141
1142 radiotap =
1143 iwl_mld_radiotap_put_tlv(skb,
1144 IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
1145 sizeof(*radiotap) + vendor_data_len);
1146
1147 /* Intel OUI */
1148 radiotap->oui[0] = 0xf6;
1149 radiotap->oui[1] = 0x54;
1150 radiotap->oui[2] = 0x25;
1151 /* radiotap sniffer config sub-namespace */
1152 radiotap->oui_subtype = 1;
1153 radiotap->vendor_type = 0;
1154
1155 /* fill the data now */
1156 memcpy(radiotap->data, &mld->monitor.cur_aid,
1157 sizeof(mld->monitor.cur_aid));
1158
1159 rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
1160 }
1161 #endif
1162
1163 static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
1164 struct iwl_mld_rx_phy_data *phy_data,
1165 struct iwl_rx_mpdu_desc *mpdu_desc,
1166 struct ieee80211_hdr *hdr,
1167 int queue)
1168 {
1169 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
1170 u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1171 u32 rate_n_flags = phy_data->rate_n_flags;
1172 u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
1173 bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
1174
1175 if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
1176 return;
1177
1178 /* Keep packets with CRC errors (and with overrun) for monitor mode
1179 * (otherwise the firmware discards them) but mark them as bad.
1180 */
1181 if (phy_data->with_data &&
1182 (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
1183 !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
1184 IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
1185 le32_to_cpu(mpdu_desc->status));
1186 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
1187 }
1188
1189 phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
1190
1191 if (phy_data->with_data &&
1192 likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
1193 rx_status->mactime =
1194 le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
1195
1196 /* TSF as indicated by the firmware is at INA time */
1197 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
1198 } else {
1199 phy_data->info_type =
1200 le32_get_bits(phy_data->data1,
1201 IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
1202 }
1203
1204 /* management stuff on default queue */
1205 if (!queue && phy_data->with_data &&
1206 unlikely(ieee80211_is_beacon(hdr->frame_control) ||
1207 ieee80211_is_probe_resp(hdr->frame_control))) {
1208 rx_status->boottime_ns = ktime_get_boottime_ns();
1209
1210 if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
1211 mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
1212 }
1213
1214 /* set the preamble flag if appropriate */
1215 if (format == RATE_MCS_CCK_MSK &&
1216 phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
1217 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
1218
1219 iwl_mld_fill_signal(mld, rx_status, phy_data);
1220
1221 /* This may be overridden by iwl_mld_rx_he() to HE_RU */
1222 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1223 case RATE_MCS_CHAN_WIDTH_20:
1224 break;
1225 case RATE_MCS_CHAN_WIDTH_40:
1226 rx_status->bw = RATE_INFO_BW_40;
1227 break;
1228 case RATE_MCS_CHAN_WIDTH_80:
1229 rx_status->bw = RATE_INFO_BW_80;
1230 break;
1231 case RATE_MCS_CHAN_WIDTH_160:
1232 rx_status->bw = RATE_INFO_BW_160;
1233 break;
1234 case RATE_MCS_CHAN_WIDTH_320:
1235 rx_status->bw = RATE_INFO_BW_320;
1236 break;
1237 }
1238
1239 /* must be before L-SIG data */
1240 if (format == RATE_MCS_HE_MSK)
1241 iwl_mld_rx_he(mld, skb, phy_data, queue);
1242
1243 iwl_mld_decode_lsig(skb, phy_data);
1244
1245 rx_status->device_timestamp = phy_data->gp2_on_air_rise;
1246
1247 /* using TLV format and must be after all fixed len fields */
1248 if (format == RATE_MCS_EHT_MSK)
1249 iwl_mld_rx_eht(mld, skb, phy_data, queue);
1250
1251 #ifdef CONFIG_IWLWIFI_DEBUGFS
1252 if (unlikely(mld->monitor.on))
1253 iwl_mld_add_rtap_sniffer_config(mld, skb);
1254 #endif
1255
1256 if (format != RATE_MCS_CCK_MSK && is_sgi)
1257 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1258
1259 if (rate_n_flags & RATE_MCS_LDPC_MSK)
1260 rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
1261
1262 switch (format) {
1263 case RATE_MCS_HT_MSK:
1264 rx_status->encoding = RX_ENC_HT;
1265 rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
1266 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1267 break;
1268 case RATE_MCS_VHT_MSK:
1269 case RATE_MCS_HE_MSK:
1270 case RATE_MCS_EHT_MSK:
1271 if (format == RATE_MCS_VHT_MSK) {
1272 rx_status->encoding = RX_ENC_VHT;
1273 } else if (format == RATE_MCS_HE_MSK) {
1274 rx_status->encoding = RX_ENC_HE;
1275 rx_status->he_dcm =
1276 !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
1277 } else if (format == RATE_MCS_EHT_MSK) {
1278 rx_status->encoding = RX_ENC_EHT;
1279 }
1280
1281 rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
1282 rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
1283 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1284 break;
1285 default: {
1286 int rate =
1287 iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1288 rx_status->band);
1289
1290 /* valid rate */
1291 if (rate >= 0 && rate <= 0xFF) {
1292 rx_status->rate_idx = rate;
1293 break;
1294 }
1295
1296 /* invalid rate */
1297 rx_status->rate_idx = 0;
1298
1299 if (net_ratelimit())
1300 IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n",
1301 rate_n_flags, rx_status->band);
1302 break;
1303 }
1304 }
1305 }
1306
1307 /* iwl_mld_build_rx_skb - adds the data from the rxb to a new skb */
1308 static int iwl_mld_build_rx_skb(struct iwl_mld *mld, struct sk_buff *skb,
1309 struct ieee80211_hdr *hdr, u16 len,
1310 u8 crypt_len, struct iwl_rx_cmd_buffer *rxb)
1311 {
1312 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1313 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
1314 unsigned int headlen, fraglen, pad_len = 0;
1315 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
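	/* the MIC/CRC length field appears to be in 2-byte units, hence the shift */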
1316 u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
1317 IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
1318
1319 if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
1320 len -= 2;
1321 pad_len = 2;
1322 }
1323
1324 	/* For a non-monitor interface, strip the bytes the RADA might not have
1325 * removed (it might be disabled, e.g. for mgmt frames). As a monitor
1326 * interface cannot exist with other interfaces, this removal is safe
1327 * and sufficient, in monitor mode there's no decryption being done.
1328 */
1329 if (len > mic_crc_len && !ieee80211_hw_check(mld->hw, RX_INCLUDES_FCS))
1330 len -= mic_crc_len;
1331
1332 /* If frame is small enough to fit in skb->head, pull it completely.
1333 * If not, only pull ieee80211_hdr (including crypto if present, and
1334 * an additional 8 bytes for SNAP/ethertype, see below) so that
1335 * splice() or TCP coalesce are more efficient.
1336 *
1337 * Since, in addition, ieee80211_data_to_8023() always pull in at
1338 * least 8 bytes (possibly more for mesh) we can do the same here
1339 * to save the cost of doing it later. That still doesn't pull in
1340 * the actual IP header since the typical case has a SNAP header.
1341 * If the latter changes (there are efforts in the standards group
1342 * to do so) we should revisit this and ieee80211_data_to_8023().
1343 */
1344 headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
1345
1346 /* The firmware may align the packet to DWORD.
1347 * The padding is inserted after the IV.
1348 * After copying the header + IV skip the padding if
1349 * present before copying packet data.
1350 */
1351 hdrlen += crypt_len;
1352
1353 if (unlikely(headlen < hdrlen))
1354 return -EINVAL;
1355
1356 	/* Putting data on the skb does not move data that is already there,
1357 	 * and that is the only way we add data, so data + len is where the
1358 	 * header we are about to copy will start
1359 */
1360 skb_set_mac_header(skb, skb->len);
1361 skb_put_data(skb, hdr, hdrlen);
1362 skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
1363
1364 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1365 struct {
1366 u8 hdr[6];
1367 __be16 type;
1368 } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
1369
1370 if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
1371 !ether_addr_equal(shdr->hdr, rfc1042_header) ||
1372 (shdr->type != htons(ETH_P_IP) &&
1373 shdr->type != htons(ETH_P_ARP) &&
1374 shdr->type != htons(ETH_P_IPV6) &&
1375 shdr->type != htons(ETH_P_8021Q) &&
1376 shdr->type != htons(ETH_P_PAE) &&
1377 shdr->type != htons(ETH_P_TDLS))))
1378 skb->ip_summed = CHECKSUM_NONE;
1379 }
1380
1381 fraglen = len - headlen;
1382
1383 if (fraglen) {
1384 int offset = (u8 *)hdr + headlen + pad_len -
1385 (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
1386
1387 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
1388 fraglen, rxb->truesize);
1389 }
1390
1391 return 0;
1392 }
1393
1394 /* returns true if a packet is a duplicate or has an invalid TID and
1395  * should be dropped. Updates the A-MSDU PN tracking info
1396 */
1397 VISIBLE_IF_IWLWIFI_KUNIT
1398 bool
1399 iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
1400 struct ieee80211_hdr *hdr,
1401 const struct iwl_rx_mpdu_desc *mpdu_desc,
1402 struct ieee80211_rx_status *rx_status, int queue)
1403 {
1404 struct iwl_mld_sta *mld_sta;
1405 struct iwl_mld_rxq_dup_data *dup_data;
1406 u8 tid, sub_frame_idx;
1407
1408 if (WARN_ON(!sta))
1409 return false;
1410
1411 mld_sta = iwl_mld_sta_from_mac80211(sta);
1412
1413 if (WARN_ON_ONCE(!mld_sta->dup_data))
1414 return false;
1415
1416 dup_data = &mld_sta->dup_data[queue];
1417
1418 /* Drop duplicate 802.11 retransmissions
1419 * (IEEE 802.11-2020: 10.3.2.14 "Duplicate detection and recovery")
1420 */
1421 if (ieee80211_is_ctl(hdr->frame_control) ||
1422 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1423 is_multicast_ether_addr(hdr->addr1))
1424 return false;
1425
1426 if (ieee80211_is_data_qos(hdr->frame_control)) {
1427 /* frame has qos control */
1428 tid = ieee80211_get_tid(hdr);
1429 if (tid >= IWL_MAX_TID_COUNT)
1430 return true;
1431 } else {
1432 tid = IWL_MAX_TID_COUNT;
1433 }
1434
1435 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
1436 sub_frame_idx = mpdu_desc->amsdu_info &
1437 IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
1438
1439 if (IWL_FW_CHECK(mld,
1440 sub_frame_idx > 0 &&
1441 !(mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU),
1442 "got sub_frame_idx=%d but A-MSDU flag is not set\n",
1443 sub_frame_idx))
1444 return true;
1445
1446 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1447 dup_data->last_seq[tid] == hdr->seq_ctrl &&
1448 dup_data->last_sub_frame_idx[tid] >= sub_frame_idx))
1449 return true;
1450
1451 	/* Allow the same PN as the first subframe for following subframes */
1452 if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
1453 sub_frame_idx > dup_data->last_sub_frame_idx[tid])
1454 rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
1455
1456 dup_data->last_seq[tid] = hdr->seq_ctrl;
1457 dup_data->last_sub_frame_idx[tid] = sub_frame_idx;
1458
1459 rx_status->flag |= RX_FLAG_DUP_VALIDATED;
1460
1461 return false;
1462 }
1463 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_is_dup);
1464
1465 static void iwl_mld_update_last_rx_timestamp(struct iwl_mld *mld, u8 baid)
1466 {
1467 unsigned long now = jiffies;
1468 unsigned long timeout;
1469 struct iwl_mld_baid_data *ba_data;
1470
1471 ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
1472 if (!ba_data) {
1473 IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
1474 return;
1475 }
1476
1477 if (!ba_data->timeout)
1478 return;
1479
1480 /* To minimize cache bouncing between RX queues, avoid frequent updates
1481 * to last_rx_timestamp. update it only when the timeout period has
1482 * passed. The worst-case scenario is the session expiring after
1483 * approximately 2 * timeout, which is negligible (the update is
1484 * atomic).
1485 */
1486 timeout = TU_TO_JIFFIES(ba_data->timeout);
1487 if (time_is_before_jiffies(ba_data->last_rx_timestamp + timeout))
1488 ba_data->last_rx_timestamp = now;
1489 }
1490
1491 /* Processes received packets for a station.
1492 * Sets *drop to true if the packet should be dropped.
1493 * Returns the station if found, or NULL otherwise.
1494 */
1495 static struct ieee80211_sta *
1496 iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
1497 struct sk_buff *skb,
1498 const struct iwl_rx_mpdu_desc *mpdu_desc,
1499 const struct iwl_rx_packet *pkt, int queue, bool *drop)
1500 {
1501 struct ieee80211_sta *sta = NULL;
1502 struct ieee80211_link_sta *link_sta = NULL;
1503 struct ieee80211_rx_status *rx_status;
1504 u8 baid;
1505
1506 if (mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
1507 u8 sta_id = le32_get_bits(mpdu_desc->status,
1508 IWL_RX_MPDU_STATUS_STA_ID);
1509
1510 if (IWL_FW_CHECK(mld,
1511 sta_id >= mld->fw->ucode_capa.num_stations,
1512 "rx_mpdu: invalid sta_id %d\n", sta_id))
1513 return NULL;
1514
1515 link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1516 if (!IS_ERR_OR_NULL(link_sta))
1517 sta = link_sta->sta;
1518 } else if (!is_multicast_ether_addr(hdr->addr2)) {
1519 /* Passing NULL is fine since we prevent two stations with the
1520 * same address from being added.
1521 */
1522 sta = ieee80211_find_sta_by_ifaddr(mld->hw, hdr->addr2, NULL);
1523 }
1524
1525 /* we may not have any station yet */
1526 if (!sta)
1527 return NULL;
1528
1529 rx_status = IEEE80211_SKB_RXCB(skb);
1530
1531 if (link_sta && sta->valid_links) {
1532 rx_status->link_valid = true;
1533 rx_status->link_id = link_sta->link_id;
1534 }
1535
1536 	/* fill in the checksum computed by the hardware (CHECKSUM_COMPLETE) */
1537 if (ieee80211_is_data(hdr->frame_control) &&
1538 pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
1539 u16 hwsum = be16_to_cpu(mpdu_desc->v3.raw_xsum);
1540
1541 skb->ip_summed = CHECKSUM_COMPLETE;
1542 skb->csum = csum_unfold(~(__force __sum16)hwsum);
1543 }
1544
1545 if (iwl_mld_is_dup(mld, sta, hdr, mpdu_desc, rx_status, queue)) {
1546 IWL_DEBUG_DROP(mld, "Dropping duplicate packet 0x%x\n",
1547 le16_to_cpu(hdr->seq_ctrl));
1548 *drop = true;
1549 return NULL;
1550 }
1551
1552 baid = le32_get_bits(mpdu_desc->reorder_data,
1553 IWL_RX_MPDU_REORDER_BAID_MASK);
1554 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
1555 iwl_mld_update_last_rx_timestamp(mld, baid);
1556
1557 if (link_sta && ieee80211_is_data(hdr->frame_control)) {
1558 u8 sub_frame_idx = mpdu_desc->amsdu_info &
1559 IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
1560
1561 /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
1562 if (!sub_frame_idx || sub_frame_idx == 1)
1563 iwl_mld_count_mpdu_rx(link_sta, queue, 1);
1564
1565 if (!is_multicast_ether_addr(hdr->addr1))
1566 iwl_mld_low_latency_update_counters(mld, hdr, sta,
1567 queue);
1568 }
1569
1570 return sta;
1571 }
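/*
 * Note on the CHECKSUM_COMPLETE path above (illustrative, based on the
 * generic helpers in <net/checksum.h>): the device reports a folded 16-bit
 * ones' complement sum of the frame. Complementing it and unfolding it back
 * to a 32-bit __wsum yields the value the stack expects in skb->csum:
 *
 *	__wsum sum = csum_unfold(~(__force __sum16)hwsum);
 *
 * With ip_summed set to CHECKSUM_COMPLETE, the network stack can validate
 * L3/L4 checksums from skb->csum without walking the payload again.
 */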
1572
1573 #define KEY_IDX_LEN 2
1574
1575 static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
1576 struct ieee80211_hdr *hdr,
1577 struct ieee80211_rx_status *rx_status,
1578 u32 mpdu_status,
1579 u32 mpdu_len)
1580 {
1581 struct wireless_dev *wdev;
1582 struct iwl_mld_sta *mld_sta;
1583 struct iwl_mld_vif *mld_vif;
1584 u8 keyidx;
1585 struct ieee80211_key_conf *key;
1586 const u8 *frame = (void *)hdr;
1587
1588 if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
1589 IWL_RX_MPDU_STATUS_SEC_NONE)
1590 return 0;
1591
1592 /* For non-beacon frames we don't really care. Beacons, however,
1593 * may be filtered out by the firmware (and it can filter a lot,
1594 * though usually only if nothing has changed), so we rely on the
1595 * firmware's replay detection here; otherwise, beacons that the
1596 * firmware previously filtered could effectively be replayed
1597 * without us noticing.
1598 */
1599 if (!ieee80211_is_beacon(hdr->frame_control))
1600 return 0;
1601
1602 if (!sta)
1603 return -1;
1604
1605 mld_sta = iwl_mld_sta_from_mac80211(sta);
1606 mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
1607
1608 /* key mismatch - will also report !MIC_OK but we shouldn't count it */
1609 if (!(mpdu_status & IWL_RX_MPDU_STATUS_KEY_VALID))
1610 goto report;
1611
1612 /* good cases */
1613 if (likely(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK &&
1614 !(mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
1615 rx_status->flag |= RX_FLAG_DECRYPTED;
1616 return 0;
1617 }
1618
1619 /* both keys will have the same cipher and MIC length, use
1620 * whichever one is available
1621 */
1622 key = rcu_dereference(mld_vif->bigtks[0]);
1623 if (!key) {
1624 key = rcu_dereference(mld_vif->bigtks[1]);
1625 if (!key)
1626 goto report;
1627 }
1628
1629 if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
1630 goto report;
1631
1632 /* get the real key ID */
1633 keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
1634 /* and if that's the other key, look it up */
1635 if (keyidx != key->keyidx) {
1636 /* This shouldn't happen since the firmware checked it, but be
1637 * safe in case, e.g., the MIC length is wrong as well.
1638 */
1639 if (keyidx != 6 && keyidx != 7)
1640 return -1;
1641
1642 key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
1643 if (!key)
1644 goto report;
1645 }
1646
1647 /* Report status to mac80211 */
1648 if (!(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK))
1649 ieee80211_key_mic_failure(key);
1650 else if (mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
1651 ieee80211_key_replay(key);
1652 report:
1653 wdev = ieee80211_vif_to_wdev(mld_sta->vif);
1654 if (wdev->netdev)
1655 cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr,
1656 mpdu_len);
1657
1658 return -1;
1659 }
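/*
 * Layout note (illustrative): the key ID above is read out of the
 * Management MIC element (MME) that protected beacons carry at the very end
 * of the frame body:
 *
 *	... beacon body ... | KeyID (2) | IPN (6) | MIC (icv_len) |
 *	                                                            ^ mpdu_len
 *
 * so the first KeyID octet sits at
 *	mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN
 * Valid BIGTK key IDs are 6 and 7, which is why anything else is rejected
 * outright and why bigtks[keyidx - 6] selects the other key.
 */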
1660
1661 static int iwl_mld_rx_crypto(struct iwl_mld *mld,
1662 struct ieee80211_sta *sta,
1663 struct ieee80211_hdr *hdr,
1664 struct ieee80211_rx_status *rx_status,
1665 struct iwl_rx_mpdu_desc *desc, int queue,
1666 u32 pkt_flags, u8 *crypto_len)
1667 {
1668 u32 status = le32_to_cpu(desc->status);
1669
1670 if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
1671 !ieee80211_has_protected(hdr->frame_control)))
1672 return iwl_mld_rx_mgmt_prot(sta, hdr, rx_status, status,
1673 le16_to_cpu(desc->mpdu_len));
1674
1675 if (!ieee80211_has_protected(hdr->frame_control) ||
1676 (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
1677 IWL_RX_MPDU_STATUS_SEC_NONE)
1678 return 0;
1679
1680 switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
1681 case IWL_RX_MPDU_STATUS_SEC_CCM:
1682 case IWL_RX_MPDU_STATUS_SEC_GCM:
1683 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
1684 if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
1685 IWL_DEBUG_DROP(mld,
1686 "Dropping packet, bad MIC (CCM/GCM)\n");
1687 return -1;
1688 }
1689
1690 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
1691 *crypto_len = IEEE80211_CCMP_HDR_LEN;
1692 return 0;
1693 case IWL_RX_MPDU_STATUS_SEC_TKIP:
1694 if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
1695 return -1;
1696
1697 if (!(status & RX_MPDU_RES_STATUS_MIC_OK))
1698 rx_status->flag |= RX_FLAG_MMIC_ERROR;
1699
1700 if (pkt_flags & FH_RSCSR_RADA_EN) {
1701 rx_status->flag |= RX_FLAG_ICV_STRIPPED;
1702 rx_status->flag |= RX_FLAG_MMIC_STRIPPED;
1703 }
1704
1705 *crypto_len = IEEE80211_TKIP_IV_LEN;
1706 rx_status->flag |= RX_FLAG_DECRYPTED;
1707 return 0;
1708 default:
1709 break;
1710 }
1711
1712 return 0;
1713 }
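/*
 * Illustrative summary of the flag handling above: for CCMP/GCMP the
 * hardware verifies and strips the MIC (hence RX_FLAG_DECRYPTED |
 * RX_FLAG_MIC_STRIPPED), while the 8-byte PN/ExtIV header is left in place;
 * *crypto_len reports its length (IEEE80211_CCMP_HDR_LEN) so the caller can
 * account for it when building the final skb. For TKIP, the ICV and Michael
 * MIC are only stripped when the RADA engine handled the frame
 * (FH_RSCSR_RADA_EN), so the corresponding RX_FLAG_*_STRIPPED bits are set
 * conditionally.
 */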
1714
1715 static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
1716 struct iwl_mld_rx_phy_data *phy_data,
1717 struct ieee80211_rx_status *rx_status)
1718 {
1719 bool toggle_bit =
1720 phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
1721
1722 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1723 /* Toggle is switched whenever new aggregation starts. Make
1724 * sure ampdu_reference is never 0 so we can later use it to
1725 * see if the frame was really part of an A-MPDU or not.
1726 */
1727 if (toggle_bit != mld->monitor.ampdu_toggle) {
1728 mld->monitor.ampdu_ref++;
1729 if (mld->monitor.ampdu_ref == 0)
1730 mld->monitor.ampdu_ref++;
1731 mld->monitor.ampdu_toggle = toggle_bit;
1732 phy_data->first_subframe = true;
1733 }
1734 rx_status->ampdu_reference = mld->monitor.ampdu_ref;
1735 }
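/*
 * Illustrative sketch (not driver code): the toggle-to-reference translation
 * above can be modelled on its own as
 *
 *	static u32 ampdu_ref(u32 *ref, bool *last_toggle, bool toggle)
 *	{
 *		if (toggle != *last_toggle) {
 *			if (++(*ref) == 0)
 *				(*ref)++;
 *			*last_toggle = toggle;
 *		}
 *		return *ref;
 *	}
 *
 * All MPDUs of one aggregate then share the same non-zero reference, which
 * is what radiotap consumers use to group them back into an A-MPDU.
 */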
1736
1737 static void
1738 iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
1739 struct iwl_rx_mpdu_desc *mpdu_desc,
1740 struct ieee80211_rx_status *rx_status)
1741 {
1742 enum nl80211_band band;
1743
1744 band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx);
1745 rx_status->band = iwl_mld_phy_band_to_nl80211(band);
1746 rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
1747 rx_status->band);
1748 }
1749
1750 void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
1751 struct iwl_rx_cmd_buffer *rxb, int queue)
1752 {
1753 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1754 struct iwl_mld_rx_phy_data phy_data = {};
1755 struct iwl_rx_mpdu_desc *mpdu_desc = (void *)pkt->data;
1756 struct ieee80211_sta *sta;
1757 struct ieee80211_hdr *hdr;
1758 struct sk_buff *skb;
1759 size_t mpdu_desc_size = sizeof(*mpdu_desc);
1760 bool drop = false;
1761 u8 crypto_len = 0;
1762 u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1763 u32 mpdu_len;
1764 enum iwl_mld_reorder_result reorder_res;
1765 struct ieee80211_rx_status *rx_status;
1766
1767 if (unlikely(mld->fw_status.in_hw_restart))
1768 return;
1769
1770 if (IWL_FW_CHECK(mld, pkt_len < mpdu_desc_size,
1771 "Bad REPLY_RX_MPDU_CMD size (%d)\n", pkt_len))
1772 return;
1773
1774 mpdu_len = le16_to_cpu(mpdu_desc->mpdu_len);
1775
1776 if (IWL_FW_CHECK(mld, mpdu_len + mpdu_desc_size > pkt_len,
1777 "FW lied about packet len (%d)\n", pkt_len))
1778 return;
1779
1780 /* Don't use dev_alloc_skb(); we'll have enough headroom once the
1781 * ieee80211_hdr has been pulled.
1782 */
1783 skb = alloc_skb(128, GFP_ATOMIC);
1784 if (!skb) {
1785 IWL_ERR(mld, "alloc_skb failed\n");
1786 return;
1787 }
1788
1789 hdr = (void *)(pkt->data + mpdu_desc_size);
1790
1791 iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
1792
1793 if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
1794 /* If the device inserted padding it means that (it thought)
1795 * the 802.11 header wasn't a multiple of 4 bytes long. In
1796 * this case, reserve two bytes at the start of the SKB to
1797 * align the payload properly in case we end up copying it.
1798 */
1799 skb_reserve(skb, 2);
1800 }
1801
1802 rx_status = IEEE80211_SKB_RXCB(skb);
1803
1804 /* this is needed early */
1805 iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
1806
1807 rcu_read_lock();
1808
1809 sta = iwl_mld_rx_with_sta(mld, hdr, skb, mpdu_desc, pkt, queue, &drop);
1810 if (drop)
1811 goto drop;
1812
1813 /* update aggregation data on the default queue, for the monitor interface's sake */
1814 if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
1815 iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
1816
1817 iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
1818
1819 if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
1820 le32_to_cpu(pkt->len_n_flags), &crypto_len))
1821 goto drop;
1822
1823 if (iwl_mld_build_rx_skb(mld, skb, hdr, mpdu_len, crypto_len, rxb))
1824 goto drop;
1825
1826 /* A time sync frame is saved here and released later, when the
1827 * notification with the timestamps arrives.
1828 */
1829 if (iwl_mld_time_sync_frame(mld, skb, hdr->addr2))
1830 goto out;
1831
1832 reorder_res = iwl_mld_reorder(mld, napi, queue, sta, skb, mpdu_desc);
1833 switch (reorder_res) {
1834 case IWL_MLD_PASS_SKB:
1835 break;
1836 case IWL_MLD_DROP_SKB:
1837 goto drop;
1838 case IWL_MLD_BUFFERED_SKB:
1839 goto out;
1840 default:
1841 WARN_ON(1);
1842 goto drop;
1843 }
1844
1845 iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
1846
1847 goto out;
1848
1849 drop:
1850 kfree_skb(skb);
1851 out:
1852 rcu_read_unlock();
1853 }
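/*
 * Descriptive summary of the per-MPDU flow above:
 *
 *	1. validate the descriptor and MPDU length against the RX packet
 *	2. allocate a small skb (the payload is attached from the rxb later)
 *	3. extract PHY data and the band/frequency
 *	4. under rcu_read_lock(): look up the station, drop duplicates and
 *	   refresh the BA session timestamp
 *	5. update A-MPDU monitor state (default queue only), fill rx_status,
 *	   run the crypto checks and build the skb from the rxb page
 *	6. either hold the frame for time sync, buffer it for reordering,
 *	   or hand it to mac80211
 */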
1854
1855 #define SYNC_RX_QUEUE_TIMEOUT (HZ)
1856 void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
1857 enum iwl_mld_internal_rxq_notif_type type,
1858 const void *notif_payload, u32 notif_payload_size)
1859 {
1860 u8 num_rx_queues = mld->trans->num_rx_queues;
1861 struct {
1862 struct iwl_rxq_sync_cmd sync_cmd;
1863 struct iwl_mld_internal_rxq_notif notif;
1864 } __packed cmd = {
1865 .sync_cmd.rxq_mask = cpu_to_le32(BIT(num_rx_queues) - 1),
1866 .sync_cmd.count =
1867 cpu_to_le32(sizeof(struct iwl_mld_internal_rxq_notif) +
1868 notif_payload_size),
1869 .notif.type = type,
1870 .notif.cookie = mld->rxq_sync.cookie,
1871 };
1872 struct iwl_host_cmd hcmd = {
1873 .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
1874 .data[0] = &cmd,
1875 .len[0] = sizeof(cmd),
1876 .data[1] = notif_payload,
1877 .len[1] = notif_payload_size,
1878 };
1879 int ret;
1880
1881 /* size must be a multiple of DWORD */
1882 if (WARN_ON(cmd.sync_cmd.count & cpu_to_le32(3)))
1883 return;
1884
1885 mld->rxq_sync.state = (1 << num_rx_queues) - 1;
1886
1887 ret = iwl_mld_send_cmd(mld, &hcmd);
1888 if (ret) {
1889 IWL_ERR(mld, "Failed to trigger RX queues sync (%d)\n", ret);
1890 goto out;
1891 }
1892
1893 ret = wait_event_timeout(mld->rxq_sync.waitq,
1894 READ_ONCE(mld->rxq_sync.state) == 0,
1895 SYNC_RX_QUEUE_TIMEOUT);
1896 WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n",
1897 mld->rxq_sync.state, mld->rxq_sync.cookie);
1898
1899 out:
1900 mld->rxq_sync.state = 0;
1901 mld->rxq_sync.cookie++;
1902 }
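/*
 * Illustrative note (not driver-specific): the synchronization above is the
 * classic "bitmask of outstanding responders + waitqueue" pattern:
 *
 *	state = all queue bits set;
 *	send trigger command to the firmware;
 *	wait_event_timeout(waitq, READ_ONCE(state) == 0, timeout);
 *
 * Each RX queue clears its own bit when the echoed notification arrives (see
 * the handler below) and wakes the waiter once the mask is empty. The cookie
 * is bumped on every invocation so that late notifications from a previous
 * sync cannot clear bits of the current one.
 */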
1903
1904 void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
1905 struct napi_struct *napi,
1906 struct iwl_rx_packet *pkt, int queue)
1907 {
1908 struct iwl_rxq_sync_notification *notif;
1909 struct iwl_mld_internal_rxq_notif *internal_notif;
1910 u32 len = iwl_rx_packet_payload_len(pkt);
1911 size_t combined_notif_len = sizeof(*notif) + sizeof(*internal_notif);
1912
1913 notif = (void *)pkt->data;
1914 internal_notif = (void *)notif->payload;
1915
1916 if (IWL_FW_CHECK(mld, len < combined_notif_len,
1917 "invalid notification size %u (%zu)\n",
1918 len, combined_notif_len))
1919 return;
1920
1921 len -= combined_notif_len;
1922
1923 if (IWL_FW_CHECK(mld, mld->rxq_sync.cookie != internal_notif->cookie,
1924 "received expired RX queue sync message (cookie=%d expected=%d q[%d])\n",
1925 internal_notif->cookie, mld->rxq_sync.cookie, queue))
1926 return;
1927
1928 switch (internal_notif->type) {
1929 case IWL_MLD_RXQ_EMPTY:
1930 IWL_FW_CHECK(mld, len,
1931 "invalid empty notification size %d\n", len);
1932 break;
1933 case IWL_MLD_RXQ_NOTIF_DEL_BA:
1934 if (IWL_FW_CHECK(mld, len != sizeof(struct iwl_mld_delba_data),
1935 "invalid delba notification size %u (%zu)\n",
1936 len, sizeof(struct iwl_mld_delba_data)))
1937 break;
1938 iwl_mld_del_ba(mld, queue, (void *)internal_notif->payload);
1939 break;
1940 default:
1941 WARN_ON_ONCE(1);
1942 }
1943
1944 IWL_FW_CHECK(mld, !test_and_clear_bit(queue, &mld->rxq_sync.state),
1945 "RXQ sync: queue %d responded a second time!\n", queue);
1946
1947 if (READ_ONCE(mld->rxq_sync.state) == 0)
1948 wake_up(&mld->rxq_sync.waitq);
1949 }
1950
1951 void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
1952 struct iwl_rx_packet *pkt, int queue)
1953 {
1954 struct iwl_rx_no_data_ver_3 *desc;
1955 struct iwl_mld_rx_phy_data phy_data;
1956 struct ieee80211_rx_status *rx_status;
1957 struct sk_buff *skb;
1958 u32 format, rssi;
1959
1960 if (unlikely(mld->fw_status.in_hw_restart))
1961 return;
1962
1963 if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc),
1964 "Bad RX_NO_DATA_NOTIF size (%d)\n",
1965 iwl_rx_packet_payload_len(pkt)))
1966 return;
1967
1968 desc = (void *)pkt->data;
1969
1970 rssi = le32_to_cpu(desc->rssi);
1971 phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
1972 phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
1973 phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
1974 phy_data.data0 = desc->phy_info[0];
1975 phy_data.data1 = desc->phy_info[1];
1976 phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
1977 phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
1978 phy_data.rate_n_flags = le32_to_cpu(desc->rate);
1979 phy_data.with_data = false;
1980
1981 BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
1982 memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec));
1983
1984 format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1985
1986 /* Don't use dev_alloc_skb(); we'll have enough headroom once the
1987 * ieee80211_hdr has been pulled.
1988 */
1989 skb = alloc_skb(128, GFP_ATOMIC);
1990 if (!skb) {
1991 IWL_ERR(mld, "alloc_skb failed\n");
1992 return;
1993 }
1994
1995 rx_status = IEEE80211_SKB_RXCB(skb);
1996
1997 /* 0-length PSDU */
1998 rx_status->flag |= RX_FLAG_NO_PSDU;
1999
2000 /* mark as failed PLCP on any errors to skip checks in mac80211 */
2001 if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
2002 RX_NO_DATA_INFO_ERR_NONE)
2003 rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
2004
2005 switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
2006 case RX_NO_DATA_INFO_TYPE_NDP:
2007 rx_status->zero_length_psdu_type =
2008 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
2009 break;
2010 case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
2011 case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
2012 rx_status->zero_length_psdu_type =
2013 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
2014 break;
2015 default:
2016 rx_status->zero_length_psdu_type =
2017 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
2018 break;
2019 }
2020
2021 rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
2022 NL80211_BAND_2GHZ;
2023
2024 rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
2025 rx_status->band);
2026
2027 iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
2028
2029 /* No more radiotap info should be added after this point.
2030 * Mark it as mac header for upper layers to know where
2031 * the radiotap header ends.
2032 */
2033 skb_set_mac_header(skb, skb->len);
2034
2035 /* Override the NSS from the rx_vec, since rate_n_flags has only
2036 * one bit for the NSS, which allows at most 2 spatial streams,
2037 * while there may be up to 8.
2038 */
2039 switch (format) {
2040 case RATE_MCS_VHT_MSK:
2041 rx_status->nss =
2042 le32_get_bits(desc->rx_vec[0],
2043 RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
2044 break;
2045 case RATE_MCS_HE_MSK:
2046 rx_status->nss =
2047 le32_get_bits(desc->rx_vec[0],
2048 RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
2049 break;
2050 case RATE_MCS_EHT_MSK:
2051 rx_status->nss =
2052 le32_get_bits(desc->rx_vec[2],
2053 RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
2054 }
2055
2056 /* pass the packet to mac80211 */
2057 rcu_read_lock();
2058 ieee80211_rx_napi(mld->hw, NULL, skb, napi);
2059 rcu_read_unlock();
2060 }
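/*
 * Illustrative note (assumption about the field encoding): the NSTS fields
 * in the RX vector appear to encode "number of space-time streams minus
 * one", hence the + 1 in the switch above. The resulting skb carries no
 * PSDU at all; it exists purely so monitor interfaces get a radiotap record
 * of the sounding/unmatched PPDU.
 */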
2061