1 /*-
2 * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
3 */
4
5 /* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
6
7 /*
8 *
9 * Copyright (c) 2025 The FreeBSD Foundation
10 *
11 * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
12 * under sponsorship from the FreeBSD Foundation.
13 *
14 * Permission to use, copy, modify, and distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
26 */
27
28 /*-
29 * Copyright (c) 2024 Future Crew, LLC
30 * Author: Mikhail Pchelin <misha@FreeBSD.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45 /*
46 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
47 * Author: Stefan Sperling <stsp@openbsd.org>
48 * Copyright (c) 2014 Fixup Software Ltd.
49 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
50 *
51 * Permission to use, copy, modify, and distribute this software for any
52 * purpose with or without fee is hereby granted, provided that the above
53 * copyright notice and this permission notice appear in all copies.
54 *
55 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
56 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
57 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
58 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
59 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
60 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
61 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
62 */
63
64 /*-
65 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
66 * which were used as the reference documentation for this implementation.
67 *
68 ******************************************************************************
69 *
70 * This file is provided under a dual BSD/GPLv2 license. When using or
71 * redistributing this file, you may do so under either license.
72 *
73 * GPL LICENSE SUMMARY
74 *
75 * Copyright(c) 2017 Intel Deutschland GmbH
76 * Copyright(c) 2018 - 2019 Intel Corporation
77 *
78 * This program is free software; you can redistribute it and/or modify
79 * it under the terms of version 2 of the GNU General Public License as
80 * published by the Free Software Foundation.
81 *
82 * This program is distributed in the hope that it will be useful, but
83 * WITHOUT ANY WARRANTY; without even the implied warranty of
84 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
85 * General Public License for more details.
86 *
87 * BSD LICENSE
88 *
89 * Copyright(c) 2017 Intel Deutschland GmbH
90 * Copyright(c) 2018 - 2019 Intel Corporation
91 * All rights reserved.
92 *
93 * Redistribution and use in source and binary forms, with or without
94 * modification, are permitted provided that the following conditions
95 * are met:
96 *
97 * * Redistributions of source code must retain the above copyright
98 * notice, this list of conditions and the following disclaimer.
99 * * Redistributions in binary form must reproduce the above copyright
100 * notice, this list of conditions and the following disclaimer in
101 * the documentation and/or other materials provided with the
102 * distribution.
103 * * Neither the name Intel Corporation nor the names of its
104 * contributors may be used to endorse or promote products derived
105 * from this software without specific prior written permission.
106 *
107 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
108 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
109 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
111 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
112 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
113 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
114 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
115 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
116 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
117 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
118 *
119 *****************************************************************************
120 */
121
122 /*-
123 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
124 *
125 * Permission to use, copy, modify, and distribute this software for any
126 * purpose with or without fee is hereby granted, provided that the above
127 * copyright notice and this permission notice appear in all copies.
128 *
129 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
130 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
131 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
132 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
133 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
134 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
135 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
136 */
137
138 #include <sys/param.h>
139 #include <sys/bus.h>
140 #include <sys/module.h>
141 #include <sys/conf.h>
142 #include <sys/kernel.h>
143 #include <sys/malloc.h>
144 #include <sys/mbuf.h>
145 #include <sys/mutex.h>
146 #include <sys/proc.h>
147 #include <sys/rman.h>
148 #include <sys/rwlock.h>
149 #include <sys/socket.h>
150 #include <sys/sockio.h>
151 #include <sys/systm.h>
152 #include <sys/endian.h>
153 #include <sys/linker.h>
154 #include <sys/firmware.h>
155 #include <sys/epoch.h>
156 #include <sys/kdb.h>
157
158 #include <machine/_inttypes.h>
159 #include <machine/bus.h>
160 #include <machine/endian.h>
161 #include <machine/resource.h>
162
163 #include <dev/pci/pcireg.h>
164 #include <dev/pci/pcivar.h>
165
166 #include <net/bpf.h>
167
168 #include <net/if.h>
169 #include <net/if_var.h>
170 #include <net/if_dl.h>
171 #include <net/if_media.h>
172
173 #include <netinet/in.h>
174 #include <netinet/if_ether.h>
175
176 #include <net80211/ieee80211_var.h>
177 #include <net80211/ieee80211_radiotap.h>
178 #include <net80211/ieee80211_regdomain.h>
179 #include <net80211/ieee80211_ratectl.h>
180 #include <net80211/ieee80211_vht.h>
181
/*
 * Queue watermarks (entry counts); presumably hi/lo thresholds for
 * pausing and resuming Tx traffic — TODO confirm against users below.
 */
int iwx_himark = 224;
int iwx_lomark = 192;

/* Versions of the FreeBSD-specific firmware response handling. */
#define IWX_FBSD_RSP_V3 3
#define IWX_FBSD_RSP_V4 4

/* Device name string for log/diagnostic messages. */
#define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
/* Map an ieee80211com to the ifnet of its first vap. */
#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)

/* Linux-compat helpers: load a little-endian 16/32-bit value via pointer. */
#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
193
194 #include <dev/iwx/if_iwxreg.h>
195 #include <dev/iwx/if_iwxvar.h>
196
197 #include <dev/iwx/if_iwx_debug.h>
198
#define PCI_CFG_RETRY_TIMEOUT 0x41

/* PCI vendor and device IDs recognized by this driver. */
#define PCI_VENDOR_INTEL 0x8086
#define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
#define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
#define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
#define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
#define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
219
/* Probe table: supported PCI device IDs and their marketing names. */
static const struct iwx_devices {
	uint16_t device;	/* PCI device ID */
	char *name;		/* human-readable adapter name */
} iwx_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
	{ PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
	{ PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
	{ PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
	{ PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
};
242
/*
 * NVM channel tables: index -> IEEE channel number.  The first
 * IWX_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels, the next
 * IWX_NUM_5GHZ_CHANNELS are 5 GHz; the UHB table adds 6-7 GHz.
 */
static const uint8_t iwx_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/* Channel table for ultra-high-band (6 GHz capable) devices. */
static const uint8_t iwx_nvm_channels_uhb[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
};

#define IWX_NUM_2GHZ_CHANNELS 14
#define IWX_NUM_5GHZ_CHANNELS 37
268
/*
 * Rate table: rate in 500 kbps units, the legacy PLCP signal value,
 * and the corresponding HT PLCP value (IWX_RATE_HT_SISO_MCS_INV_PLCP
 * where no HT equivalent exists).  CCK entries come first, then OFDM.
 */
const struct iwx_rate {
	uint16_t rate;		/* rate in 500 kbps units */
	uint8_t plcp;		/* legacy PLCP value */
	uint8_t ht_plcp;	/* HT PLCP value, or the INV sentinel */
} iwx_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
};
/* First CCK and first OFDM index into iwx_rates[], and classifiers. */
#define IWX_RIDX_CCK	0
#define IWX_RIDX_OFDM	4
#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
/* Rates >= 6 Mbps (12 x 500kbps) are OFDM, except 11 Mbps CCK (22). */
#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
303
/* Convert an MCS index into an iwx_rates[] index (indexed by MCS 0-15). */
const int iwx_mcs2ridx[] = {
	IWX_RATE_MCS_0_INDEX,
	IWX_RATE_MCS_1_INDEX,
	IWX_RATE_MCS_2_INDEX,
	IWX_RATE_MCS_3_INDEX,
	IWX_RATE_MCS_4_INDEX,
	IWX_RATE_MCS_5_INDEX,
	IWX_RATE_MCS_6_INDEX,
	IWX_RATE_MCS_7_INDEX,
	IWX_RATE_MCS_8_INDEX,
	IWX_RATE_MCS_9_INDEX,
	IWX_RATE_MCS_10_INDEX,
	IWX_RATE_MCS_11_INDEX,
	IWX_RATE_MCS_12_INDEX,
	IWX_RATE_MCS_13_INDEX,
	IWX_RATE_MCS_14_INDEX,
	IWX_RATE_MCS_15_INDEX,
};
323
324 static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
325 static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
326 static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
327 #if 0
328 static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
329 static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
330 #endif
331 static int iwx_apply_debug_destination(struct iwx_softc *);
332 static void iwx_set_ltr(struct iwx_softc *);
333 static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
334 static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
335 const struct iwx_fw_sects *);
336 static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
337 static void iwx_ctxt_info_free_paging(struct iwx_softc *);
338 static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
339 struct iwx_context_info_dram *);
340 static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
341 static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
342 const uint8_t *, size_t);
343 static int iwx_set_default_calib(struct iwx_softc *, const void *);
344 static void iwx_fw_info_free(struct iwx_fw_info *);
345 static int iwx_read_firmware(struct iwx_softc *);
346 static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
347 static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
348 static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
349 static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
350 static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
351 static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
352 static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
353 static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
354 static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
355 static int iwx_nic_lock(struct iwx_softc *);
356 static void iwx_nic_assert_locked(struct iwx_softc *);
357 static void iwx_nic_unlock(struct iwx_softc *);
358 static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
359 uint32_t);
360 static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
361 static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
362 static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
363 static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
364 bus_size_t, bus_size_t);
365 static void iwx_dma_contig_free(struct iwx_dma_info *);
366 static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
367 static void iwx_disable_rx_dma(struct iwx_softc *);
368 static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
369 static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
370 static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
371 static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
372 static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
373 static void iwx_enable_rfkill_int(struct iwx_softc *);
374 static int iwx_check_rfkill(struct iwx_softc *);
375 static void iwx_enable_interrupts(struct iwx_softc *);
376 static void iwx_enable_fwload_interrupt(struct iwx_softc *);
377 #if 0
378 static void iwx_restore_interrupts(struct iwx_softc *);
379 #endif
380 static void iwx_disable_interrupts(struct iwx_softc *);
381 static void iwx_ict_reset(struct iwx_softc *);
382 static int iwx_set_hw_ready(struct iwx_softc *);
383 static int iwx_prepare_card_hw(struct iwx_softc *);
384 static int iwx_force_power_gating(struct iwx_softc *);
385 static void iwx_apm_config(struct iwx_softc *);
386 static int iwx_apm_init(struct iwx_softc *);
387 static void iwx_apm_stop(struct iwx_softc *);
388 static int iwx_allow_mcast(struct iwx_softc *);
389 static void iwx_init_msix_hw(struct iwx_softc *);
390 static void iwx_conf_msix_hw(struct iwx_softc *, int);
391 static int iwx_clear_persistence_bit(struct iwx_softc *);
392 static int iwx_start_hw(struct iwx_softc *);
393 static void iwx_stop_device(struct iwx_softc *);
394 static void iwx_nic_config(struct iwx_softc *);
395 static int iwx_nic_rx_init(struct iwx_softc *);
396 static int iwx_nic_init(struct iwx_softc *);
397 static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
398 static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
399 static void iwx_post_alive(struct iwx_softc *);
400 static int iwx_schedule_session_protection(struct iwx_softc *,
401 struct iwx_node *, uint32_t);
402 static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
403 static void iwx_init_channel_map(struct ieee80211com *, int, int *,
404 struct ieee80211_channel[]);
405 static int iwx_mimo_enabled(struct iwx_softc *);
406 static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
407 uint16_t);
408 static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
409 static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
410 uint16_t, uint16_t, int, int);
411 static void iwx_sta_tx_agg_start(struct iwx_softc *,
412 struct ieee80211_node *, uint8_t);
413 static void iwx_ba_rx_task(void *, int);
414 static void iwx_ba_tx_task(void *, int);
415 static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
416 static int iwx_is_valid_mac_addr(const uint8_t *);
417 static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
418 static int iwx_nvm_get(struct iwx_softc *);
419 static int iwx_load_firmware(struct iwx_softc *);
420 static int iwx_start_fw(struct iwx_softc *);
421 static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
422 static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
423 static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
424 static int iwx_load_pnvm(struct iwx_softc *);
425 static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
426 static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
427 static int iwx_load_ucode_wait_alive(struct iwx_softc *);
428 static int iwx_send_dqa_cmd(struct iwx_softc *);
429 static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
430 static int iwx_config_ltr(struct iwx_softc *);
431 static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
432 static int iwx_rx_addbuf(struct iwx_softc *, int, int);
433 static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
434 static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
435 struct iwx_rx_data *);
436 static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
437 static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
438 #if 0
439 int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
440 struct ieee80211_node *, struct ieee80211_rxinfo *);
441 #endif
442 static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
443 int, int, uint32_t, uint8_t);
444 static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
445 static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
446 struct iwx_tx_data *);
447 static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
448 static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
449 struct iwx_rx_data *);
450 static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
451 static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
452 struct iwx_rx_data *);
453 static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
454 static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
455 static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
456 struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
457 #if 0
458 static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
459 uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
460 #endif
461 static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
462 uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
463 static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
464 static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
465 const void *);
466 static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
467 uint32_t *);
468 static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
469 const void *, uint32_t *);
470 static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
471 static void iwx_cmd_done(struct iwx_softc *, int, int, int);
472 static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
473 static uint32_t iwx_fw_rateidx_cck(uint8_t);
474 static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
475 struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
476 struct mbuf *);
477 static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
478 uint16_t, uint16_t);
479 static int iwx_tx(struct iwx_softc *, struct mbuf *,
480 struct ieee80211_node *);
481 static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
482 static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
483 static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
484 static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
485 struct iwx_beacon_filter_cmd *);
486 static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
487 int);
488 static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
489 struct iwx_mac_power_cmd *);
490 static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
491 static int iwx_power_update_device(struct iwx_softc *);
492 #if 0
493 static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
494 #endif
495 static int iwx_disable_beacon_filter(struct iwx_softc *);
496 static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
497 static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
498 static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
499 static int iwx_fill_probe_req(struct iwx_softc *,
500 struct iwx_scan_probe_req *);
501 static int iwx_config_umac_scan_reduced(struct iwx_softc *);
502 static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
503 static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
504 struct iwx_scan_general_params_v10 *, int);
505 static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
506 struct iwx_scan_general_params_v10 *, uint16_t, int);
507 static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
508 struct iwx_scan_channel_params_v6 *, uint32_t, int);
509 static int iwx_umac_scan_v14(struct iwx_softc *, int);
510 static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
511 static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
512 static int iwx_rval2ridx(int);
513 static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
514 int *);
515 static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
516 struct iwx_mac_ctx_cmd *, uint32_t);
517 static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
518 struct iwx_mac_data_sta *, int);
519 static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
520 uint32_t, int);
521 static int iwx_clear_statistics(struct iwx_softc *);
522 static int iwx_scan(struct iwx_softc *);
523 static int iwx_bgscan(struct ieee80211com *);
524 static int iwx_enable_mgmt_queue(struct iwx_softc *);
525 static int iwx_disable_mgmt_queue(struct iwx_softc *);
526 static int iwx_rs_rval2idx(uint8_t);
527 static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
528 int);
529 static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
530 static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
531 static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
532 static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
533 static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
534 uint8_t, uint8_t);
535 static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
536 struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
537 uint8_t);
538 static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
539 static int iwx_deauth(struct iwx_softc *);
540 static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
541 static int iwx_run_stop(struct iwx_softc *);
542 static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
543 const uint8_t[IEEE80211_ADDR_LEN]);
544 #if 0
545 int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
546 struct ieee80211_key *);
547 void iwx_setkey_task(void *);
548 void iwx_delete_key(struct ieee80211com *,
549 struct ieee80211_node *, struct ieee80211_key *);
550 #endif
551 static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
552 static void iwx_endscan(struct iwx_softc *);
553 static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
554 struct ieee80211_node *);
555 static int iwx_sf_config(struct iwx_softc *, int);
556 static int iwx_send_bt_init_conf(struct iwx_softc *);
557 static int iwx_send_soc_conf(struct iwx_softc *);
558 static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
559 static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
560 static int iwx_init_hw(struct iwx_softc *);
561 static int iwx_init(struct iwx_softc *);
562 static void iwx_stop(struct iwx_softc *);
563 static void iwx_watchdog(void *);
564 static const char *iwx_desc_lookup(uint32_t);
565 static void iwx_nic_error(struct iwx_softc *);
566 static void iwx_dump_driver_status(struct iwx_softc *);
567 static void iwx_nic_umac_error(struct iwx_softc *);
568 static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
569 static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
570 static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
571 struct mbuf *);
572 static void iwx_notif_intr(struct iwx_softc *);
573 #if 0
574 /* XXX-THJ - I don't have hardware for this */
575 static int iwx_intr(void *);
576 #endif
577 static void iwx_intr_msix(void *);
578 static int iwx_preinit(struct iwx_softc *);
579 static void iwx_attach_hook(void *);
580 static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
581 static int iwx_probe(device_t);
582 static int iwx_attach(device_t);
583 static int iwx_detach(device_t);
584
/* FreeBSD specific glue */
/* Broadcast and all-zero ("any") Ethernet addresses used by this driver. */
u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

/*
 * Debug printf, compiled in only when IWX_DEBUG is defined to a nonzero
 * value.  NOTE(review): `#if IWX_DEBUG` evaluates an undefined macro as 0;
 * if enabling via `-DIWX_DEBUG` (no value) was intended, `#ifdef` would be
 * needed — confirm build-system usage.  Also note this fires only when
 * sc_debug equals IWX_DEBUG_ANY exactly, not when any flag bit is set.
 */
#if IWX_DEBUG
#define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
#else
#define DPRINTF(x) do { ; } while (0)
#endif
597
598 /* FreeBSD specific functions */
599 static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
600 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
601 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
602 static void iwx_vap_delete(struct ieee80211vap *);
603 static void iwx_parent(struct ieee80211com *);
604 static void iwx_scan_start(struct ieee80211com *);
605 static void iwx_scan_end(struct ieee80211com *);
606 static void iwx_update_mcast(struct ieee80211com *ic);
607 static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
608 static void iwx_scan_mindwell(struct ieee80211_scan_state *);
609 static void iwx_set_channel(struct ieee80211com *);
610 static void iwx_endscan_cb(void *, int );
611 static int iwx_wme_update(struct ieee80211com *);
612 static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
613 const struct ieee80211_bpf_params *);
614 static int iwx_transmit(struct ieee80211com *, struct mbuf *);
615 static void iwx_start(struct iwx_softc *);
616 static int iwx_ampdu_rx_start(struct ieee80211_node *,
617 struct ieee80211_rx_ampdu *, int, int, int);
618 static void iwx_ampdu_rx_stop(struct ieee80211_node *,
619 struct ieee80211_rx_ampdu *);
620 static int iwx_addba_request(struct ieee80211_node *,
621 struct ieee80211_tx_ampdu *, int, int, int);
622 static int iwx_addba_response(struct ieee80211_node *,
623 struct ieee80211_tx_ampdu *, int, int, int);
624 static void iwx_key_update_begin(struct ieee80211vap *);
625 static void iwx_key_update_end(struct ieee80211vap *);
626 static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
627 ieee80211_keyix *,ieee80211_keyix *);
628 static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
629 static int iwx_key_delete(struct ieee80211vap *,
630 const struct ieee80211_key *);
631 static int iwx_suspend(device_t);
632 static int iwx_resume(device_t);
633 static void iwx_radiotap_attach(struct iwx_softc *);
634
/* OpenBSD compat defines */
/* HT operation: secondary channel offset "none" (20 MHz only). */
#define IEEE80211_HTOP0_SCO_SCN 0
/* VHT operation channel-width values: HT-style (20/40) and 80 MHz. */
#define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
#define IEEE80211_VHTOP0_CHAN_WIDTH_80 1

/* HT rate-set indices for single-stream and two-stream MCS groups. */
#define IEEE80211_HT_RATESET_SISO 0
#define IEEE80211_HT_RATESET_MIMO2 2

/* Standard 11a/11b/11g rate sets; rates in 500 kbps units. */
const struct ieee80211_rateset ieee80211_std_rateset_11a =
	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };

const struct ieee80211_rateset ieee80211_std_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

const struct ieee80211_rateset ieee80211_std_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
651
652 inline int
ieee80211_has_addr4(const struct ieee80211_frame * wh)653 ieee80211_has_addr4(const struct ieee80211_frame *wh)
654 {
655 return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
656 IEEE80211_FC1_DIR_DSTODS;
657 }
658
659 static uint8_t
iwx_lookup_cmd_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)660 iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
661 {
662 const struct iwx_fw_cmd_version *entry;
663 int i;
664
665 for (i = 0; i < sc->n_cmd_versions; i++) {
666 entry = &sc->cmd_versions[i];
667 if (entry->group == grp && entry->cmd == cmd)
668 return entry->cmd_ver;
669 }
670
671 return IWX_FW_CMD_VER_UNKNOWN;
672 }
673
674 uint8_t
iwx_lookup_notif_ver(struct iwx_softc * sc,uint8_t grp,uint8_t cmd)675 iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
676 {
677 const struct iwx_fw_cmd_version *entry;
678 int i;
679
680 for (i = 0; i < sc->n_cmd_versions; i++) {
681 entry = &sc->cmd_versions[i];
682 if (entry->group == grp && entry->cmd == cmd)
683 return entry->notif_ver;
684 }
685
686 return IWX_FW_CMD_VER_UNKNOWN;
687 }
688
689 static int
iwx_store_cscheme(struct iwx_softc * sc,const uint8_t * data,size_t dlen)690 iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
691 {
692 const struct iwx_fw_cscheme_list *l = (const void *)data;
693
694 if (dlen < sizeof(*l) ||
695 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
696 return EINVAL;
697
698 /* we don't actually store anything for now, always use s/w crypto */
699
700 return 0;
701 }
702
703 static int
iwx_ctxt_info_alloc_dma(struct iwx_softc * sc,const struct iwx_fw_onesect * sec,struct iwx_dma_info * dram)704 iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
705 const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
706 {
707 int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
708 if (err) {
709 printf("%s: could not allocate context info DMA memory\n",
710 DEVNAME(sc));
711 return err;
712 }
713
714 memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
715
716 return 0;
717 }
718
719 static void
iwx_ctxt_info_free_paging(struct iwx_softc * sc)720 iwx_ctxt_info_free_paging(struct iwx_softc *sc)
721 {
722 struct iwx_self_init_dram *dram = &sc->init_dram;
723 int i;
724
725 if (!dram->paging)
726 return;
727
728 /* free paging*/
729 for (i = 0; i < dram->paging_cnt; i++)
730 iwx_dma_contig_free(&dram->paging[i]);
731
732 free(dram->paging, M_DEVBUF);
733 dram->paging_cnt = 0;
734 dram->paging = NULL;
735 }
736
737 static int
iwx_get_num_sections(const struct iwx_fw_sects * fws,int start)738 iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
739 {
740 int i = 0;
741
742 while (start < fws->fw_count &&
743 fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
744 fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
745 start++;
746 i++;
747 }
748
749 return i;
750 }
751
/*
 * Allocate DMA memory for all lmac/umac/paging firmware sections and
 * record their bus addresses in the context-info DRAM descriptor the
 * firmware self-load reads. On error, partially allocated fw sections
 * are freed by the caller via iwx_ctxt_info_free_fw_img(); paging
 * sections are freed later by iwx_ctxt_info_free_paging().
 */
static int
iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
    struct iwx_context_info_dram *ctxt_dram)
{
	struct iwx_self_init_dram *dram = &sc->init_dram;
	int i, ret, fw_cnt = 0;

	KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));

	/*
	 * Section layout in the image:
	 *   [lmac...][separator][umac...][separator][paging...]
	 */
	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
	/* add 1 due to separator */
	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
	/* add 2 due to separators */
	dram->paging_cnt = iwx_get_num_sections(fws,
	    dram->lmac_cnt + dram->umac_cnt + 2);

	/* Drop the driver lock across allocations that may sleep. */
	IWX_UNLOCK(sc);
	/*
	 * NOTE(review): this allocation uses M_NOWAIT while the paging
	 * allocation below uses M_WAITOK, although both run unlocked;
	 * presumably both could be M_WAITOK -- confirm.
	 */
	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
	    sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (!dram->fw) {
		printf("%s: could not allocate memory for firmware sections\n",
		    DEVNAME(sc));
		IWX_LOCK(sc);
		return ENOMEM;
	}

	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
	    M_DEVBUF, M_ZERO | M_WAITOK);
	IWX_LOCK(sc);
	/* M_WAITOK never returns NULL; this check is defensive only. */
	if (!dram->paging) {
		printf("%s: could not allocate memory for firmware paging\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	/* initialize lmac sections */
	for (i = 0; i < dram->lmac_cnt; i++) {
		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
		    &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware LMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < dram->umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
		    htole64(dram->fw[fw_cnt].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware UMAC section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->fw[fw_cnt].paddr,
		    (unsigned long long)dram->fw[fw_cnt].size);
		fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < dram->paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = fw_cnt + i + 2;

		ret = iwx_ctxt_info_alloc_dma(sc,
		    &fws->fw_sect[fw_idx], &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: firmware paging section %d at 0x%llx size %lld\n",
		    __func__, i,
		    (unsigned long long)dram->paging[i].paddr,
		    (unsigned long long)dram->paging[i].size);
	}

	return 0;
}
849
/*
 * Format a firmware version triple into buf. Starting with major
 * version 35 the Linux driver prints the minor version in hexadecimal,
 * and we follow suit.
 */
static void
iwx_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	const char *fmt = (major >= 35) ? "%u.%08x.%u" : "%u.%u.%u";

	snprintf(buf, bufsize, fmt, major, minor, api);
}
#if 0
/*
 * Allocate the largest possible power-of-two DMA buffer for the
 * firmware monitor, trying sizes from 2^max_power down to 2^min_power.
 * Currently compiled out.
 *
 * NOTE(review): if max_power < min_power the loop body never runs and
 * 'err' is read uninitialized below -- harmless while dead, but fix
 * before enabling this code.
 */
static int
iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
    uint8_t min_power)
{
	struct iwx_dma_info *fw_mon = &sc->fw_mon;
	uint32_t size = 0;
	uint8_t power;
	int err;

	/* Already allocated. */
	if (fw_mon->size)
		return 0;

	for (power = max_power; power >= min_power; power--) {
		size = (1 << power);

		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
		if (err)
			continue;

		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: allocated 0x%08x bytes for firmware monitor.\n",
		    DEVNAME(sc), size);
		break;
	}

	if (err) {
		fw_mon->size = 0;
		return err;
	}

	if (power != max_power)
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
		    DEVNAME(sc), (unsigned long)(1 << (power - 10)),
		    (unsigned long)(1 << (max_power - 10)));

	return 0;
}

/*
 * Allocate a firmware monitor buffer. A max_power of 0 selects the
 * 26-bit (64MB) default; otherwise the TLV value is biased by 11 to
 * obtain the exponent. Currently compiled out.
 */
static int
iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (max_power > 26) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: External buffer size for monitor is too big %d, "
		    "check the FW TLV\n", DEVNAME(sc), max_power);
		return 0;
	}

	if (sc->fw_mon.size)
		return 0;

	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
}
#endif
926
/*
 * Program the firmware debug destination described by the
 * IWX_UCODE_TLV_FW_DBG_DEST TLV. The implementation is currently
 * compiled out (#if 0) and this function is a no-op returning 0.
 */
static int
iwx_apply_debug_destination(struct iwx_softc *sc)
{
#if 0
	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
	int i, err;
	uint8_t mon_mode, size_power, base_shift, end_shift;
	uint32_t base_reg, end_reg;

	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
	mon_mode = dest_v1->monitor_mode;
	size_power = dest_v1->size_power;
	base_reg = le32toh(dest_v1->base_reg);
	end_reg = le32toh(dest_v1->end_reg);
	base_shift = dest_v1->base_shift;
	end_shift = dest_v1->end_shift;

	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));

	if (mon_mode == EXTERNAL_MODE) {
		err = iwx_alloc_fw_monitor(sc, size_power);
		if (err)
			return err;
	}

	if (!iwx_nic_lock(sc))
		return EBUSY;

	/* Replay the register-op list from the TLV. */
	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
		uint32_t addr, val;
		uint8_t op;

		addr = le32toh(dest_v1->reg_ops[i].addr);
		val = le32toh(dest_v1->reg_ops[i].val);
		op = dest_v1->reg_ops[i].op;

		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
		switch (op) {
		case CSR_ASSIGN:
			IWX_WRITE(sc, addr, val);
			break;
		case CSR_SETBIT:
			IWX_SETBITS(sc, addr, (1 << val));
			break;
		case CSR_CLEARBIT:
			IWX_CLRBITS(sc, addr, (1 << val));
			break;
		case PRPH_ASSIGN:
			iwx_write_prph(sc, addr, val);
			break;
		case PRPH_SETBIT:
			/*
			 * NOTE(review): this error return (and the one in
			 * PRPH_CLEARBIT below) leaves the nic lock held --
			 * fix before enabling this code.
			 */
			err = iwx_set_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_CLEARBIT:
			err = iwx_clear_bits_prph(sc, addr, (1 << val));
			if (err)
				return err;
			break;
		case PRPH_BLOCKBIT:
			if (iwx_read_prph(sc, addr) & (1 << val))
				goto monitor;
			break;
		default:
			DPRINTF(("%s: FW debug - unknown OP %d\n",
			    DEVNAME(sc), op));
			break;
		}
	}

monitor:
	/* Point the device at the allocated external monitor buffer. */
	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
		iwx_write_prph(sc, le32toh(base_reg),
		    sc->fw_mon.paddr >> base_shift);
		iwx_write_prph(sc, end_reg,
		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
		    >> end_shift);
	}

	iwx_nic_unlock(sc);
	return 0;
#else
	return 0;
#endif
}
1013
1014 static void
iwx_set_ltr(struct iwx_softc * sc)1015 iwx_set_ltr(struct iwx_softc *sc)
1016 {
1017 uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
1018 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1019 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
1020 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
1021 ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
1022 IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
1023 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
1024 ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
1025 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
1026 IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
1027 (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
1028
1029 /*
1030 * To workaround hardware latency issues during the boot process,
1031 * initialize the LTR to ~250 usec (see ltr_val above).
1032 * The firmware initializes this again later (to a smaller value).
1033 */
1034 if (!sc->sc_integrated) {
1035 IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
1036 } else if (sc->sc_integrated &&
1037 sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
1038 iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
1039 IWX_HPM_MAC_LRT_ENABLE_ALL);
1040 iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
1041 }
1042 }
1043
/*
 * Build the context info structure (pre-AX210 devices) describing RX
 * ring, command queue, and ucode section addresses, write its DMA base
 * address to the device, and kick the firmware self load. The driver
 * lock is expected to be held (iwx_init_fw_sec() drops and re-takes it).
 * The context info DMA memory is released later, upon alive or failure.
 */
int
iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info *ctxt_info;
	struct iwx_context_info_rbd_cfg *rx_cfg;
	uint32_t control_flags = 0;
	uint64_t paddr;
	int err;

	ctxt_info = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info, 0, sizeof(*ctxt_info));

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
	    htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);

	/*
	 * The encoded RB circular-buffer size must stay below 0xF so it
	 * fits in the control-flags field. NOTE(review): the panic
	 * message mentions "rate table size", which looks mispasted.
	 */
	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
	    ("IWX_RX_QUEUE_CB_SIZE exceeds rate table size"));

	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
	    (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
	     IWX_CTXT_INFO_RB_CB_SIZE_POS) |
	    (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	    IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
	if (err) {
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/* Configure debug, if exists */
	if (sc->sc_fw.dbg_dest_tlv_v1) {
/* NOTE(review): the "#if 1" below looks like a leftover debug toggle. */
#if 1
		err = iwx_apply_debug_destination(sc);
		if (err) {
			iwx_ctxt_info_free_fw_img(sc);
			return err;
		}
#endif
	}

	/*
	 * Write the context info DMA base address. The device expects a
	 * 64-bit address but a simple bus_space_write_8 to this register
	 * won't work on some devices, such as the AX201.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}

	iwx_set_ltr(sc);
	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
1124
/*
 * Build the gen3 (AX210+) context info and PRPH scratch structures,
 * copy the image loader (IML) into DMA memory, point the device at
 * all of it, and kick the firmware self load. On failure all DMA
 * allocations made here are released before returning.
 */
static int
iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
{
	struct iwx_context_info_gen3 *ctxt_info_gen3;
	struct iwx_prph_scratch *prph_scratch;
	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	uint16_t cb_size;
	uint32_t control_flags, scratch_size;
	uint64_t paddr;
	int err;

	/* Gen3 devices cannot boot without an image loader section. */
	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
		printf("%s: no image loader found in firmware file\n",
		    DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return EINVAL;
	}

	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
	    sc->sc_fw.iml_len, 1);
	if (err) {
		printf("%s: could not allocate DMA memory for "
		    "firmware image loader\n", DEVNAME(sc));
		iwx_ctxt_info_free_fw_img(sc);
		return ENOMEM;
	}

	/* Fill in the PRPH scratch area: version, control flags, RX cfg. */
	prph_scratch = sc->prph_scratch_dma.vaddr;
	memset(prph_scratch, 0, sizeof(*prph_scratch));
	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);

	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
	    IWX_PRPH_SCRATCH_MTR_MODE |
	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
	if (sc->sc_imr_enabled)
		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
	prph_sc_ctrl->control.control_flags = htole32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
	    htole64(sc->rxq.free_desc_dma.paddr);

	/* allocate ucode sections in dram and set addresses */
	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
	if (err) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return err;
	}

	/*
	 * Fill in the context info: PRPH info/scratch addresses, RX/TX
	 * ring base addresses and encoded circular-buffer sizes.
	 */
	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
	ctxt_info_gen3->prph_scratch_base_addr =
	    htole64(sc->prph_scratch_dma.paddr);
	scratch_size = sizeof(*prph_scratch);
	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
	    htole64(sc->rxq.stat_dma.paddr);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
	ctxt_info_gen3->mtr_size = htole16(cb_size);
	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
	ctxt_info_gen3->mcr_size = htole16(cb_size);

	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);

	/*
	 * Hand the device the context info and IML addresses. The 64-bit
	 * values are written as two 32-bit halves.
	 */
	paddr = sc->ctxt_info_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);

	paddr = sc->iml_dma.paddr;
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);

	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
	    IWX_CSR_AUTO_FUNC_BOOT_ENA);

	IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
	    "%s:%d kicking fw to get going\n", __func__, __LINE__);

	/* kick FW self load */
	if (!iwx_nic_lock(sc)) {
		iwx_dma_contig_free(&sc->iml_dma);
		iwx_ctxt_info_free_fw_img(sc);
		return EBUSY;
	}
	iwx_set_ltr(sc);
	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
	iwx_nic_unlock(sc);

	/* Context info will be released upon alive or failure to get one */
	return 0;
}
1229
1230 static void
iwx_ctxt_info_free_fw_img(struct iwx_softc * sc)1231 iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1232 {
1233 struct iwx_self_init_dram *dram = &sc->init_dram;
1234 int i;
1235
1236 if (!dram->fw)
1237 return;
1238
1239 for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1240 iwx_dma_contig_free(&dram->fw[i]);
1241
1242 free(dram->fw, M_DEVBUF);
1243 dram->lmac_cnt = 0;
1244 dram->umac_cnt = 0;
1245 dram->fw = NULL;
1246 }
1247
1248 static int
iwx_firmware_store_section(struct iwx_softc * sc,enum iwx_ucode_type type,const uint8_t * data,size_t dlen)1249 iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1250 const uint8_t *data, size_t dlen)
1251 {
1252 struct iwx_fw_sects *fws;
1253 struct iwx_fw_onesect *fwone;
1254
1255 if (type >= IWX_UCODE_TYPE_MAX)
1256 return EINVAL;
1257 if (dlen < sizeof(uint32_t))
1258 return EINVAL;
1259
1260 fws = &sc->sc_fw.fw_sects[type];
1261 IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
1262 "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
1263 if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1264 return EINVAL;
1265
1266 fwone = &fws->fw_sect[fws->fw_count];
1267
1268 /* first 32bit are device load offset */
1269 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1270
1271 /* rest is data */
1272 fwone->fws_data = data + sizeof(uint32_t);
1273 fwone->fws_len = dlen - sizeof(uint32_t);
1274
1275 fws->fw_count++;
1276 fws->fw_totlen += fwone->fws_len;
1277
1278 return 0;
1279 }
1280
#define IWX_DEFAULT_SCAN_CHANNELS 40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */

/* Payload layout of an IWX_UCODE_TLV_DEF_CALIB record. */
struct iwx_tlv_calib_data {
	uint32_t ucode_type;
	struct iwx_tlv_calib_ctrl calib;
} __packed;
1289
1290 static int
iwx_set_default_calib(struct iwx_softc * sc,const void * data)1291 iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1292 {
1293 const struct iwx_tlv_calib_data *def_calib = data;
1294 uint32_t ucode_type = le32toh(def_calib->ucode_type);
1295
1296 if (ucode_type >= IWX_UCODE_TYPE_MAX)
1297 return EINVAL;
1298
1299 sc->sc_default_calib[ucode_type].flow_trigger =
1300 def_calib->calib.flow_trigger;
1301 sc->sc_default_calib[ucode_type].event_trigger =
1302 def_calib->calib.event_trigger;
1303
1304 return 0;
1305 }
1306
1307 static void
iwx_fw_info_free(struct iwx_fw_info * fw)1308 iwx_fw_info_free(struct iwx_fw_info *fw)
1309 {
1310 free(fw->fw_rawdata, M_DEVBUF);
1311 fw->fw_rawdata = NULL;
1312 fw->fw_rawsize = 0;
1313 /* don't touch fw->fw_status */
1314 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1315 free(fw->iml, M_DEVBUF);
1316 fw->iml = NULL;
1317 fw->iml_len = 0;
1318 }
1319
/* Address bits masked off the error-event-table pointers read from TLVs. */
#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1321
/*
 * Fetch the firmware image via firmware(9) and parse its TLV records
 * into sc->sc_fw: ucode sections, capability/API flags, debug info,
 * command versions, and (gen3) the image loader. The result is cached;
 * subsequent calls return immediately once fw_status is DONE. On error
 * the parsed state is freed and fw_status reset to IWX_FW_STATUS_NONE.
 */
static int
iwx_read_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_info *fw = &sc->sc_fw;
	const struct iwx_tlv_ucode_header *uhdr;
	struct iwx_ucode_tlv tlv;
	uint32_t tlv_type;
	const uint8_t *data;
	int err = 0;
	size_t len;
	const struct firmware *fwp;

	if (fw->fw_status == IWX_FW_STATUS_DONE)
		return 0;

	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
	fwp = firmware_get(sc->sc_fwname);
	sc->sc_fwp = fwp;

	if (fwp == NULL) {
		printf("%s: could not read firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = ENOENT;
		goto out;
	}

	IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
	    __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);


	/* Reset all capability state before (re)parsing. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	/* A valid TLV image starts with a zero word followed by the magic. */
	uhdr = (const void *)(fwp->data);
	if (*(const uint32_t *)fwp->data != 0
	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWX_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fwp->datasize - sizeof(*uhdr);

	/* Walk the TLV records: a (type, length) header followed by data. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWX_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWX_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
			break;
		case IWX_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWX_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWX_UCODE_TLV_CSCHEME:
			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWX_UCODE_TLV_SEC_RT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_INIT:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_SEC_WOWLAN:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwx_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWX_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWX_UCODE_TLV_API_CHANGES_SET: {
			const struct iwx_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (const struct iwx_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Record each set API flag in the word at 'idx'. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwx_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwx_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
				/*
				 * NOTE(review): err is not set here, so an
				 * out-of-range capability index silently
				 * aborts TLV parsing yet still reports
				 * success, unlike the API_CHANGES_SET case
				 * above. Confirm whether this is intentional.
				 */
				goto parse_out;
			}
			/* Record each advertised capability bit. */
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwx_firmware_store_section(sc,
			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWX_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(const uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWX_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			/* Overrides the version derived from uhdr->ver. */
			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWX_UCODE_TLV_FW_DBG_DEST: {
			const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;

			/* Only version 0 of the debug destination TLV. */
			fw->dbg_dest_ver = (const uint8_t *)tlv_data;
			if (*fw->dbg_dest_ver != 0) {
				err = EINVAL;
				goto parse_out;
			}

			/* First destination TLV wins. */
			if (fw->dbg_dest_tlv_init)
				break;
			fw->dbg_dest_tlv_init = true;

			dest_v1 = (const void *)tlv_data;
			fw->dbg_dest_tlv_v1 = dest_v1;
			fw->n_dest_reg = tlv_len -
			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "%s: found debug dest; n_dest_reg=%d\n",
			    __func__, fw->n_dest_reg);
			break;
		}

		case IWX_UCODE_TLV_FW_DBG_CONF: {
			const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;

			/* Requires a prior destination TLV; first conf per id wins. */
			if (!fw->dbg_dest_tlv_init ||
			    conf->id >= nitems(fw->dbg_conf_tlv) ||
			    fw->dbg_conf_tlv[conf->id] != NULL)
				break;

			IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
			    "Found debug configuration: %d\n", conf->id);
			fw->dbg_conf_tlv[conf->id] = conf;
			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
			break;
		}

		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
			const struct iwx_umac_debug_addrs *dbg_ptrs =
			    (const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_umac_error_event_table =
			    le32toh(dbg_ptrs->error_info_addr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_UMAC;
			break;
		}

		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
			const struct iwx_lmac_debug_addrs *dbg_ptrs =
			    (const void *)tlv_data;

			if (tlv_len != sizeof(*dbg_ptrs)) {
				err = EINVAL;
				goto parse_out;
			}
			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
				break;
			sc->sc_uc.uc_lmac_error_event_table[0] =
			    le32toh(dbg_ptrs->error_event_table_ptr) &
			    ~IWX_FW_ADDR_CACHE_CONTROL;
			sc->sc_uc.error_event_table_tlv_status |=
			    IWX_ERROR_EVENT_TABLE_LMAC1;
			break;
		}

		case IWX_UCODE_TLV_FW_MEM_SEG:
			break;

		case IWX_UCODE_TLV_IML:
			/* Replace any image loader seen earlier. */
			if (sc->sc_fw.iml != NULL) {
				free(fw->iml, M_DEVBUF);
				fw->iml_len = 0;
			}
			/* M_WAITOK cannot fail; the NULL check is defensive. */
			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (sc->sc_fw.iml == NULL) {
				err = ENOMEM;
				goto parse_out;
			}
			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
			sc->sc_fw.iml_len = tlv_len;
			break;

		case IWX_UCODE_TLV_CMD_VERSIONS:
			/*
			 * Mirror Linux: round a misaligned length down to a
			 * whole number of entries. Note the shortened
			 * tlv_len is also what advances 'data' to the next
			 * TLV at the bottom of this loop.
			 */
			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwx_fw_cmd_version);
				tlv_len *= sizeof(struct iwx_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
			break;

		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
			break;

		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
		case IWX_UCODE_TLV_FW_NUM_STATIONS:
		case IWX_UCODE_TLV_FW_NUM_BEACONS:
			break;

		/* undocumented TLVs found in iwx-cc-a0-46 image */
		case 58:
		case 0x1000003:
		case 0x1000004:
			break;

		/* undocumented TLVs found in iwx-cc-a0-48 image */
		case 0x1000000:
		case 0x1000002:
			break;

		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWX_UCODE_TLV_TYPE_HCMD:
		case IWX_UCODE_TLV_TYPE_REGIONS:
		case IWX_UCODE_TLV_TYPE_TRIGGERS:
		case IWX_UCODE_TLV_TYPE_CONF_SET:
		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
		case IWX_UCODE_TLV_CURRENT_PC:
			break;

		/* undocumented TLV found in iwx-cc-a0-67 image */
		case 0x100000b:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
		case 0x101:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
		case 0x100000c:
			break;

		/* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
		case 69:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0, ("unhandled fw parse error"));

parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

out:
	if (err) {
		fw->fw_status = IWX_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwx_fw_info_free(fw);
	} else
		fw->fw_status = IWX_FW_STATUS_DONE;
	return err;
}
1762
1763 static uint32_t
iwx_prph_addr_mask(struct iwx_softc * sc)1764 iwx_prph_addr_mask(struct iwx_softc *sc)
1765 {
1766 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1767 return 0x00ffffff;
1768 else
1769 return 0x000fffff;
1770 }
1771
1772 static uint32_t
iwx_read_prph_unlocked(struct iwx_softc * sc,uint32_t addr)1773 iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1774 {
1775 uint32_t mask = iwx_prph_addr_mask(sc);
1776 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1777 IWX_BARRIER_READ_WRITE(sc);
1778 return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1779 }
1780
/*
 * Read a peripheral register.  The NIC access lock must be held
 * (acquired via iwx_nic_lock()); panics otherwise.
 */
uint32_t
iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
{
	iwx_nic_assert_locked(sc);
	return iwx_read_prph_unlocked(sc, addr);
}
1787
1788 static void
iwx_write_prph_unlocked(struct iwx_softc * sc,uint32_t addr,uint32_t val)1789 iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1790 {
1791 uint32_t mask = iwx_prph_addr_mask(sc);
1792 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1793 IWX_BARRIER_WRITE(sc);
1794 IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1795 }
1796
/*
 * Write a peripheral register.  The NIC access lock must be held
 * (acquired via iwx_nic_lock()); panics otherwise.
 */
static void
iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_nic_assert_locked(sc);
	iwx_write_prph_unlocked(sc, addr, val);
}
1803
/* Read a UMAC peripheral register, applying the device-specific offset. */
static uint32_t
iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
{
	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
}
1809
/* Write a UMAC peripheral register, applying the device-specific offset. */
static void
iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
{
	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
}
1815
1816 static int
iwx_read_mem(struct iwx_softc * sc,uint32_t addr,void * buf,int dwords)1817 iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1818 {
1819 int offs, err = 0;
1820 uint32_t *vals = buf;
1821
1822 if (iwx_nic_lock(sc)) {
1823 IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1824 for (offs = 0; offs < dwords; offs++)
1825 vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1826 iwx_nic_unlock(sc);
1827 } else {
1828 err = EBUSY;
1829 }
1830 return err;
1831 }
1832
1833 static int
iwx_poll_bit(struct iwx_softc * sc,int reg,uint32_t bits,uint32_t mask,int timo)1834 iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1835 int timo)
1836 {
1837 for (;;) {
1838 if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1839 return 1;
1840 }
1841 if (timo < 10) {
1842 return 0;
1843 }
1844 timo -= 10;
1845 DELAY(10);
1846 }
1847 }
1848
/*
 * Acquire the device "NIC access" lock by requesting MAC access and
 * waiting for the MAC clock to become ready.  The lock nests via a
 * counter in the softc.  Returns 1 on success, 0 if the device did
 * not wake up in time.
 */
static int
iwx_nic_lock(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwx_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	/* Request MAC access; this keeps the device awake while held. */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Brief pause before polling so the request can take effect. */
	DELAY(2);

	/*
	 * Wait up to 150ms for MAC access to be granted while the MAC
	 * clock is ready and the device is not transitioning to sleep.
	 */
	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}
1874
/* Sanity check: panic if the NIC access lock is not currently held. */
static void
iwx_nic_assert_locked(struct iwx_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}
1881
1882 static void
iwx_nic_unlock(struct iwx_softc * sc)1883 iwx_nic_unlock(struct iwx_softc *sc)
1884 {
1885 if (sc->sc_nic_locks > 0) {
1886 if (--sc->sc_nic_locks == 0)
1887 IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1888 IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1889 } else
1890 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1891 }
1892
1893 static int
iwx_set_bits_mask_prph(struct iwx_softc * sc,uint32_t reg,uint32_t bits,uint32_t mask)1894 iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1895 uint32_t mask)
1896 {
1897 uint32_t val;
1898
1899 if (iwx_nic_lock(sc)) {
1900 val = iwx_read_prph(sc, reg) & mask;
1901 val |= bits;
1902 iwx_write_prph(sc, reg, val);
1903 iwx_nic_unlock(sc);
1904 return 0;
1905 }
1906 return EBUSY;
1907 }
1908
/* Set 'bits' in a peripheral register, preserving all other bits. */
static int
iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
}
1914
/* Clear 'bits' in a peripheral register, preserving all other bits. */
static int
iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1920
1921 static void
iwx_dma_map_addr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1922 iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1923 {
1924 if (error != 0)
1925 return;
1926 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1927 *(bus_addr_t *)arg = segs[0].ds_addr;
1928 }
1929
/*
 * Allocate and map a physically contiguous, zeroed DMA area of 'size'
 * bytes with the given alignment, addressable below 4GB.  The bus
 * address ends up in dma->paddr via the iwx_dma_map_addr() callback.
 * On failure all partial state is torn down and an errno is returned.
 */
static int
iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single-segment tag: the hardware expects one contiguous region. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwx_dma_map_addr() records the segment address in dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free memory here: contig_free only frees when vaddr set. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwx_dma_contig_free(dma);
	return error;
}
1968
/*
 * Free a DMA area allocated by iwx_dma_contig_alloc().  Safe to call
 * on a partially initialized or already-freed iwx_dma_info.
 */
static void
iwx_dma_contig_free(struct iwx_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1984
1985 static int
iwx_alloc_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)1986 iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1987 {
1988 bus_size_t size;
1989 int i, err;
1990
1991 ring->cur = 0;
1992
1993 /* Allocate RX descriptors (256-byte aligned). */
1994 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1995 size = sizeof(struct iwx_rx_transfer_desc);
1996 else
1997 size = sizeof(uint64_t);
1998 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1999 size * IWX_RX_MQ_RING_COUNT, 256);
2000 if (err) {
2001 device_printf(sc->sc_dev,
2002 "could not allocate RX ring DMA memory\n");
2003 goto fail;
2004 }
2005 ring->desc = ring->free_desc_dma.vaddr;
2006
2007 /* Allocate RX status area (16-byte aligned). */
2008 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2009 size = sizeof(uint16_t);
2010 else
2011 size = sizeof(*ring->stat);
2012 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
2013 if (err) {
2014 device_printf(sc->sc_dev,
2015 "could not allocate RX status DMA memory\n");
2016 goto fail;
2017 }
2018 ring->stat = ring->stat_dma.vaddr;
2019
2020 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2021 size = sizeof(struct iwx_rx_completion_desc);
2022 else
2023 size = sizeof(uint32_t);
2024 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
2025 size * IWX_RX_MQ_RING_COUNT, 256);
2026 if (err) {
2027 device_printf(sc->sc_dev,
2028 "could not allocate RX ring DMA memory\n");
2029 goto fail;
2030 }
2031
2032 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2033 BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
2034 0, NULL, NULL, &ring->data_dmat);
2035
2036 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2037 struct iwx_rx_data *data = &ring->data[i];
2038
2039 memset(data, 0, sizeof(*data));
2040 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2041 if (err) {
2042 device_printf(sc->sc_dev,
2043 "could not create RX buf DMA map\n");
2044 goto fail;
2045 }
2046
2047 err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
2048 if (err)
2049 goto fail;
2050 }
2051 return 0;
2052
2053 fail: iwx_free_rx_ring(sc, ring);
2054 return err;
2055 }
2056
2057 static void
iwx_disable_rx_dma(struct iwx_softc * sc)2058 iwx_disable_rx_dma(struct iwx_softc *sc)
2059 {
2060 int ntries;
2061
2062 if (iwx_nic_lock(sc)) {
2063 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
2064 iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
2065 else
2066 iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
2067 for (ntries = 0; ntries < 1000; ntries++) {
2068 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2069 if (iwx_read_umac_prph(sc,
2070 IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
2071 break;
2072 } else {
2073 if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
2074 IWX_RXF_DMA_IDLE)
2075 break;
2076 }
2077 DELAY(10);
2078 }
2079 iwx_nic_unlock(sc);
2080 }
2081 }
2082
2083 static void
iwx_reset_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2084 iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2085 {
2086 ring->cur = 0;
2087 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2088 BUS_DMASYNC_PREWRITE);
2089 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2090 uint16_t *status = sc->rxq.stat_dma.vaddr;
2091 *status = 0;
2092 } else
2093 memset(ring->stat, 0, sizeof(*ring->stat));
2094 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
2095 BUS_DMASYNC_POSTWRITE);
2096
2097 }
2098
2099 static void
iwx_free_rx_ring(struct iwx_softc * sc,struct iwx_rx_ring * ring)2100 iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2101 {
2102 int i;
2103
2104 iwx_dma_contig_free(&ring->free_desc_dma);
2105 iwx_dma_contig_free(&ring->stat_dma);
2106 iwx_dma_contig_free(&ring->used_desc_dma);
2107
2108 for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2109 struct iwx_rx_data *data = &ring->data[i];
2110 if (data->m != NULL) {
2111 bus_dmamap_sync(ring->data_dmat, data->map,
2112 BUS_DMASYNC_POSTREAD);
2113 bus_dmamap_unload(ring->data_dmat, data->map);
2114 m_freem(data->m);
2115 data->m = NULL;
2116 }
2117 if (data->map != NULL) {
2118 bus_dmamap_destroy(ring->data_dmat, data->map);
2119 data->map = NULL;
2120 }
2121 }
2122 if (ring->data_dmat != NULL) {
2123 bus_dma_tag_destroy(ring->data_dmat);
2124 ring->data_dmat = NULL;
2125 }
2126 }
2127
2128 static int
iwx_alloc_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring,int qid)2129 iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2130 {
2131 bus_addr_t paddr;
2132 bus_size_t size;
2133 int i, err;
2134 size_t bc_tbl_size;
2135 bus_size_t bc_align;
2136 size_t mapsize;
2137
2138 ring->qid = qid;
2139 ring->queued = 0;
2140 ring->cur = 0;
2141 ring->cur_hw = 0;
2142 ring->tail = 0;
2143 ring->tail_hw = 0;
2144
2145 /* Allocate TX descriptors (256-byte aligned). */
2146 size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2147 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2148 if (err) {
2149 device_printf(sc->sc_dev,
2150 "could not allocate TX ring DMA memory\n");
2151 goto fail;
2152 }
2153 ring->desc = ring->desc_dma.vaddr;
2154
2155 /*
2156 * The hardware supports up to 512 Tx rings which is more
2157 * than we currently need.
2158 *
2159 * In DQA mode we use 1 command queue + 1 default queue for
2160 * management, control, and non-QoS data frames.
2161 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2162 *
2163 * Tx aggregation requires additional queues, one queue per TID for
2164 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2165 * Firmware may assign its own internal IDs for these queues
2166 * depending on which TID gets aggregation enabled first.
2167 * The driver maintains a table mapping driver-side queue IDs
2168 * to firmware-side queue IDs.
2169 */
2170
2171 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2172 bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2173 IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2174 bc_align = 128;
2175 } else {
2176 bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2177 bc_align = 64;
2178 }
2179 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2180 bc_align);
2181 if (err) {
2182 device_printf(sc->sc_dev,
2183 "could not allocate byte count table DMA memory\n");
2184 goto fail;
2185 }
2186
2187 size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2188 err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2189 IWX_FIRST_TB_SIZE_ALIGN);
2190 if (err) {
2191 device_printf(sc->sc_dev,
2192 "could not allocate cmd DMA memory\n");
2193 goto fail;
2194 }
2195 ring->cmd = ring->cmd_dma.vaddr;
2196
2197 /* FW commands may require more mapped space than packets. */
2198 if (qid == IWX_DQA_CMD_QUEUE)
2199 mapsize = (sizeof(struct iwx_cmd_header) +
2200 IWX_MAX_CMD_PAYLOAD_SIZE);
2201 else
2202 mapsize = MCLBYTES;
2203 err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
2204 BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
2205 mapsize, 0, NULL, NULL, &ring->data_dmat);
2206
2207 paddr = ring->cmd_dma.paddr;
2208 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2209 struct iwx_tx_data *data = &ring->data[i];
2210
2211 data->cmd_paddr = paddr;
2212 paddr += sizeof(struct iwx_device_cmd);
2213
2214 err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2215 if (err) {
2216 device_printf(sc->sc_dev,
2217 "could not create TX buf DMA map\n");
2218 goto fail;
2219 }
2220 }
2221 KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
2222 return 0;
2223
2224 fail:
2225 return err;
2226 }
2227
2228 static void
iwx_reset_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2229 iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2230 {
2231 int i;
2232
2233 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2234 struct iwx_tx_data *data = &ring->data[i];
2235
2236 if (data->m != NULL) {
2237 bus_dmamap_sync(ring->data_dmat, data->map,
2238 BUS_DMASYNC_POSTWRITE);
2239 bus_dmamap_unload(ring->data_dmat, data->map);
2240 m_freem(data->m);
2241 data->m = NULL;
2242 }
2243 }
2244
2245 /* Clear byte count table. */
2246 memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2247
2248 /* Clear TX descriptors. */
2249 memset(ring->desc, 0, ring->desc_dma.size);
2250 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2251 BUS_DMASYNC_PREWRITE);
2252 sc->qfullmsk &= ~(1 << ring->qid);
2253 sc->qenablemsk &= ~(1 << ring->qid);
2254 for (i = 0; i < nitems(sc->aggqid); i++) {
2255 if (sc->aggqid[i] == ring->qid) {
2256 sc->aggqid[i] = 0;
2257 break;
2258 }
2259 }
2260 ring->queued = 0;
2261 ring->cur = 0;
2262 ring->cur_hw = 0;
2263 ring->tail = 0;
2264 ring->tail_hw = 0;
2265 ring->tid = 0;
2266 }
2267
2268 static void
iwx_free_tx_ring(struct iwx_softc * sc,struct iwx_tx_ring * ring)2269 iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2270 {
2271 int i;
2272
2273 iwx_dma_contig_free(&ring->desc_dma);
2274 iwx_dma_contig_free(&ring->cmd_dma);
2275 iwx_dma_contig_free(&ring->bc_tbl);
2276
2277 for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2278 struct iwx_tx_data *data = &ring->data[i];
2279
2280 if (data->m != NULL) {
2281 bus_dmamap_sync(ring->data_dmat, data->map,
2282 BUS_DMASYNC_POSTWRITE);
2283 bus_dmamap_unload(ring->data_dmat, data->map);
2284 m_freem(data->m);
2285 data->m = NULL;
2286 }
2287 if (data->map != NULL) {
2288 bus_dmamap_destroy(ring->data_dmat, data->map);
2289 data->map = NULL;
2290 }
2291 }
2292 if (ring->data_dmat != NULL) {
2293 bus_dma_tag_destroy(ring->data_dmat);
2294 ring->data_dmat = NULL;
2295 }
2296 }
2297
/*
 * Mask all interrupt sources except RF-kill so switch toggles are
 * still delivered while the device is otherwise quiesced, and enable
 * RF-kill wakeup from PCIe L1 (per the RFKILL_WAKE_L1A_EN flag).
 */
static void
iwx_enable_rfkill_int(struct iwx_softc *sc)
{
	if (!sc->sc_msix) {
		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
	} else {
		/* In MSIX mode a cause is enabled when its mask bit is 0. */
		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
		    sc->sc_fh_init_mask);
		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
	}

	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
}
2315
2316 static int
iwx_check_rfkill(struct iwx_softc * sc)2317 iwx_check_rfkill(struct iwx_softc *sc)
2318 {
2319 uint32_t v;
2320 int rv;
2321
2322 /*
2323 * "documentation" is not really helpful here:
2324 * 27: HW_RF_KILL_SW
2325 * Indicates state of (platform's) hardware RF-Kill switch
2326 *
2327 * But apparently when it's off, it's on ...
2328 */
2329 v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2330 rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2331 if (rv) {
2332 sc->sc_flags |= IWX_FLAG_RFKILL;
2333 } else {
2334 sc->sc_flags &= ~IWX_FLAG_RFKILL;
2335 }
2336
2337 return rv;
2338 }
2339
2340 static void
iwx_enable_interrupts(struct iwx_softc * sc)2341 iwx_enable_interrupts(struct iwx_softc *sc)
2342 {
2343 if (!sc->sc_msix) {
2344 sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2345 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2346 } else {
2347 /*
2348 * fh/hw_mask keeps all the unmasked causes.
2349 * Unlike msi, in msix cause is enabled when it is unset.
2350 */
2351 sc->sc_hw_mask = sc->sc_hw_init_mask;
2352 sc->sc_fh_mask = sc->sc_fh_init_mask;
2353 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2354 ~sc->sc_fh_mask);
2355 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2356 ~sc->sc_hw_mask);
2357 }
2358 }
2359
2360 static void
iwx_enable_fwload_interrupt(struct iwx_softc * sc)2361 iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2362 {
2363 if (!sc->sc_msix) {
2364 sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2365 IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2366 } else {
2367 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2368 ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2369 sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2370 /*
2371 * Leave all the FH causes enabled to get the ALIVE
2372 * notification.
2373 */
2374 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2375 ~sc->sc_fh_init_mask);
2376 sc->sc_fh_mask = sc->sc_fh_init_mask;
2377 }
2378 }
2379
#if 0
/* Currently unused: re-arm the MSI interrupt mask saved in sc_intmask. */
static void
iwx_restore_interrupts(struct iwx_softc *sc)
{
	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
}
#endif
2387
2388 static void
iwx_disable_interrupts(struct iwx_softc * sc)2389 iwx_disable_interrupts(struct iwx_softc *sc)
2390 {
2391 if (!sc->sc_msix) {
2392 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2393
2394 /* acknowledge all interrupts */
2395 IWX_WRITE(sc, IWX_CSR_INT, ~0);
2396 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2397 } else {
2398 IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2399 sc->sc_fh_init_mask);
2400 IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2401 sc->sc_hw_init_mask);
2402 }
2403 }
2404
/*
 * Reset and re-enable the interrupt cause table (ICT), a DMA area the
 * device writes interrupt causes into, then switch the driver to ICT
 * interrupt handling mode.
 */
static void
iwx_ict_reset(struct iwx_softc *sc)
{
	iwx_disable_interrupts(sc);

	/* Zero the table and rewind our read cursor. */
	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
	    IWX_CSR_DRAM_INT_TBL_ENABLE
	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWX_FLAG_USE_ICT;

	/* Ack any stale causes, then re-enable interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);
	iwx_enable_interrupts(sc);
}
2426
#define IWX_HW_READY_TIMEOUT 50
/*
 * Check whether the hardware is ready by setting NIC_READY and polling
 * for the bit to stick; on success set the OS_ALIVE mailbox bit.
 * Returns non-zero when the device reports ready.
 */
static int
iwx_set_hw_ready(struct iwx_softc *sc)
{
	int ready;

	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWX_HW_READY_TIMEOUT);
	if (ready)
		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);

	DPRINTF(("%s: ready=%d\n", __func__, ready));
	return ready;
}
#undef IWX_HW_READY_TIMEOUT
2448
/*
 * Bring the card to the HW-ready state, retrying with the PREPARE
 * handshake when the first attempt fails.  Returns 0 when ready or
 * ETIMEDOUT if the device never responds.
 */
static int
iwx_prepare_card_hw(struct iwx_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwx_set_hw_ready(sc))
		return 0;

	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);

		/*
		 * NOTE(review): 't' is not reset per outer attempt, so
		 * after the first 150ms window later attempts poll only
		 * once each; matches the upstream code — verify intent.
		 */
		do {
			if (iwx_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
2478
/*
 * Power-gating sequence used during startup of integrated 22000-family
 * devices (see iwx_start_hw()): force the chip active, enable power
 * gating and sleep, then release the force.  Each step needs
 * peripheral register access and may fail with EBUSY.
 */
static int
iwx_force_power_gating(struct iwx_softc *sc)
{
	int err;

	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	if (err)
		return err;
	DELAY(20);
	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
	if (err)
		return err;
	DELAY(20);
	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	return err;
}
2499
/*
 * Probe PCIe power-management configuration: force-disable ASPM L0s
 * on the device, then record whether the link has L0s enabled (which
 * disables our PM support) and whether LTR is enabled.
 */
static void
iwx_apm_config(struct iwx_softc *sc)
{
	uint16_t lctl, cap;
	int pcie_ptr;
	int error;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);

	error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
	if (error != 0) {
		printf("can't fill pcie_ptr\n");
		return;
	}

	/* Link Control: L0s state determines our PM support flag. */
	lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
	    sizeof(lctl));
#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
#define PCI_PCIE_DCSR2 0x28
	/* Device Control 2: check the LTR mechanism enable bit. */
	cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
	    sizeof(lctl));
#define PCI_PCIE_DCSR2_LTREN 0x00000400
	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
#undef PCI_PCIE_LCSR_ASPM_L0S
#undef PCI_PCIE_DCSR2
#undef PCI_PCIE_DCSR2_LTREN
#undef PCI_PCIE_LCSR_ASPM_L1
}
2539
/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor
 * Returns 0 on success or ETIMEDOUT if the MAC clock never stabilizes.
 */
static int
iwx_apm_init(struct iwx_softc *sc)
{
	int err = 0;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwx_apm_config(sc);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwx_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		printf("%s: timeout waiting for clock stabilization\n",
		    DEVNAME(sc));
		err = ETIMEDOUT;
		goto out;
	}
 out:
	if (err)
		printf("%s: apm init error %d\n", DEVNAME(sc), err);
	return err;
}
2593
/*
 * Put the device into a low-power stopped state: assert PREPARE and
 * PME-enable, stop bus-master DMA and wait for it to quiesce, then
 * drop INIT_DONE to return the adapter to the uninitialized state.
 */
static void
iwx_apm_stop(struct iwx_softc *sc)
{
	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		printf("%s: timeout waiting for bus master\n", DEVNAME(sc));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
2622
2623 static void
iwx_init_msix_hw(struct iwx_softc * sc)2624 iwx_init_msix_hw(struct iwx_softc *sc)
2625 {
2626 iwx_conf_msix_hw(sc, 0);
2627
2628 if (!sc->sc_msix)
2629 return;
2630
2631 sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2632 sc->sc_fh_mask = sc->sc_fh_init_mask;
2633 sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2634 sc->sc_hw_mask = sc->sc_hw_init_mask;
2635 }
2636
/*
 * Program the device's MSIX cause-to-vector routing.  In MSI mode,
 * just tell the device to use MSI (newer chips default to MSIX).
 * With 'stopped' set, skip register writes that need the NIC lock.
 * All causes are mapped to vector 0 since we use a single vector.
 */
static void
iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
{
	int vector = 0;

	if (!sc->sc_msix) {
		/* Newer chips default to MSIX. */
		if (!stopped && iwx_nic_lock(sc)) {
			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
			    IWX_UREG_CHICK_MSI_ENABLE);
			iwx_nic_unlock(sc);
		}
		return;
	}

	if (!stopped && iwx_nic_lock(sc)) {
		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
		    IWX_UREG_CHICK_MSIX_ENABLE);
		iwx_nic_unlock(sc);
	}

	/* Disable all interrupts */
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);

	/* Map fallback-queue (command/mgmt) to a single vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	/* Map RSS queue (data) to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable the RX queues cause interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);

	/* Map non-RX causes to the same vector */
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);

	/* Enable non-RX causes interrupts */
	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
	    IWX_MSIX_FH_INT_CAUSES_S2D |
	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
}
2724
2725 static int
iwx_clear_persistence_bit(struct iwx_softc * sc)2726 iwx_clear_persistence_bit(struct iwx_softc *sc)
2727 {
2728 uint32_t hpm, wprot;
2729
2730 hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2731 if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2732 wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2733 if (wprot & IWX_PREG_WFPM_ACCESS) {
2734 printf("%s: cannot clear persistence bit\n",
2735 DEVNAME(sc));
2736 return EPERM;
2737 }
2738 iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2739 hpm & ~IWX_PERSISTENCE_BIT);
2740 }
2741
2742 return 0;
2743 }
2744
/*
 * Bring the device out of reset into a state where firmware can be
 * loaded: prepare the HW, clear the persistence bit (22000 family),
 * soft-reset, initialize APM and MSIX, and arm the RF-kill interrupt.
 * Returns 0 on success or an errno value.
 */
static int
iwx_start_hw(struct iwx_softc *sc)
{
	int err;

	err = iwx_prepare_card_hw(sc);
	if (err)
		return err;

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
		err = iwx_clear_persistence_bit(sc);
		if (err)
			return err;
	}

	/* Reset the entire device */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
	    sc->sc_integrated) {
		/* Clocks must be up before forcing power gating. */
		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(20);
		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
			printf("%s: timeout waiting for clock stabilization\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}

		err = iwx_force_power_gating(sc);
		if (err)
			return err;

		/* Reset the entire device */
		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
		DELAY(5000);
	}

	err = iwx_apm_init(sc);
	if (err)
		return err;

	iwx_init_msix_hw(sc);

	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	return 0;
}
2797
/*
 * Stop the device and put it into a low power state: tear down the
 * RX/TX DMA rings, release MAC access requests, stop APM, reset the
 * on-board processor and reconfigure MSI-X so the RF-kill interrupt
 * keeps working while the device is down.
 */
static void
iwx_stop_device(struct iwx_softc *sc)
{
	int i;

	iwx_disable_interrupts(sc);
	sc->sc_flags &= ~IWX_FLAG_USE_ICT;

	/* Tear down all RX and TX DMA rings. */
	iwx_disable_rx_dma(sc);
	iwx_reset_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwx_reset_tx_ring(sc, &sc->txq[i]);
#if 0
	/* XXX-THJ: Tidy up BA state on stop */
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Make sure (redundant) we've released our request to stay awake */
	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		printf("%s: %d active NIC locks forcefully cleared\n",
		    DEVNAME(sc), sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwx_apm_stop(sc);

	/* Reset the on-board processor. */
	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwx_conf_msix_hw(sc, 1);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again.
	 */
	iwx_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwx_enable_rfkill_int(sc);
	iwx_check_rfkill(sc);

	iwx_prepare_card_hw(sc);

	/* Release firmware paging buffers and the PNVM DMA memory. */
	iwx_ctxt_info_free_paging(sc);
	iwx_dma_contig_free(&sc->pnvm_dma);
}
2858
/*
 * Program the radio configuration (type/step/dash from the firmware
 * PHY config TLV) and the MAC step/dash (from the hardware revision)
 * into IWX_CSR_HW_IF_CONFIG_REG, using a read-modify-write so only the
 * masked fields are altered.
 */
static void
iwx_nic_config(struct iwx_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Extract radio type/step/dash fields from the fw PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
	    IWX_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC step and dash come from the hardware revision register. */
	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	/* Only these fields are rewritten; all other bits are preserved. */
	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
}
2895
/*
 * Minimal RX-side init: set the interrupt coalescing timer.
 * Always returns 0.
 */
static int
iwx_nic_rx_init(struct iwx_softc *sc)
{
	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH; the firmware will do that.
	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
	 */
	return 0;
}
2907
/*
 * Initialize the NIC: APM, device-specific CSR configuration (pre-AX210
 * only), and the RX path; finally enable CSR shadow registers.
 *
 * Returns 0 on success or an errno from iwx_nic_rx_init().
 */
static int
iwx_nic_init(struct iwx_softc *sc)
{
	int err;

	/* NOTE(review): iwx_apm_init() return value is ignored here,
	 * matching the OpenBSD origin of this code — confirm intended. */
	iwx_apm_init(sc);
	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
		iwx_nic_config(sc);

	err = iwx_nic_rx_init(sc);
	if (err)
		return err;

	/* Enable shadowing of all CSR registers (0x800fffff bit mask). */
	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
2925
/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
const uint8_t iwx_ac_to_tx_fifo[] = {
	IWX_GEN2_EDCA_TX_FIFO_BE,	/* best effort */
	IWX_GEN2_EDCA_TX_FIFO_BK,	/* background */
	IWX_GEN2_EDCA_TX_FIFO_VI,	/* video */
	IWX_GEN2_EDCA_TX_FIFO_VO,	/* voice */
};
2933
/*
 * Enable a firmware Tx queue for the given station/TID pair.
 *
 * Depending on the firmware's advertised SCD_QUEUE_CONFIG_CMD version,
 * either the legacy IWX_SCD_QUEUE_CFG command (v0) or the newer
 * wide-group IWX_SCD_QUEUE_CONFIG_CMD (v3) is sent.  The firmware's
 * response is checked to verify it assigned the queue ID and write
 * pointer this driver expects (dynamic queue IDs are not supported).
 *
 * Returns 0 on success, ENOTSUP for unknown command versions, or EIO
 * if the firmware response is malformed or disagrees with our state.
 */
static int
iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
    int num_slots)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, fwqid, cmd_ver;
	uint32_t wr_idx;
	size_t resp_len;

	DPRINTF(("%s: tid=%i\n", __func__, tid));
	DPRINTF(("%s: qid=%i\n", __func__, qid));
	/* Start from a clean ring before handing it to firmware. */
	iwx_reset_tx_ring(sc, ring);

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: DMA addresses passed directly. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: ADD operation with a station bitmask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
		cmd_v3.u.add.flags = htole32(0);
		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.add.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	fwqid = le16toh(resp->queue_number);
	wr_idx = le16toh(resp->write_pointer);

	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
	if (fwqid != qid) {
		DPRINTF(("%s: === fwqid != qid\n", __func__));
		err = EIO;
		goto out;
	}

	/* Firmware's write pointer must match our hardware ring index. */
	if (wr_idx != ring->cur_hw) {
		DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
		err = EIO;
		goto out;
	}

	/* Queue is live: record it in the enabled-queue mask. */
	sc->qenablemsk |= (1 << qid);
	ring->tid = tid;
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3026
/*
 * Disable a firmware Tx queue for the given station/TID pair,
 * mirroring iwx_enable_txq(): the legacy IWX_SCD_QUEUE_CFG command
 * with the enable flag cleared (v0), or the v3 REMOVE operation.
 * On success the queue is cleared from the enabled-queue mask and the
 * ring is reset.
 *
 * Returns 0 on success, ENOTSUP for unknown command versions, or EIO
 * on a failed firmware response.
 */
static int
iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_queue_cfg_rsp *resp;
	struct iwx_tx_queue_cfg_cmd cmd_v0;
	struct iwx_scd_queue_cfg_cmd cmd_v3;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	struct iwx_tx_ring *ring = &sc->txq[qid];
	int err, cmd_ver;

	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
		/* Legacy command: zeroed addresses and flags disable it. */
		memset(&cmd_v0, 0, sizeof(cmd_v0));
		cmd_v0.sta_id = sta_id;
		cmd_v0.tid = tid;
		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
		cmd_v0.cb_size = htole32(0);
		cmd_v0.byte_cnt_addr = htole64(0);
		cmd_v0.tfdq_addr = htole64(0);
		hcmd.id = IWX_SCD_QUEUE_CFG;
		hcmd.data[0] = &cmd_v0;
		hcmd.len[0] = sizeof(cmd_v0);
	} else if (cmd_ver == 3) {
		/* v3 command: REMOVE operation with a station bitmask. */
		memset(&cmd_v3, 0, sizeof(cmd_v3));
		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
		cmd_v3.u.remove.tid = tid;
		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD);
		hcmd.data[0] = &cmd_v3;
		hcmd.len[0] = sizeof(cmd_v3);
	} else {
		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
		    DEVNAME(sc), cmd_ver);
		return ENOTSUP;
	}

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Queue is gone: drop it from the mask and reset the ring. */
	sc->qenablemsk &= ~(1 << qid);
	iwx_reset_tx_ring(sc, ring);
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3085
3086 static void
iwx_post_alive(struct iwx_softc * sc)3087 iwx_post_alive(struct iwx_softc *sc)
3088 {
3089 int txcmd_ver;
3090
3091 iwx_ict_reset(sc);
3092
3093 txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
3094 if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
3095 sc->sc_rate_n_flags_version = 2;
3096 else
3097 sc->sc_rate_n_flags_version = 1;
3098
3099 txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
3100 }
3101
/*
 * Ask firmware to start a session-protection time event of
 * 'duration_tu' time units for the given node (used to keep the
 * device on-channel during association).  On success the
 * IWX_FLAG_TE_ACTIVE flag is set so iwx_unprotect_session() knows a
 * time event is pending.
 *
 * Returns 0 on success or an errno from the command submission.
 */
static int
iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
    uint32_t duration_tu)
{

	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = htole32(duration_tu),
	};
	uint32_t cmd_id;
	int err;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (!err)
		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
	return err;
}
3123
/*
 * Cancel a previously scheduled session-protection time event for the
 * given node.  No-op if no time event is active.  Clears
 * IWX_FLAG_TE_ACTIVE only when the removal command succeeds.
 */
static void
iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_session_prot_cmd cmd = {
		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
		    in->in_color)),
		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = 0,
	};
	uint32_t cmd_id;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
		return;

	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
}
3144
3145 /*
3146 * NVM read access and content parsing. We do not support
3147 * external NVM or writing NVM.
3148 */
3149
3150 static uint8_t
iwx_fw_valid_tx_ant(struct iwx_softc * sc)3151 iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3152 {
3153 uint8_t tx_ant;
3154
3155 tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3156 >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3157
3158 if (sc->sc_nvm.valid_tx_ant)
3159 tx_ant &= sc->sc_nvm.valid_tx_ant;
3160
3161 return tx_ant;
3162 }
3163
3164 static uint8_t
iwx_fw_valid_rx_ant(struct iwx_softc * sc)3165 iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3166 {
3167 uint8_t rx_ant;
3168
3169 rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3170 >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3171
3172 if (sc->sc_nvm.valid_rx_ant)
3173 rx_ant &= sc->sc_nvm.valid_rx_ant;
3174
3175 return rx_ant;
3176 }
3177
3178 static void
iwx_init_channel_map(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])3179 iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
3180 struct ieee80211_channel chans[])
3181 {
3182 struct iwx_softc *sc = ic->ic_softc;
3183 struct iwx_nvm_data *data = &sc->sc_nvm;
3184 uint8_t bands[IEEE80211_MODE_BYTES];
3185 const uint8_t *nvm_channels;
3186 uint32_t ch_flags;
3187 int ch_idx, nchan;
3188
3189 if (sc->sc_uhb_supported) {
3190 nchan = nitems(iwx_nvm_channels_uhb);
3191 nvm_channels = iwx_nvm_channels_uhb;
3192 } else {
3193 nchan = nitems(iwx_nvm_channels_8000);
3194 nvm_channels = iwx_nvm_channels_8000;
3195 }
3196
3197 /* 2.4Ghz; 1-13: 11b/g channels. */
3198 if (!data->sku_cap_band_24GHz_enable)
3199 goto band_5;
3200
3201 memset(bands, 0, sizeof(bands));
3202 setbit(bands, IEEE80211_MODE_11B);
3203 setbit(bands, IEEE80211_MODE_11G);
3204 setbit(bands, IEEE80211_MODE_11NG);
3205 for (ch_idx = 0;
3206 ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
3207 ch_idx++) {
3208
3209 uint32_t nflags = 0;
3210 int cflags = 0;
3211
3212 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
3213 ch_flags = le32_to_cpup(
3214 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3215 } else {
3216 ch_flags = le16_to_cpup(
3217 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3218 }
3219 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3220 continue;
3221
3222 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3223 cflags |= NET80211_CBW_FLAG_HT40;
3224
3225 /* XXX-BZ nflags RADAR/DFS/INDOOR */
3226
3227 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3228 nvm_channels[ch_idx],
3229 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
3230 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3231 nflags, bands, cflags);
3232 }
3233
3234 band_5:
3235 /* 5Ghz */
3236 if (!data->sku_cap_band_52GHz_enable)
3237 goto band_6;
3238
3239
3240 memset(bands, 0, sizeof(bands));
3241 setbit(bands, IEEE80211_MODE_11A);
3242 setbit(bands, IEEE80211_MODE_11NA);
3243 setbit(bands, IEEE80211_MODE_VHT_5GHZ);
3244
3245 for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
3246 ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
3247 ch_idx++) {
3248 uint32_t nflags = 0;
3249 int cflags = 0;
3250
3251 if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
3252 ch_flags = le32_to_cpup(
3253 sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
3254 else
3255 ch_flags = le16_to_cpup(
3256 sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
3257
3258 if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
3259 continue;
3260
3261 if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
3262 cflags |= NET80211_CBW_FLAG_HT40;
3263 if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
3264 cflags |= NET80211_CBW_FLAG_VHT80;
3265 if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
3266 cflags |= NET80211_CBW_FLAG_VHT160;
3267
3268 /* XXX-BZ nflags RADAR/DFS/INDOOR */
3269
3270 /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
3271 nvm_channels[ch_idx],
3272 ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
3273 /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
3274 nflags, bands, cflags);
3275 }
3276 band_6:
3277 /* 6GHz one day ... */
3278 return;
3279 }
3280
3281 static int
iwx_mimo_enabled(struct iwx_softc * sc)3282 iwx_mimo_enabled(struct iwx_softc *sc)
3283 {
3284
3285 return !sc->sc_nvm.sku_cap_mimo_disable;
3286 }
3287
3288 static void
iwx_init_reorder_buffer(struct iwx_reorder_buffer * reorder_buf,uint16_t ssn,uint16_t buf_size)3289 iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3290 uint16_t ssn, uint16_t buf_size)
3291 {
3292 reorder_buf->head_sn = ssn;
3293 reorder_buf->num_stored = 0;
3294 reorder_buf->buf_size = buf_size;
3295 reorder_buf->last_amsdu = 0;
3296 reorder_buf->last_sub_index = 0;
3297 reorder_buf->removed = 0;
3298 reorder_buf->valid = 0;
3299 reorder_buf->consec_oldsn_drops = 0;
3300 reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3301 reorder_buf->consec_oldsn_prev_drop = 0;
3302 }
3303
3304 static void
iwx_clear_reorder_buffer(struct iwx_softc * sc,struct iwx_rxba_data * rxba)3305 iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3306 {
3307 struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3308
3309 reorder_buf->removed = 1;
3310 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3311 }
3312
3313 #define IWX_MAX_RX_BA_SESSIONS 16
3314
3315 static struct iwx_rxba_data *
iwx_find_rxba_data(struct iwx_softc * sc,uint8_t tid)3316 iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3317 {
3318 int i;
3319
3320 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3321 if (sc->sc_rxba_data[i].baid ==
3322 IWX_RX_REORDER_DATA_INVALID_BAID)
3323 continue;
3324 if (sc->sc_rxba_data[i].tid == tid)
3325 return &sc->sc_rxba_data[i];
3326 }
3327
3328 return NULL;
3329 }
3330
/*
 * Send an RX_BAID_ALLOCATION_CONFIG_CMD to add (start != 0) or remove
 * an RX block-ack session for the given TID.
 *
 * On add, the firmware-assigned BAID is returned via *baid after a
 * range check against sc_rxba_data.  On remove, *baid is set to the
 * existing session's BAID; the v1 command addresses the session by
 * BAID while newer versions use a station mask and TID.
 *
 * Returns 0 on success, ENOENT if no session exists for removal,
 * ERANGE for an out-of-range firmware BAID, or an errno from the
 * command submission.
 */
static int
iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
    uint8_t *baid)
{
	struct iwx_rx_baid_cfg_cmd cmd;
	uint32_t new_baid = 0;
	int err;

	IWX_ASSERT_LOCKED(sc);

	memset(&cmd, 0, sizeof(cmd));

	if (start) {
		cmd.action = IWX_RX_BAID_ACTION_ADD;
		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = htole16(ssn);
		cmd.alloc.win_size = htole16(winsize);
	} else {
		struct iwx_rxba_data *rxba;

		rxba = iwx_find_rxba_data(sc, tid);
		if (rxba == NULL)
			return ENOENT;
		*baid = rxba->baid;

		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
		/* v1 removes by BAID; later versions by sta mask + tid. */
		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
			cmd.remove_v1.baid = rxba->baid;
		} else {
			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
			cmd.remove.tid = tid;
		}
	}

	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
	if (err)
		return err;

	if (start) {
		/* Firmware picked the BAID; it must index sc_rxba_data. */
		if (new_baid >= nitems(sc->sc_rxba_data))
			return ERANGE;
		*baid = new_baid;
	}

	return 0;
}
3381
/*
 * Start (start != 0) or stop an RX block-ack (A-MPDU RX) session for
 * the given TID, updating the driver's per-BAID reorder bookkeeping
 * and the active-session count.  Caps concurrent sessions at
 * IWX_MAX_RX_BA_SESSIONS.
 */
static void
iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
{
	int err;
	struct iwx_rxba_data *rxba = NULL;
	uint8_t baid = 0;

	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
		return;
	}

	/*
	 * Only the BAID-allocation command path is implemented; firmware
	 * without IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT is not supported,
	 * so err is always assigned before the check below.
	 */
	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
		    timeout_val, start, &baid);
	} else {
		panic("sta_rx_agg unsupported hw");
	}
	if (err) {
		DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
		return;
	} else
		DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));

	rxba = &sc->sc_rxba_data[baid];

	/* Deaggregation is done in hardware. */
	if (start) {
		/* Slot already holds an active session; nothing to do. */
		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
			return;
		}
		rxba->sta_id = IWX_STATION_ID;
		rxba->tid = tid;
		rxba->baid = baid;
		rxba->timeout = timeout_val;
		getmicrouptime(&rxba->last_rx);
		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
		    winsize);
		if (timeout_val != 0) {
			DPRINTF(("%s: timeout_val != 0\n", __func__));
			/* Early return skips the session count update below. */
			return;
		}
	} else
		iwx_clear_reorder_buffer(sc, rxba);

	if (start) {
		sc->sc_rx_ba_sessions++;
	} else if (sc->sc_rx_ba_sessions > 0)
		sc->sc_rx_ba_sessions--;
}
3432
3433 /**
3434 * @brief Allocate an A-MPDU / aggregation session for the given node and TID.
3435 *
3436 * This allocates a TX queue specifically for that TID.
3437 *
3438 * Note that this routine currently doesn't return any status/errors,
3439 * so the caller can't know if the aggregation session was setup or not.
3440 */
3441 static void
iwx_sta_tx_agg_start(struct iwx_softc * sc,struct ieee80211_node * ni,uint8_t tid)3442 iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3443 uint8_t tid)
3444 {
3445 int err, qid;
3446
3447 qid = sc->aggqid[tid];
3448 if (qid == 0) {
3449 /* Firmware should pick the next unused Tx queue. */
3450 qid = fls(sc->qenablemsk);
3451 }
3452
3453 DPRINTF(("%s: qid=%i\n", __func__, qid));
3454
3455 /*
3456 * Simply enable the queue.
3457 * Firmware handles Tx Ba session setup and teardown.
3458 */
3459 if ((sc->qenablemsk & (1 << qid)) == 0) {
3460 if (!iwx_nic_lock(sc)) {
3461 return;
3462 }
3463 err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3464 IWX_TX_RING_COUNT);
3465 iwx_nic_unlock(sc);
3466 if (err) {
3467 printf("%s: could not enable Tx queue %d "
3468 "(error %d)\n", DEVNAME(sc), qid, err);
3469 return;
3470 }
3471 }
3472 ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
3473 DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
3474 sc->aggqid[tid] = qid;
3475 }
3476
/*
 * Deferred task that services pending RX block-ack start/stop requests
 * recorded in sc->ba_rx.{start,stop}_tidmask, calling iwx_sta_rx_agg()
 * for each flagged TID under the driver lock.
 */
static void
iwx_ba_rx_task(void *arg, int npending __unused)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_node *ni = vap->iv_bss;
	int tid;

	IWX_LOCK(sc);
	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
			break;
		if (sc->ba_rx.start_tidmask & (1 << tid)) {
			struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
			DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
			    ba->ba_flags));
			/*
			 * NOTE(review): this 'break' (and the one below)
			 * stops scanning the remaining TIDs entirely;
			 * confirm 'continue' was not intended.
			 */
			if (ba->ba_flags == IWX_BA_DONE) {
				DPRINTF(("%s: ampdu for tid %i already added\n",
				    __func__, tid));
				break;
			}

			DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
			    tid));
			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
			    ba->ba_winsize, ba->ba_timeout_val, 1);
			sc->ba_rx.start_tidmask &= ~(1 << tid);
			ba->ba_flags = IWX_BA_DONE;
		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
			sc->ba_rx.stop_tidmask &= ~(1 << tid);
		}
	}
	IWX_UNLOCK(sc);
}
3513
3514 /**
3515 * @brief Task called to setup a deferred block-ack session.
3516 *
3517 * This sets up any/all pending blockack sessions as defined
3518 * in sc->ba_tx.start_tidmask.
3519 *
3520 * Note: the call to iwx_sta_tx_agg_start() isn't being error checked.
3521 */
3522 static void
iwx_ba_tx_task(void * arg,int npending __unused)3523 iwx_ba_tx_task(void *arg, int npending __unused)
3524 {
3525 struct iwx_softc *sc = arg;
3526 struct ieee80211com *ic = &sc->sc_ic;
3527 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3528 struct ieee80211_node *ni = vap->iv_bss;
3529 uint32_t started_mask = 0;
3530 int tid;
3531
3532 IWX_LOCK(sc);
3533 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3534 const struct ieee80211_tx_ampdu *tap;
3535
3536 if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3537 break;
3538 tap = &ni->ni_tx_ampdu[tid];
3539 if (IEEE80211_AMPDU_RUNNING(tap))
3540 break;
3541 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3542 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
3543 "%s: ampdu tx start for tid %i\n", __func__, tid);
3544 iwx_sta_tx_agg_start(sc, ni, tid);
3545 sc->ba_tx.start_tidmask &= ~(1 << tid);
3546 started_mask |= (1 << tid);
3547 }
3548 }
3549
3550 IWX_UNLOCK(sc);
3551
3552 /* Iterate over the sessions we started; mark them as active */
3553 for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3554 if (started_mask & (1 << tid)) {
3555 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
3556 "%s: informing net80211 to start ampdu on tid %i\n",
3557 __func__, tid);
3558 ieee80211_ampdu_tx_request_active_ext(ni, tid, 1);
3559 }
3560 }
3561 }
3562
/*
 * Read the MAC address from the device's CSR strap registers, falling
 * back to the OTP registers when the strap (OEM-fused) address is not
 * valid.  Leaves data->hw_addr zeroed if NIC access cannot be
 * obtained.
 */
static void
iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
{
	uint32_t mac_addr0, mac_addr1;

	memset(data->hw_addr, 0, sizeof(data->hw_addr));

	if (!iwx_nic_lock(sc))
		return;

	/*
	 * NOTE(review): htole32() is applied to the register value read
	 * in host order — presumably so iwx_flip_hw_address() sees a
	 * fixed byte layout; confirm behavior on big-endian hosts.
	 */
	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	/* If OEM fused a valid address, use it instead of the one in OTP. */
	if (iwx_is_valid_mac_addr(data->hw_addr)) {
		iwx_nic_unlock(sc);
		return;
	}

	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));

	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

	iwx_nic_unlock(sc);
}
3591
3592 static int
iwx_is_valid_mac_addr(const uint8_t * addr)3593 iwx_is_valid_mac_addr(const uint8_t *addr)
3594 {
3595 static const uint8_t reserved_mac[] = {
3596 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3597 };
3598
3599 return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3600 memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3601 memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3602 !ETHER_IS_MULTICAST(addr));
3603 }
3604
/*
 * Assemble the 6-byte MAC address from two 32-bit OTP/strap words,
 * reversing the byte order within each word: dest[0..3] come from
 * mac_addr0's bytes in reverse, dest[4..5] from mac_addr1's low two
 * bytes in reverse.
 */
static void
iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
{
	uint8_t b0[sizeof(mac_addr0)];
	uint8_t b1[sizeof(mac_addr1)];

	/* memcpy preserves the same in-memory byte view the original
	 * pointer cast produced. */
	memcpy(b0, &mac_addr0, sizeof(b0));
	memcpy(b1, &mac_addr1, sizeof(b1));

	dest[0] = b0[3];
	dest[1] = b0[2];
	dest[2] = b0[1];
	dest[3] = b0[0];
	dest[4] = b1[1];
	dest[5] = b1[0];
}
3620
/*
 * Fetch NVM contents from firmware via NVM_GET_INFO and populate
 * sc->sc_nvm: version, hardware address count, MAC/PHY SKU capability
 * flags, valid antenna masks and LAR support.  The MAC address itself
 * is read from CSR registers.  The raw response is kept in
 * sc->sc_rsp_info (tagged v3 or v4) for later channel-profile parsing.
 *
 * Returns 0 on success, EIO on a malformed response, EINVAL if no
 * valid MAC address could be determined, or an errno from the command
 * submission.
 */
static int
iwx_nvm_get(struct iwx_softc *sc)
{
	struct iwx_nvm_get_info cmd = {};
	struct iwx_nvm_data *nvm = &sc->sc_nvm;
	struct iwx_host_cmd hcmd = {
		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO)
	};
	int err = 0;
	uint32_t mac_flags;
	/*
	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory. So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwx_nvm_get_info_rsp *rsp;
	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);

	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
	err = iwx_send_cmd(sc, &hcmd);
	if (err) {
		printf("%s: failed to send cmd (error %d)", __func__, err);
		return err;
	}

	/* The payload must exactly match the expected response size. */
	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
		printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
		    iwx_rx_packet_payload_len(hcmd.resp_pkt));
		printf("%s: resp_len=%zu\n", __func__, resp_len);
		err = EIO;
		goto out;
	}

	memset(nvm, 0, sizeof(*nvm));

	/* The MAC address comes from CSR registers, not the NVM rsp. */
	iwx_set_mac_addr_from_csr(sc, nvm);
	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
		printf("%s: no valid mac address was found\n", DEVNAME(sc));
		err = EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;

	/* Initialize general data */
	nvm->nvm_version = le16toh(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;

	/* Initialize MAC sku data */
	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52GHz_enable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disable =
	    !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);

	/* LAR requires both NVM and firmware support. */
	if (le32toh(rsp->regulatory.lar_enabled) &&
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		nvm->lar_enabled = 1;
	}

	/* Stash the raw response for later channel-profile parsing. */
	memcpy(&sc->sc_rsp_info, rsp, resp_len);
	if (v4) {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
	} else {
		sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
3710
/*
 * Upload the regular firmware image using the context info mechanism
 * (gen3 for AX210 and later) and sleep until the firmware signals it
 * is alive (sc_uc is woken by the interrupt path) or one second
 * elapses.
 *
 * Returns 0 on success, EINVAL if firmware never reported OK, or the
 * error from context-info setup / msleep.
 */
static int
iwx_load_firmware(struct iwx_softc *sc)
{
	struct iwx_fw_sects *fws;
	int err;

	IWX_ASSERT_LOCKED(sc)

	sc->sc_uc.uc_intr = 0;
	sc->sc_uc.uc_ok = 0;

	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
		err = iwx_ctxt_info_gen3_init(sc, fws);
	else
		err = iwx_ctxt_info_init(sc, fws);
	if (err) {
		printf("%s: could not init context info\n", DEVNAME(sc));
		return err;
	}

	/* wait for the firmware to load */
	err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
	if (err || !sc->sc_uc.uc_ok) {
		printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
		iwx_ctxt_info_free_paging(sc);
	}

	/* The IML and firmware image DMA are no longer needed. */
	iwx_dma_contig_free(&sc->iml_dma);
	iwx_ctxt_info_free_fw_img(sc);

	if (!sc->sc_uc.uc_ok)
		return EINVAL;

	return err;
}
3747
/*
 * Prepare the device for a firmware load: clear pending interrupts
 * and rfkill handshake bits, initialize the NIC, enable the firmware
 * load interrupt and then hand off to iwx_load_firmware().
 *
 * Returns 0 on success or an errno from NIC init / firmware load.
 */
static int
iwx_start_fw(struct iwx_softc *sc)
{
	int err;

	/* Acknowledge any pending interrupts. */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	iwx_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupt */
	IWX_WRITE(sc, IWX_CSR_INT, ~0);

	err = iwx_nic_init(sc);
	if (err) {
		printf("%s: unable to init nic\n", DEVNAME(sc));
		return err;
	}

	iwx_enable_fwload_interrupt(sc);

	return iwx_load_firmware(sc);
}
3775
3776 static int
iwx_pnvm_handle_section(struct iwx_softc * sc,const uint8_t * data,size_t len)3777 iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
3778 size_t len)
3779 {
3780 const struct iwx_ucode_tlv *tlv;
3781 uint32_t sha1 = 0;
3782 uint16_t mac_type = 0, rf_id = 0;
3783 uint8_t *pnvm_data = NULL, *tmp;
3784 int hw_match = 0;
3785 uint32_t size = 0;
3786 int err;
3787
3788 while (len >= sizeof(*tlv)) {
3789 uint32_t tlv_len, tlv_type;
3790
3791 len -= sizeof(*tlv);
3792 tlv = (const void *)data;
3793
3794 tlv_len = le32toh(tlv->length);
3795 tlv_type = le32toh(tlv->type);
3796
3797 if (len < tlv_len) {
3798 printf("%s: invalid TLV len: %zd/%u\n",
3799 DEVNAME(sc), len, tlv_len);
3800 err = EINVAL;
3801 goto out;
3802 }
3803
3804 data += sizeof(*tlv);
3805
3806 switch (tlv_type) {
3807 case IWX_UCODE_TLV_PNVM_VERSION:
3808 if (tlv_len < sizeof(uint32_t))
3809 break;
3810
3811 sha1 = le32_to_cpup((const uint32_t *)data);
3812 break;
3813 case IWX_UCODE_TLV_HW_TYPE:
3814 if (tlv_len < 2 * sizeof(uint16_t))
3815 break;
3816
3817 if (hw_match)
3818 break;
3819
3820 mac_type = le16_to_cpup((const uint16_t *)data);
3821 rf_id = le16_to_cpup((const uint16_t *)(data +
3822 sizeof(uint16_t)));
3823
3824 if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
3825 rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
3826 hw_match = 1;
3827 break;
3828 case IWX_UCODE_TLV_SEC_RT: {
3829 const struct iwx_pnvm_section *section;
3830 uint32_t data_len;
3831
3832 section = (const void *)data;
3833 data_len = tlv_len - sizeof(*section);
3834
3835 /* TODO: remove, this is a deprecated separator */
3836 if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
3837 break;
3838
3839 tmp = malloc(size + data_len, M_DEVBUF,
3840 M_WAITOK | M_ZERO);
3841 if (tmp == NULL) {
3842 err = ENOMEM;
3843 goto out;
3844 }
3845 // XXX:misha pnvm_data is NULL and size is 0 at first pass
3846 memcpy(tmp, pnvm_data, size);
3847 memcpy(tmp + size, section->data, data_len);
3848 free(pnvm_data, M_DEVBUF);
3849 pnvm_data = tmp;
3850 size += data_len;
3851 break;
3852 }
3853 case IWX_UCODE_TLV_PNVM_SKU:
3854 /* New PNVM section started, stop parsing. */
3855 goto done;
3856 default:
3857 break;
3858 }
3859
3860 if (roundup(tlv_len, 4) > len)
3861 break;
3862 len -= roundup(tlv_len, 4);
3863 data += roundup(tlv_len, 4);
3864 }
3865 done:
3866 if (!hw_match || size == 0) {
3867 err = ENOENT;
3868 goto out;
3869 }
3870
3871 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
3872 if (err) {
3873 printf("%s: could not allocate DMA memory for PNVM\n",
3874 DEVNAME(sc));
3875 err = ENOMEM;
3876 goto out;
3877 }
3878 memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
3879 iwx_ctxt_info_gen3_set_pnvm(sc);
3880 sc->sc_pnvm_ver = sha1;
3881 out:
3882 free(pnvm_data, M_DEVBUF);
3883 return err;
3884 }
3885
3886 static int
iwx_pnvm_parse(struct iwx_softc * sc,const uint8_t * data,size_t len)3887 iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
3888 {
3889 const struct iwx_ucode_tlv *tlv;
3890
3891 while (len >= sizeof(*tlv)) {
3892 uint32_t tlv_len, tlv_type;
3893
3894 len -= sizeof(*tlv);
3895 tlv = (const void *)data;
3896
3897 tlv_len = le32toh(tlv->length);
3898 tlv_type = le32toh(tlv->type);
3899
3900 if (len < tlv_len || roundup(tlv_len, 4) > len)
3901 return EINVAL;
3902
3903 if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
3904 const struct iwx_sku_id *sku_id =
3905 (const void *)(data + sizeof(*tlv));
3906
3907 data += sizeof(*tlv) + roundup(tlv_len, 4);
3908 len -= roundup(tlv_len, 4);
3909
3910 if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
3911 sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
3912 sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
3913 iwx_pnvm_handle_section(sc, data, len) == 0)
3914 return 0;
3915 } else {
3916 data += sizeof(*tlv) + roundup(tlv_len, 4);
3917 len -= roundup(tlv_len, 4);
3918 }
3919 }
3920
3921 return ENOENT;
3922 }
3923
3924 /* Make AX210 firmware loading context point at PNVM image in DMA memory. */
3925 static void
iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc * sc)3926 iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
3927 {
3928 struct iwx_prph_scratch *prph_scratch;
3929 struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
3930
3931 prph_scratch = sc->prph_scratch_dma.vaddr;
3932 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
3933
3934 prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
3935 prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
3936
3937 bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
3938 }
3939
3940 /*
3941 * Load platform-NVM (non-volatile-memory) data from the filesystem.
3942 * This data apparently contains regulatory information and affects device
3943 * channel configuration.
3944 * The SKU of AX210 devices tells us which PNVM file section is needed.
3945 * Pre-AX210 devices store NVM data onboard.
3946 */
3947 static int
iwx_load_pnvm(struct iwx_softc * sc)3948 iwx_load_pnvm(struct iwx_softc *sc)
3949 {
3950 const int wait_flags = IWX_PNVM_COMPLETE;
3951 int err = 0;
3952 const struct firmware *pnvm;
3953
3954 if (sc->sc_sku_id[0] == 0 &&
3955 sc->sc_sku_id[1] == 0 &&
3956 sc->sc_sku_id[2] == 0)
3957 return 0;
3958
3959 if (sc->sc_pnvm_name) {
3960 if (sc->pnvm_dma.vaddr == NULL) {
3961 IWX_UNLOCK(sc);
3962 pnvm = firmware_get(sc->sc_pnvm_name);
3963 if (pnvm == NULL) {
3964 printf("%s: could not read %s (error %d)\n",
3965 DEVNAME(sc), sc->sc_pnvm_name, err);
3966 IWX_LOCK(sc);
3967 return EINVAL;
3968 }
3969 sc->sc_pnvm = pnvm;
3970
3971 err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
3972 IWX_LOCK(sc);
3973 if (err && err != ENOENT) {
3974 return EINVAL;
3975 }
3976 } else
3977 iwx_ctxt_info_gen3_set_pnvm(sc);
3978 }
3979
3980 if (!iwx_nic_lock(sc)) {
3981 return EBUSY;
3982 }
3983
3984 /*
3985 * If we don't have a platform NVM file simply ask firmware
3986 * to proceed without it.
3987 */
3988
3989 iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
3990 IWX_UREG_DOORBELL_TO_ISR6_PNVM);
3991
3992 /* Wait for the pnvm complete notification from firmware. */
3993 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3994 err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
3995 if (err)
3996 break;
3997 }
3998
3999 iwx_nic_unlock(sc);
4000
4001 return err;
4002 }
4003
4004 static int
iwx_send_tx_ant_cfg(struct iwx_softc * sc,uint8_t valid_tx_ant)4005 iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4006 {
4007 struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4008 .valid = htole32(valid_tx_ant),
4009 };
4010
4011 return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4012 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4013 }
4014
4015 static int
iwx_send_phy_cfg_cmd(struct iwx_softc * sc)4016 iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4017 {
4018 struct iwx_phy_cfg_cmd phy_cfg_cmd;
4019
4020 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4021 phy_cfg_cmd.calib_control.event_trigger =
4022 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4023 phy_cfg_cmd.calib_control.flow_trigger =
4024 sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4025
4026 return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4027 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4028 }
4029
4030 static int
iwx_send_dqa_cmd(struct iwx_softc * sc)4031 iwx_send_dqa_cmd(struct iwx_softc *sc)
4032 {
4033 struct iwx_dqa_enable_cmd dqa_cmd = {
4034 .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4035 };
4036 uint32_t cmd_id;
4037
4038 cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4039 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4040 }
4041
4042 static int
iwx_load_ucode_wait_alive(struct iwx_softc * sc)4043 iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4044 {
4045 int err;
4046
4047 IWX_UNLOCK(sc);
4048 err = iwx_read_firmware(sc);
4049 IWX_LOCK(sc);
4050 if (err)
4051 return err;
4052
4053 err = iwx_start_fw(sc);
4054 if (err)
4055 return err;
4056
4057 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4058 err = iwx_load_pnvm(sc);
4059 if (err)
4060 return err;
4061 }
4062
4063 iwx_post_alive(sc);
4064
4065 return 0;
4066 }
4067
/*
 * Boot the firmware far enough to perform NVM access: load the ucode,
 * send the extended-config and NVM-access-complete commands, and wait
 * for the init-complete notification.  When 'readnvm' is set, also read
 * the NVM contents and latch the device MAC address into ic_macaddr.
 *
 * Called with the driver lock held (required by the msleep below).
 * Returns 0 on success or an errno.
 */
static int
iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
{
	const int wait_flags = IWX_INIT_COMPLETE;
	struct iwx_nvm_access_complete_cmd nvm_complete = {};
	struct iwx_init_extended_cfg_cmd init_cfg = {
		.init_flags = htole32(IWX_INIT_NVM),
	};

	int err;

	/* Hard rfkill blocks firmware boot unless we only want the NVM. */
	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
		printf("%s: radio is disabled by hardware switch\n",
		    DEVNAME(sc));
		return EPERM;
	}

	/* Cleared here; set by the notification interrupt handlers. */
	sc->sc_init_complete = 0;
	err = iwx_load_ucode_wait_alive(sc);
	if (err) {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: failed to load init firmware\n", DEVNAME(sc));
		return err;
	} else {
		IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
		    "%s: successfully loaded init firmware\n", __func__);
	}

	/*
	 * Send init config command to mark that we are sending NVM
	 * access commands
	 */
	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
	if (err) {
		printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
		    err);
		return err;
	}

	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
	if (err) {
		return err;
	}

	/* Wait for the init complete notification from the firmware. */
	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
		/* Releases sc_mtx while sleeping; 2 second timeout. */
		err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
		if (err) {
			DPRINTF(("%s: will return err=%d\n", __func__, err));
			return err;
		} else {
			DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
			    __func__));
		}
	}

	if (readnvm) {
		err = iwx_nvm_get(sc);
		DPRINTF(("%s: err=%d\n", __func__, err));
		if (err) {
			printf("%s: failed to read nvm (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		} else {
			DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
		}
		/* Adopt the MAC address stored in the device's NVM. */
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
	}
	return 0;
}
4140
4141 static int
iwx_config_ltr(struct iwx_softc * sc)4142 iwx_config_ltr(struct iwx_softc *sc)
4143 {
4144 struct iwx_ltr_config_cmd cmd = {
4145 .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4146 };
4147
4148 if (!sc->sc_ltr_enabled)
4149 return 0;
4150
4151 return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4152 }
4153
4154 static void
iwx_update_rx_desc(struct iwx_softc * sc,struct iwx_rx_ring * ring,int idx,bus_dma_segment_t * seg)4155 iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
4156 bus_dma_segment_t *seg)
4157 {
4158 struct iwx_rx_data *data = &ring->data[idx];
4159
4160 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4161 struct iwx_rx_transfer_desc *desc = ring->desc;
4162 desc[idx].rbid = htole16(idx & 0xffff);
4163 desc[idx].addr = htole64((*seg).ds_addr);
4164 bus_dmamap_sync(ring->data_dmat, data->map,
4165 BUS_DMASYNC_PREWRITE);
4166 } else {
4167 ((uint64_t *)ring->desc)[idx] =
4168 htole64((*seg).ds_addr);
4169 bus_dmamap_sync(ring->data_dmat, data->map,
4170 BUS_DMASYNC_PREWRITE);
4171 }
4172 }
4173
4174 static int
iwx_rx_addbuf(struct iwx_softc * sc,int size,int idx)4175 iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4176 {
4177 struct iwx_rx_ring *ring = &sc->rxq;
4178 struct iwx_rx_data *data = &ring->data[idx];
4179 struct mbuf *m;
4180 int err;
4181 int fatal = 0;
4182 bus_dma_segment_t seg;
4183 int nsegs;
4184
4185 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
4186 if (m == NULL)
4187 return ENOBUFS;
4188
4189 if (data->m != NULL) {
4190 bus_dmamap_unload(ring->data_dmat, data->map);
4191 fatal = 1;
4192 }
4193
4194 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4195 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
4196 &nsegs, BUS_DMA_NOWAIT);
4197 if (err) {
4198 /* XXX */
4199 if (fatal)
4200 panic("could not load RX mbuf");
4201 m_freem(m);
4202 return err;
4203 }
4204 data->m = m;
4205 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
4206
4207 /* Update RX descriptor. */
4208 iwx_update_rx_desc(sc, ring, idx, &seg);
4209 return 0;
4210 }
4211
4212 static int
iwx_rxmq_get_signal_strength(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4213 iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4214 struct iwx_rx_mpdu_desc *desc)
4215 {
4216 int energy_a, energy_b;
4217
4218 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4219 energy_a = desc->v3.energy_a;
4220 energy_b = desc->v3.energy_b;
4221 } else {
4222 energy_a = desc->v1.energy_a;
4223 energy_b = desc->v1.energy_b;
4224 }
4225 energy_a = energy_a ? -energy_a : -256;
4226 energy_b = energy_b ? -energy_b : -256;
4227 return MAX(energy_a, energy_b);
4228 }
4229
4230 static int
iwx_rxmq_get_chains(struct iwx_softc * sc,struct iwx_rx_mpdu_desc * desc)4231 iwx_rxmq_get_chains(struct iwx_softc *sc,
4232 struct iwx_rx_mpdu_desc *desc)
4233 {
4234
4235 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4236 return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4237 IWX_RATE_MCS_ANT_POS);
4238 else
4239 return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
4240 IWX_RATE_MCS_ANT_POS);
4241 }
4242
/*
 * Handle an RX_PHY_CMD notification: cache the PHY info so it can be
 * paired with a subsequent MPDU notification.
 */
static void
iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid;
	/*
	 * NOTE(review): this indexes the TX queue array with the qid of an
	 * RX notification, and 'ring' is only used to pick a DMA tag for
	 * the sync below — confirm this is intentional.
	 */
	struct iwx_tx_ring *ring = &sc->txq[qid];

	/*
	 * NOTE(review): PREREAD before copying data out of the buffer
	 * looks like it should be POSTREAD — verify against bus_dma(9).
	 */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
4255
4256 /*
4257 * Retrieve the average noise (in dBm) among receivers.
4258 */
4259 static int
iwx_get_noise(const struct iwx_statistics_rx_non_phy * stats)4260 iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4261 {
4262 int i, total, nbant, noise;
4263
4264 total = nbant = noise = 0;
4265 for (i = 0; i < 3; i++) {
4266 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
4267 if (noise) {
4268 total += noise;
4269 nbant++;
4270 }
4271 }
4272
4273 /* There should be at least one antenna but check anyway. */
4274 return (nbant == 0) ? -127 : (total / nbant) - 107;
4275 }
4276
#if 0
/*
 * OpenBSD-derived CCMP decapsulation / replay-counter check.
 * Currently compiled out in this port — hardware decryption support is
 * unfinished (see the "XXX hw decrypt" block in iwx_rx_frame), and this
 * uses OpenBSD net80211 APIs (ieee80211_get_rxkey, k_rsc, rxi_flags)
 * that have no direct FreeBSD equivalents.  Presumably kept for
 * reference until that support is ported.
 */
int
iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct ieee80211_rxinfo *rxi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_key *k;
	struct ieee80211_frame *wh;
	uint64_t pn, *prsc;
	uint8_t *ivp;
	uint8_t tid;
	int hdrlen, hasqos;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_get_hdrlen(wh);
	ivp = (uint8_t *)wh + hdrlen;

	/* find key for decryption */
	k = ieee80211_get_rxkey(ic, m, ni);
	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
		return 1;

	/* Check that ExtIV bit is be set. */
	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
		return 1;

	hasqos = ieee80211_has_qos(wh);
	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
	prsc = &k->k_rsc[tid];

	/* Extract the 48-bit PN from the CCMP header. */
	pn = (uint64_t)ivp[0] |
	    (uint64_t)ivp[1] << 8 |
	    (uint64_t)ivp[4] << 16 |
	    (uint64_t)ivp[5] << 24 |
	    (uint64_t)ivp[6] << 32 |
	    (uint64_t)ivp[7] << 40;
	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
		if (pn < *prsc) {
			ic->ic_stats.is_ccmp_replays++;
			return 1;
		}
	} else if (pn <= *prsc) {
		ic->ic_stats.is_ccmp_replays++;
		return 1;
	}
	/* Last seen packet number is updated in ieee80211_inputm(). */

	/*
	 * Some firmware versions strip the MIC, and some don't. It is not
	 * clear which of the capability flags could tell us what to expect.
	 * For now, keep things simple and just leave the MIC in place if
	 * it is present.
	 *
	 * The IV will be stripped by ieee80211_inputm().
	 */
	return 0;
}
#endif
4336
4337 static int
iwx_rx_hwdecrypt(struct iwx_softc * sc,struct mbuf * m,uint32_t rx_pkt_status)4338 iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
4339 {
4340 struct ieee80211_frame *wh;
4341 int ret = 0;
4342 uint8_t type, subtype;
4343
4344 wh = mtod(m, struct ieee80211_frame *);
4345
4346 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4347 if (type == IEEE80211_FC0_TYPE_CTL) {
4348 return 0;
4349 }
4350
4351 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4352 if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
4353 return 0;
4354 }
4355
4356
4357 if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
4358 IEEE80211_FC0_TYPE_CTL)
4359 && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
4360 if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4361 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4362 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
4363 ret = 1;
4364 goto out;
4365 }
4366 /* Check whether decryption was successful or not. */
4367 if ((rx_pkt_status &
4368 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4369 IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4370 (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4371 IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4372 DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
4373 ret = 1;
4374 goto out;
4375 }
4376 }
4377 out:
4378 return ret;
4379 }
4380
/*
 * Deliver a received frame to net80211: resolve the channel, fill in the
 * radiotap header when a capture is active, and pass the mbuf up via
 * ieee80211_input_mimo{,_all}().  Consumes the mbuf.
 */
static void
iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
    uint32_t device_timestamp, uint8_t rssi)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	/*
	 * We need to turn the hardware provided channel index into a channel
	 * and then find it in our ic_channels array
	 */
	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
		/*
		 * OpenBSD points this at the ibss chan, which it defaults to
		 * channel 1 and then never touches again. Skip a step.
		 */
		printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
		chanidx = 1;
	}

	/* Map the IEEE channel number onto an ic_channels[] index. */
	int channel = chanidx;
	for (int i = 0; i < ic->ic_nchans; i++) {
		if (ic->ic_channels[i].ic_ieee == channel) {
			chanidx = i;
		}
	}
	ic->ic_curchan = &ic->ic_channels[chanidx];

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

#if 0	/* XXX hw decrypt */
	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
		m_freem(m);
		ieee80211_release_node(ic, ni);
		return;
	}
#endif
	/* Populate the radiotap header for active sniffers. */
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t chan_flags;
		int have_legacy_rate = 1;
		uint8_t mcs, rate;

		tap->wr_flags = 0;
		if (is_shortpre)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[chanidx].ic_freq);
		chan_flags = ic->ic_channels[chanidx].ic_flags;
#if 0
		if (ic->ic_curmode != IEEE80211_MODE_11N &&
		    ic->ic_curmode != IEEE80211_MODE_11AC) {
			chan_flags &= ~IEEE80211_CHAN_HT;
			chan_flags &= ~IEEE80211_CHAN_40MHZ;
		}
		if (ic->ic_curmode != IEEE80211_MODE_11AC)
			chan_flags &= ~IEEE80211_CHAN_VHT;
#else
		chan_flags &= ~IEEE80211_CHAN_HT;
#endif
		tap->wr_chan_flags = htole16(chan_flags);
		tap->wr_dbm_antsignal = rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = device_timestamp;

		/*
		 * Decode the rate: firmware with rate_n_flags v2 uses a
		 * different bit layout than older (v1) firmware.
		 */
		if (sc->sc_rate_n_flags_version >= 2) {
			uint32_t mod_type = (rate_n_flags &
			    IWX_RATE_MCS_MOD_TYPE_MSK);
			const struct ieee80211_rateset *rs = NULL;
			uint32_t ridx;
			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
			if (mod_type == IWX_RATE_MCS_CCK_MSK)
				rs = &ieee80211_std_rateset_11b;
			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
				rs = &ieee80211_std_rateset_11a;
			if (rs && ridx < rs->rs_nrates) {
				rate = (rs->rs_rates[ridx] &
				    IEEE80211_RATE_VAL);
			} else
				rate = 0;
		} else {
			have_legacy_rate = ((rate_n_flags &
			    (IWX_RATE_MCS_HT_MSK_V1 |
			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
			mcs = (rate_n_flags &
			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
			    IWX_RATE_HT_MCS_NSS_MSK_V1));
			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
		}
		if (!have_legacy_rate) {
			/* HT/VHT rates are reported as MCS (0x80 marker). */
			tap->wr_rate = (0x80 | mcs);
		} else {
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
			// XXX hack - this needs rebased with the new rate stuff anyway
			tap->wr_rate = rate;
		}
	}

	/* net80211 input may sleep/recurse; do not hold the driver lock. */
	IWX_UNLOCK(sc);
	if (ni == NULL) {
		if (ieee80211_input_mimo_all(ic, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
	} else {

		if (ieee80211_input_mimo(ni, m) == -1)
			printf("%s:%d input_all returned -1\n", __func__, __LINE__);
		ieee80211_free_node(ni);
	}
	IWX_LOCK(sc);
}
4516
4517 static void
iwx_rx_mpdu_mq(struct iwx_softc * sc,struct mbuf * m,void * pktdata,size_t maxlen)4518 iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
4519 size_t maxlen)
4520 {
4521 struct ieee80211com *ic = &sc->sc_ic;
4522 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4523 struct ieee80211_node *ni = vap->iv_bss;
4524 struct ieee80211_key *k;
4525 struct ieee80211_rx_stats rxs;
4526 struct iwx_rx_mpdu_desc *desc;
4527 uint32_t len, hdrlen, rate_n_flags, device_timestamp;
4528 int rssi;
4529 uint8_t chanidx;
4530 uint16_t phy_info;
4531 size_t desc_size;
4532 int pad = 0;
4533
4534 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
4535 desc_size = sizeof(*desc);
4536 else
4537 desc_size = IWX_RX_DESC_SIZE_V1;
4538
4539 if (maxlen < desc_size) {
4540 m_freem(m);
4541 return; /* drop */
4542 }
4543
4544 desc = (struct iwx_rx_mpdu_desc *)pktdata;
4545
4546 if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
4547 !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
4548 printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
4549 m_freem(m);
4550 return; /* drop */
4551 }
4552
4553 len = le16toh(desc->mpdu_len);
4554 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4555 /* Allow control frames in monitor mode. */
4556 if (len < sizeof(struct ieee80211_frame_cts)) {
4557 m_freem(m);
4558 return;
4559 }
4560
4561 } else if (len < sizeof(struct ieee80211_frame)) {
4562 m_freem(m);
4563 return;
4564 }
4565 if (len > maxlen - desc_size) {
4566 m_freem(m);
4567 return;
4568 }
4569
4570 // TODO: arithmetic on a pointer to void is a GNU extension
4571 m->m_data = (char *)pktdata + desc_size;
4572 m->m_pkthdr.len = m->m_len = len;
4573
4574 /* Account for padding following the frame header. */
4575 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
4576 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4577 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4578 if (type == IEEE80211_FC0_TYPE_CTL) {
4579 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
4580 case IEEE80211_FC0_SUBTYPE_CTS:
4581 hdrlen = sizeof(struct ieee80211_frame_cts);
4582 break;
4583 case IEEE80211_FC0_SUBTYPE_ACK:
4584 hdrlen = sizeof(struct ieee80211_frame_ack);
4585 break;
4586 default:
4587 hdrlen = sizeof(struct ieee80211_frame_min);
4588 break;
4589 }
4590 } else
4591 hdrlen = ieee80211_hdrsize(wh);
4592
4593 if ((le16toh(desc->status) &
4594 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4595 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4596 // CCMP header length
4597 hdrlen += 8;
4598 }
4599
4600 memmove(m->m_data + 2, m->m_data, hdrlen);
4601 m_adj(m, 2);
4602
4603 }
4604
4605 if ((le16toh(desc->status) &
4606 IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
4607 IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4608 pad = 1;
4609 }
4610
4611 /* If it's a HT node then perform re-order processing */
4612 if (ni->ni_flags & IEEE80211_NODE_HT)
4613 m->m_flags |= M_AMPDU;
4614
4615 /*
4616 * Hardware de-aggregates A-MSDUs and copies the same MAC header
4617 * in place for each subframe. But it leaves the 'A-MSDU present'
4618 * bit set in the frame header. We need to clear this bit ourselves.
4619 * (XXX This workaround is not required on AX200/AX201 devices that
4620 * have been tested by me, but it's unclear when this problem was
4621 * fixed in the hardware. It definitely affects the 9k generation.
4622 * Leaving this in place for now since some 9k/AX200 hybrids seem
4623 * to exist that we may eventually add support for.)
4624 *
4625 * And we must allow the same CCMP PN for subframes following the
4626 * first subframe. Otherwise they would be discarded as replays.
4627 */
4628 if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
4629 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4630 uint8_t subframe_idx = (desc->amsdu_info &
4631 IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4632 uint8_t *qos;
4633
4634 rxs.c_pktflags |= IEEE80211_RX_F_AMSDU;
4635 if (subframe_idx > 0)
4636 rxs.c_pktflags |= IEEE80211_RX_F_AMSDU_MORE;
4637
4638 /* XXX should keep driver statistics about this */
4639 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
4640 "%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__);
4641
4642 qos = ieee80211_getqos(wh);
4643 qos[0] &= ~IEEE80211_QOS_AMSDU;
4644 }
4645
4646 /*
4647 * Verify decryption before duplicate detection. The latter uses
4648 * the TID supplied in QoS frame headers and this TID is implicitly
4649 * verified as part of the CCMP nonce.
4650 */
4651 k = ieee80211_crypto_get_txkey(ni, m);
4652 if (k != NULL &&
4653 (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
4654 iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
4655 DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
4656 m_freem(m);
4657 return;
4658 }
4659
4660 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4661 rate_n_flags = le32toh(desc->v3.rate_n_flags);
4662 chanidx = desc->v3.channel;
4663 device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
4664 } else {
4665 rate_n_flags = le32toh(desc->v1.rate_n_flags);
4666 chanidx = desc->v1.channel;
4667 device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
4668 }
4669
4670 phy_info = le16toh(desc->phy_info);
4671
4672 rssi = iwx_rxmq_get_signal_strength(sc, desc);
4673 rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
4674 rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
4675
4676 memset(&rxs, 0, sizeof(rxs));
4677 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
4678 rxs.r_flags |= IEEE80211_R_BAND;
4679 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
4680 rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
4681
4682 rxs.c_ieee = chanidx;
4683 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
4684 chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
4685 rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
4686 rxs.c_rx_tsf = device_timestamp;
4687 rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
4688 if (rxs.c_chain != 0)
4689 rxs.r_flags |= IEEE80211_R_C_CHAIN;
4690
4691 /* rssi is in 1/2db units */
4692 rxs.c_rssi = rssi * 2;
4693 rxs.c_nf = sc->sc_noise;
4694
4695 if (pad) {
4696 rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
4697 rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
4698 }
4699
4700 if (ieee80211_add_rx_params(m, &rxs) == 0) {
4701 printf("%s: ieee80211_add_rx_params failed\n", __func__);
4702 return;
4703 }
4704
4705 ieee80211_add_rx_params(m, &rxs);
4706
4707 #if 0
4708 if (iwx_rx_reorder(sc, m, chanidx, desc,
4709 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4710 rate_n_flags, device_timestamp, &rxi, ml))
4711 return;
4712 #endif
4713
4714 if (pad) {
4715 #define TRIM 8
4716 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4717 hdrlen = ieee80211_hdrsize(wh);
4718 memmove(m->m_data + TRIM, m->m_data, hdrlen);
4719 m_adj(m, TRIM);
4720 #undef TRIM
4721 }
4722
4723 iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
4724 (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
4725 rate_n_flags, device_timestamp, rssi);
4726 }
4727
4728 static void
iwx_clear_tx_desc(struct iwx_softc * sc,struct iwx_tx_ring * ring,int idx)4729 iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
4730 {
4731 struct iwx_tfh_tfd *desc = &ring->desc[idx];
4732 uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
4733 int i;
4734
4735 /* First TB is never cleared - it is bidirectional DMA data. */
4736 for (i = 1; i < num_tbs; i++) {
4737 struct iwx_tfh_tb *tb = &desc->tbs[i];
4738 memset(tb, 0, sizeof(*tb));
4739 }
4740 desc->num_tbs = htole16(1);
4741
4742 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4743 BUS_DMASYNC_PREWRITE);
4744 }
4745
4746 static void
iwx_txd_done(struct iwx_softc * sc,struct iwx_tx_ring * ring,struct iwx_tx_data * txd)4747 iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
4748 struct iwx_tx_data *txd)
4749 {
4750 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
4751 bus_dmamap_unload(ring->data_dmat, txd->map);
4752
4753 ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
4754 txd->m = NULL;
4755 txd->in = NULL;
4756 }
4757
4758 static void
iwx_txq_advance(struct iwx_softc * sc,struct iwx_tx_ring * ring,uint16_t idx)4759 iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
4760 {
4761 struct iwx_tx_data *txd;
4762
4763 while (ring->tail_hw != idx) {
4764 txd = &ring->data[ring->tail];
4765 if (txd->m != NULL) {
4766 iwx_clear_tx_desc(sc, ring, ring->tail);
4767 iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
4768 iwx_txd_done(sc, ring, txd);
4769 ring->queued--;
4770 if (ring->queued < 0)
4771 panic("caught negative queue count");
4772 }
4773 ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
4774 ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
4775 }
4776 }
4777
/*
 * Handle a TX response notification from the firmware: update interface
 * counters for the completed frame and reclaim TX ring slots up to the
 * reported SSN.
 */
static void
iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
    struct iwx_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
	int qid = cmd_hdr->qid, status, txfail;
	struct iwx_tx_ring *ring = &sc->txq[qid];
	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
	uint32_t ssn;
	uint32_t len = iwx_rx_packet_len(pkt);
	int idx = cmd_hdr->idx;
	struct iwx_tx_data *txd = &ring->data[idx];
	struct mbuf *m = txd->m;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Sanity checks. */
	if (sizeof(*tx_resp) > len)
		return;
	/* Non-aggregation queues should never report multiple frames. */
	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
		return;
	/* Aggregation responses carry per-frame status plus the SSN. */
	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
		return;

	/* The queue made progress; reset its watchdog timer. */
	sc->sc_tx_timer[qid] = 0;

	if (tx_resp->frame_count > 1) /* A-MPDU */
		return;

	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
	txfail = (status != IWX_TX_STATUS_SUCCESS &&
	    status != IWX_TX_STATUS_DIRECT_DONE);

#ifdef __not_yet__
	/* TODO: Replace accounting below with ieee80211_tx_complete() */
	ieee80211_tx_complete(&in->in_ni, m, txfail);
#else
	if (txfail)
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	else {
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (m->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
	}
#endif
	/*
	 * On hardware supported by iwx(4) the SSN counter corresponds
	 * to a Tx ring index rather than a sequence number.
	 * Frames up to this index (non-inclusive) can now be freed.
	 */
	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
	ssn = le32toh(ssn);
	if (ssn < sc->max_tfd_queue_size) {
		iwx_txq_advance(sc, ring, ssn);
		iwx_clear_oactive(sc, ring);
	}
}
4839
4840 static void
iwx_clear_oactive(struct iwx_softc * sc,struct iwx_tx_ring * ring)4841 iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
4842 {
4843 IWX_ASSERT_LOCKED(sc);
4844
4845 if (ring->queued < iwx_lomark) {
4846 sc->qfullmsk &= ~(1 << ring->qid);
4847 if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
4848 /*
4849 * Well, we're in interrupt context, but then again
4850 * I guess net80211 does all sorts of stunts in
4851 * interrupt context, so maybe this is no biggie.
4852 */
4853 iwx_start(sc);
4854 }
4855 }
4856 }
4857
4858 static void
iwx_rx_compressed_ba(struct iwx_softc * sc,struct iwx_rx_packet * pkt)4859 iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
4860 {
4861 struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
4862 struct ieee80211com *ic = &sc->sc_ic;
4863 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4864 struct iwx_node *in = IWX_NODE(vap->iv_bss);
4865 struct ieee80211_node *ni = &in->in_ni;
4866 struct iwx_tx_ring *ring;
4867 uint16_t i, tfd_cnt, ra_tid_cnt, idx;
4868 int qid;
4869
4870 // if (ic->ic_state != IEEE80211_S_RUN)
4871 // return;
4872
4873 if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
4874 return;
4875
4876 if (ba_res->sta_id != IWX_STATION_ID)
4877 return;
4878
4879 in = (void *)ni;
4880
4881 tfd_cnt = le16toh(ba_res->tfd_cnt);
4882 ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
4883 if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
4884 sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
4885 sizeof(ba_res->tfd[0]) * tfd_cnt))
4886 return;
4887
4888 for (i = 0; i < tfd_cnt; i++) {
4889 struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
4890 uint8_t tid;
4891
4892 tid = ba_tfd->tid;
4893 if (tid >= nitems(sc->aggqid))
4894 continue;
4895
4896 qid = sc->aggqid[tid];
4897 if (qid != htole16(ba_tfd->q_num))
4898 continue;
4899
4900 ring = &sc->txq[qid];
4901
4902 #if 0
4903 ba = &ni->ni_tx_ba[tid];
4904 if (ba->ba_state != IEEE80211_BA_AGREED)
4905 continue;
4906 #endif
4907 idx = le16toh(ba_tfd->tfd_index);
4908 sc->sc_tx_timer[qid] = 0;
4909 iwx_txq_advance(sc, ring, idx);
4910 iwx_clear_oactive(sc, ring);
4911 }
4912 }
4913
4914 static void
iwx_rx_bmiss(struct iwx_softc * sc,struct iwx_rx_packet * pkt,struct iwx_rx_data * data)4915 iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4916 struct iwx_rx_data *data)
4917 {
4918 struct ieee80211com *ic = &sc->sc_ic;
4919 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4920 struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
4921 uint32_t missed;
4922
4923 if ((ic->ic_opmode != IEEE80211_M_STA) ||
4924 (vap->iv_state != IEEE80211_S_RUN))
4925 return;
4926
4927 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4928 BUS_DMASYNC_POSTREAD);
4929
4930 IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
4931 "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
4932 __func__,
4933 le32toh(mbn->mac_id),
4934 le32toh(mbn->consec_missed_beacons_since_last_rx),
4935 le32toh(mbn->consec_missed_beacons),
4936 le32toh(mbn->num_expected_beacons),
4937 le32toh(mbn->num_recvd_beacons));
4938
4939 missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
4940 if (missed > vap->iv_bmissthreshold) {
4941 ieee80211_beacon_miss(ic);
4942 }
4943 }
4944
4945 static int
iwx_binding_cmd(struct iwx_softc * sc,struct iwx_node * in,uint32_t action)4946 iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
4947 {
4948 struct iwx_binding_cmd cmd;
4949 struct ieee80211com *ic = &sc->sc_ic;
4950 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4951 struct iwx_vap *ivp = IWX_VAP(vap);
4952 struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
4953 uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
4954 int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
4955 uint32_t status;
4956
4957 if (action == IWX_FW_CTXT_ACTION_ADD && active)
4958 panic("binding already added");
4959 if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
4960 panic("binding already removed");
4961
4962 if (phyctxt == NULL) /* XXX race with iwx_stop() */
4963 return EINVAL;
4964
4965 memset(&cmd, 0, sizeof(cmd));
4966
4967 cmd.id_and_color
4968 = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4969 cmd.action = htole32(action);
4970 cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4971
4972 cmd.macs[0] = htole32(mac_id);
4973 for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
4974 cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
4975
4976 if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
4977 !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
4978 cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
4979 else
4980 cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
4981
4982 status = 0;
4983 err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
4984 &cmd, &status);
4985 if (err == 0 && status != 0)
4986 err = EIO;
4987
4988 return err;
4989 }
4990
4991 static uint8_t
iwx_get_vht_ctrl_pos(struct ieee80211com * ic,struct ieee80211_channel * chan)4992 iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
4993 {
4994 int ctlchan = ieee80211_chan2ieee(ic, chan);
4995 int midpoint = chan->ic_vht_ch_freq1;
4996
4997 /*
4998 * The FW is expected to check the control channel position only
4999 * when in HT/VHT and the channel width is not 20MHz. Return
5000 * this value as the default one:
5001 */
5002 uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5003
5004 switch (ctlchan - midpoint) {
5005 case -6:
5006 pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5007 break;
5008 case -2:
5009 pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5010 break;
5011 case 2:
5012 pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5013 break;
5014 case 6:
5015 pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5016 break;
5017 default:
5018 break;
5019 }
5020
5021 return pos;
5022 }
5023
/*
 * Build and send a v3/v4 PHY context command using the "ultra high
 * band" (UHB) command layout, which has a larger fw_channel_info
 * member than the legacy layout.
 *
 * NOTE(review): the 'sco' and 'vht_chan_width' arguments are unused in
 * this variant — channel width is derived from the channel flags
 * instead; confirm this matches the callers' expectations.
 */
static int
iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd_uhb cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	/* 5 GHz LMAC only on CDB-capable firmware; otherwise 2.4 GHz LMAC. */
	if (IEEE80211_IS_CHAN_2GHZ(chan) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));

	/* Channel width and control channel position from channel flags. */
	if (IEEE80211_IS_CHAN_VHT80(chan)) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (IEEE80211_IS_CHAN_HT40(chan)) {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		if (IEEE80211_IS_CHAN_HT40D(chan))
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
		else
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	/*
	 * Rx chain configuration is only part of this command on older
	 * firmware; newer firmware takes it via IWX_RLC_CONFIG_CMD v2.
	 */
	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
5077
/*
 * Legacy (non-UHB) variant of the v3/v4 PHY context command.
 * Compiled out: iwx_phy_ctxt_cmd() currently panics on hardware that
 * lacks IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS instead of using this.
 * Kept for reference until pre-UHB devices are supported.
 */
#if 0
int
iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
    uint8_t vht_chan_width, int cmdver)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_phy_context_cmd cmd;
	uint8_t active_cnt, idle_cnt;
	struct ieee80211_channel *chan = ctxt->channel;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
	    ctxt->color));
	cmd.action = htole32(action);

	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);

	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
		if (sco == IEEE80211_HTOP0_SCO_SCA) {
			/* secondary chan above -> control chan below */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
			/* secondary chan below -> control chan above */
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
		} else {
			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
		}
	} else {
		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
	}

	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) != 2) {
		idle_cnt = chains_static;
		active_cnt = chains_dynamic;
		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
		    IWX_PHY_RX_CHAIN_VALID_POS);
		cmd.rxchain_info |= htole32(idle_cnt <<
		    IWX_PHY_RX_CHAIN_CNT_POS);
		cmd.rxchain_info |= htole32(active_cnt <<
		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
	}

	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
#endif
5139
5140 static int
iwx_phy_ctxt_cmd(struct iwx_softc * sc,struct iwx_phy_ctxt * ctxt,uint8_t chains_static,uint8_t chains_dynamic,uint32_t action,uint32_t apply_time,uint8_t sco,uint8_t vht_chan_width)5141 iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5142 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5143 uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5144 {
5145 int cmdver;
5146
5147 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5148 if (cmdver != 3 && cmdver != 4) {
5149 printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5150 DEVNAME(sc));
5151 return ENOTSUP;
5152 }
5153
5154 /*
5155 * Intel increased the size of the fw_channel_info struct and neglected
5156 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5157 * member in the middle.
5158 * To keep things simple we use a separate function to handle the larger
5159 * variant of the phy context command.
5160 */
5161 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5162 return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5163 chains_dynamic, action, sco, vht_chan_width, cmdver);
5164 } else
5165 panic("Unsupported old hardware contact thj@");
5166
5167 #if 0
5168 return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5169 action, sco, vht_chan_width, cmdver);
5170 #endif
5171 }
5172
5173 static int
iwx_send_cmd(struct iwx_softc * sc,struct iwx_host_cmd * hcmd)5174 iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5175 {
5176 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5177 struct iwx_tfh_tfd *desc;
5178 struct iwx_tx_data *txdata;
5179 struct iwx_device_cmd *cmd;
5180 struct mbuf *m;
5181 bus_addr_t paddr;
5182 uint64_t addr;
5183 int err = 0, i, paylen, off/*, s*/;
5184 int idx, code, async, group_id;
5185 size_t hdrlen, datasz;
5186 uint8_t *data;
5187 int generation = sc->sc_generation;
5188 bus_dma_segment_t seg[10];
5189 int nsegs;
5190
5191 code = hcmd->id;
5192 async = hcmd->flags & IWX_CMD_ASYNC;
5193 idx = ring->cur;
5194
5195 for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5196 paylen += hcmd->len[i];
5197 }
5198
5199 /* If this command waits for a response, allocate response buffer. */
5200 hcmd->resp_pkt = NULL;
5201 if (hcmd->flags & IWX_CMD_WANT_RESP) {
5202 uint8_t *resp_buf;
5203 KASSERT(!async, ("async command want response"));
5204 KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
5205 ("wrong pkt len 1"));
5206 KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
5207 ("wrong pkt len 2"));
5208 if (sc->sc_cmd_resp_pkt[idx] != NULL)
5209 return ENOSPC;
5210 resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5211 M_NOWAIT | M_ZERO);
5212 if (resp_buf == NULL)
5213 return ENOMEM;
5214 sc->sc_cmd_resp_pkt[idx] = resp_buf;
5215 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5216 } else {
5217 sc->sc_cmd_resp_pkt[idx] = NULL;
5218 }
5219
5220 desc = &ring->desc[idx];
5221 txdata = &ring->data[idx];
5222
5223 /*
5224 * XXX Intel inside (tm)
5225 * Firmware API versions >= 50 reject old-style commands in
5226 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5227 * that such commands were in the LONG_GROUP instead in order
5228 * for firmware to accept them.
5229 */
5230 if (iwx_cmd_groupid(code) == 0) {
5231 code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5232 txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5233 } else
5234 txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5235
5236 group_id = iwx_cmd_groupid(code);
5237
5238 hdrlen = sizeof(cmd->hdr_wide);
5239 datasz = sizeof(cmd->data_wide);
5240
5241 if (paylen > datasz) {
5242 /* Command is too large to fit in pre-allocated space. */
5243 size_t totlen = hdrlen + paylen;
5244 if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5245 printf("%s: firmware command too long (%zd bytes)\n",
5246 DEVNAME(sc), totlen);
5247 err = EINVAL;
5248 goto out;
5249 }
5250 if (totlen > IWX_RBUF_SIZE)
5251 panic("totlen > IWX_RBUF_SIZE");
5252 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
5253 if (m == NULL) {
5254 printf("%s: could not get fw cmd mbuf (%i bytes)\n",
5255 DEVNAME(sc), IWX_RBUF_SIZE);
5256 err = ENOMEM;
5257 goto out;
5258 }
5259 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5260 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
5261 seg, &nsegs, BUS_DMA_NOWAIT);
5262 if (nsegs > 20)
5263 panic("nsegs > 20");
5264 DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
5265 if (err) {
5266 printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5267 DEVNAME(sc), totlen);
5268 m_freem(m);
5269 goto out;
5270 }
5271 txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5272 cmd = mtod(m, struct iwx_device_cmd *);
5273 paddr = seg[0].ds_addr;
5274 } else {
5275 cmd = &ring->cmd[idx];
5276 paddr = txdata->cmd_paddr;
5277 }
5278
5279 memset(cmd, 0, sizeof(*cmd));
5280 cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5281 cmd->hdr_wide.group_id = group_id;
5282 cmd->hdr_wide.qid = ring->qid;
5283 cmd->hdr_wide.idx = idx;
5284 cmd->hdr_wide.length = htole16(paylen);
5285 cmd->hdr_wide.version = iwx_cmd_version(code);
5286 data = cmd->data_wide;
5287
5288 for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5289 if (hcmd->len[i] == 0)
5290 continue;
5291 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5292 off += hcmd->len[i];
5293 }
5294 KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
5295
5296 desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5297 addr = htole64(paddr);
5298 memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5299 if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5300 DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
5301 paylen));
5302 desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5303 IWX_FIRST_TB_SIZE);
5304 addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5305 memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5306 desc->num_tbs = htole16(2);
5307 } else
5308 desc->num_tbs = htole16(1);
5309
5310 if (paylen > datasz) {
5311 bus_dmamap_sync(ring->data_dmat, txdata->map,
5312 BUS_DMASYNC_PREWRITE);
5313 } else {
5314 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5315 BUS_DMASYNC_PREWRITE);
5316 }
5317 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5318 BUS_DMASYNC_PREWRITE);
5319
5320 /* Kick command ring. */
5321 ring->queued++;
5322 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5323 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5324 DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
5325 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5326
5327 if (!async) {
5328 err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
5329 if (err == 0) {
5330 /* if hardware is no longer up, return error */
5331 if (generation != sc->sc_generation) {
5332 err = ENXIO;
5333 goto out;
5334 }
5335
5336 /* Response buffer will be freed in iwx_free_resp(). */
5337 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5338 sc->sc_cmd_resp_pkt[idx] = NULL;
5339 } else if (generation == sc->sc_generation) {
5340 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
5341 sc->sc_cmd_resp_pkt[idx] = NULL;
5342 }
5343 }
5344 out:
5345 return err;
5346 }
5347
5348 static int
iwx_send_cmd_pdu(struct iwx_softc * sc,uint32_t id,uint32_t flags,uint16_t len,const void * data)5349 iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5350 uint16_t len, const void *data)
5351 {
5352 struct iwx_host_cmd cmd = {
5353 .id = id,
5354 .len = { len, },
5355 .data = { data, },
5356 .flags = flags,
5357 };
5358
5359 return iwx_send_cmd(sc, &cmd);
5360 }
5361
5362 static int
iwx_send_cmd_status(struct iwx_softc * sc,struct iwx_host_cmd * cmd,uint32_t * status)5363 iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5364 uint32_t *status)
5365 {
5366 struct iwx_rx_packet *pkt;
5367 struct iwx_cmd_response *resp;
5368 int err, resp_len;
5369
5370 KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
5371 cmd->flags |= IWX_CMD_WANT_RESP;
5372 cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5373
5374 err = iwx_send_cmd(sc, cmd);
5375 if (err)
5376 return err;
5377
5378 pkt = cmd->resp_pkt;
5379 if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5380 return EIO;
5381
5382 resp_len = iwx_rx_packet_payload_len(pkt);
5383 if (resp_len != sizeof(*resp)) {
5384 iwx_free_resp(sc, cmd);
5385 return EIO;
5386 }
5387
5388 resp = (void *)pkt->data;
5389 *status = le32toh(resp->status);
5390 iwx_free_resp(sc, cmd);
5391 return err;
5392 }
5393
5394 static int
iwx_send_cmd_pdu_status(struct iwx_softc * sc,uint32_t id,uint16_t len,const void * data,uint32_t * status)5395 iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5396 const void *data, uint32_t *status)
5397 {
5398 struct iwx_host_cmd cmd = {
5399 .id = id,
5400 .len = { len, },
5401 .data = { data, },
5402 };
5403
5404 return iwx_send_cmd_status(sc, &cmd, status);
5405 }
5406
/*
 * Release the response buffer allocated for a synchronous command.
 * Only valid for commands that were sent with IWX_CMD_WANT_RESP.
 */
static void
iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
{
	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
	    ("hcmd flags !IWX_CMD_WANT_RESP"));
	/* free(NULL) is a no-op, so an already-consumed response is fine. */
	free(hcmd->resp_pkt, M_DEVBUF);
	hcmd->resp_pkt = NULL;
}
5415
5416 static void
iwx_cmd_done(struct iwx_softc * sc,int qid,int idx,int code)5417 iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5418 {
5419 struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5420 struct iwx_tx_data *data;
5421
5422 if (qid != IWX_DQA_CMD_QUEUE) {
5423 return; /* Not a command ack. */
5424 }
5425
5426 data = &ring->data[idx];
5427
5428 if (data->m != NULL) {
5429 bus_dmamap_sync(ring->data_dmat, data->map,
5430 BUS_DMASYNC_POSTWRITE);
5431 bus_dmamap_unload(ring->data_dmat, data->map);
5432 m_freem(data->m);
5433 data->m = NULL;
5434 }
5435 wakeup(&ring->desc[idx]);
5436
5437 DPRINTF(("%s: command 0x%x done\n", __func__, code));
5438 if (ring->queued == 0) {
5439 DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
5440 DEVNAME(sc), code));
5441 } else if (ring->queued > 0)
5442 ring->queued--;
5443 }
5444
5445 static uint32_t
iwx_fw_rateidx_ofdm(uint8_t rval)5446 iwx_fw_rateidx_ofdm(uint8_t rval)
5447 {
5448 /* Firmware expects indices which match our 11a rate set. */
5449 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
5450 int i;
5451
5452 for (i = 0; i < rs->rs_nrates; i++) {
5453 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5454 return i;
5455 }
5456
5457 return 0;
5458 }
5459
5460 static uint32_t
iwx_fw_rateidx_cck(uint8_t rval)5461 iwx_fw_rateidx_cck(uint8_t rval)
5462 {
5463 /* Firmware expects indices which match our 11b rate set. */
5464 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
5465 int i;
5466
5467 for (i = 0; i < rs->rs_nrates; i++) {
5468 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
5469 return i;
5470 }
5471
5472 return 0;
5473 }
5474
5475 static int
iwx_min_basic_rate(struct ieee80211com * ic)5476 iwx_min_basic_rate(struct ieee80211com *ic)
5477 {
5478 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5479 struct ieee80211_node *ni = vap->iv_bss;
5480 struct ieee80211_rateset *rs = &ni->ni_rates;
5481 struct ieee80211_channel *c = ni->ni_chan;
5482 int i, min, rval;
5483
5484 min = -1;
5485
5486 if (c == IEEE80211_CHAN_ANYC) {
5487 printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
5488 return -1;
5489 }
5490
5491 for (i = 0; i < rs->rs_nrates; i++) {
5492 if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
5493 continue;
5494 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5495 if (min == -1)
5496 min = rval;
5497 else if (rval < min)
5498 min = rval;
5499 }
5500
5501 /* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz. */
5502 if (min == -1)
5503 min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
5504
5505 return min;
5506 }
5507
/*
 * Determine the Tx command flags and Tx rate+flags to use.
 * Return the selected Tx rate.
 */
static const struct iwx_rate *
iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
    struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
    struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	const struct iwx_rate *rinfo = NULL;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	/* Note: iwx_min_basic_rate() returns a rate *value* (500 kbps units). */
	int ridx = iwx_min_basic_rate(ic);
	int min_ridx, rate_flags;
	uint8_t rval;

	/* We're in the process of clearing the node, no channel already */
	if (ridx == -1)
		return NULL;

	/* Convert the minimum basic rate value to a rate-table index. */
	min_ridx = iwx_rval2ridx(ridx);

	*flags = 0;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = min_ridx;
		/* Driver picks the rate; firmware rate scaling is bypassed. */
		*flags |= IWX_TX_FLAGS_CMD_RATE;
	} else if (ni->ni_flags & IEEE80211_NODE_VHT) {
		/* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
		/*
		 * NOTE(review): unlike the branches around it, this assigns
		 * the raw rate value without iwx_rval2ridx() conversion —
		 * confirm this is intended while VHT support is incomplete.
		 */
		ridx = iwx_min_basic_rate(ic);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT: map the current MCS to a rate-table index. */
		ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
		    & ~IEEE80211_RATE_MCS];
	} else {
		/* Legacy: use the node's current Tx rate, clamped upward. */
		rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
		    & IEEE80211_RATE_VAL);
		ridx = iwx_rval2ridx(rval);
		if (ridx < min_ridx)
			ridx = min_ridx;
	}

	/* Prioritize EAPOL so association/rekeying completes reliably. */
	if (m->m_flags & M_EAPOL)
		*flags |= IWX_TX_FLAGS_HIGH_PRI;

	rinfo = &iwx_rates[ridx];

	/*
	 * Do not fill rate_n_flags if firmware controls the Tx rate.
	 * For data frames we rely on Tx rate scaling in firmware by default.
	 */
	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
		*rate_n_flags = 0;
		return rinfo;
	}

	/*
	 * Forcing a CCK/OFDM legacy rate is important for management frames.
	 * Association will only succeed if we do this correctly.
	 */

	/* NOTE(review): "%s%d::" looks like a typo for "%s:%d:". */
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
	/* CCK/OFDM flag encoding differs between rate_n_flags versions. */
	if (IWX_RIDX_IS_CCK(ridx)) {
		if (sc->sc_rate_n_flags_version >= 2)
			rate_flags |= IWX_RATE_MCS_CCK_MSK;
		else
			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
	} else if (sc->sc_rate_n_flags_version >= 2)
		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;

	rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
	    & IEEE80211_RATE_VAL);
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
	    rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);

	/* v2+ encodes a band-specific rate index; v1 uses the PLCP value. */
	if (sc->sc_rate_n_flags_version >= 2) {
		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
			    IWX_RATE_LEGACY_RATE_MSK);
		} else {
			rate_flags |= (iwx_fw_rateidx_cck(rval) &
			    IWX_RATE_LEGACY_RATE_MSK);
		}
	} else
		rate_flags |= rinfo->plcp;

	*rate_n_flags = rate_flags;
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
	    __func__, __LINE__,*flags);
	IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
	    __func__, __LINE__, *rate_n_flags);

	if (sc->sc_debug & IWX_DEBUG_TXRATE)
		print_ratenflags(__func__, __LINE__,
		    *rate_n_flags, sc->sc_rate_n_flags_version);

	return rinfo;
}
5611
/*
 * Update the scheduler byte-count table entry for the TFD at 'idx' so
 * the hardware knows the frame length and how many 64-byte chunks of
 * the TFD to fetch into SRAM.  Called with byte_cnt/num_tbs of 0 to
 * clear an entry when a descriptor is reclaimed.
 */
static void
iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
    int idx, uint16_t byte_cnt, uint16_t num_tbs)
{
	uint8_t filled_tfd_size, num_fetch_chunks;
	uint16_t len = byte_cnt;
	uint16_t bc_ent;

	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
	    num_tbs * sizeof(struct iwx_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;

	/* Table entry layout differs by device family. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Starting from AX210, the HW expects bytes */
		bc_ent = htole16(len | (num_fetch_chunks << 14));
		scd_bc_tbl[idx].tfd_offset = bc_ent;
	} else {
		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
		/* Before AX210, the HW expects DW */
		len = howmany(len, 4);
		bc_ent = htole16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}

	/* Make the update visible to the device before the ring is kicked. */
	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
}
5647
5648 static int
iwx_tx(struct iwx_softc * sc,struct mbuf * m,struct ieee80211_node * ni)5649 iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
5650 {
5651 struct ieee80211com *ic = &sc->sc_ic;
5652 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5653 struct iwx_node *in = (void *)ni;
5654 struct iwx_tx_ring *ring;
5655 struct iwx_tx_data *data;
5656 struct iwx_tfh_tfd *desc;
5657 struct iwx_device_cmd *cmd;
5658 struct ieee80211_frame *wh;
5659 struct ieee80211_key *k = NULL;
5660 const struct iwx_rate *rinfo;
5661 uint64_t paddr;
5662 u_int hdrlen;
5663 uint32_t rate_n_flags;
5664 uint16_t num_tbs, flags, offload_assist = 0;
5665 int i, totlen, err, pad, qid;
5666 #define IWM_MAX_SCATTER 20
5667 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
5668 int nsegs;
5669 struct mbuf *m1;
5670 size_t txcmd_size;
5671
5672 IWX_ASSERT_LOCKED(sc);
5673
5674 wh = mtod(m, struct ieee80211_frame *);
5675 hdrlen = ieee80211_anyhdrsize(wh);
5676
5677 qid = sc->first_data_qid;
5678
5679 /* Put QoS frames on the data queue which maps to their TID. */
5680 if (IEEE80211_QOS_HAS_SEQ(wh)) {
5681 uint16_t qos = ieee80211_gettid(wh);
5682 uint8_t tid = qos & IEEE80211_QOS_TID;
5683 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
5684
5685 /*
5686 * Note: we're currently putting all frames into one queue
5687 * except for A-MPDU queues. We should be able to choose
5688 * other WME queues but first we need to verify they've been
5689 * correctly setup for data.
5690 */
5691
5692 /*
5693 * Only QoS data goes into an A-MPDU queue;
5694 * don't add QoS null, the other data types, etc.
5695 */
5696 if (IEEE80211_AMPDU_RUNNING(tap) &&
5697 IEEE80211_IS_QOSDATA(wh) &&
5698 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
5699 sc->aggqid[tid] != 0) {
5700 qid = sc->aggqid[tid];
5701 }
5702 }
5703
5704 ring = &sc->txq[qid];
5705 desc = &ring->desc[ring->cur];
5706 memset(desc, 0, sizeof(*desc));
5707 data = &ring->data[ring->cur];
5708
5709 cmd = &ring->cmd[ring->cur];
5710 cmd->hdr.code = IWX_TX_CMD;
5711 cmd->hdr.flags = 0;
5712 cmd->hdr.qid = ring->qid;
5713 cmd->hdr.idx = ring->cur;
5714
5715 rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
5716 if (rinfo == NULL)
5717 return EINVAL;
5718
5719 /* Offloaded sequence number assignment; non-AMPDU case */
5720 if ((m->m_flags & M_AMPDU_MPDU) == 0)
5721 ieee80211_output_seqno_assign(ni, -1, m);
5722
5723 /* Radiotap */
5724 if (ieee80211_radiotap_active_vap(vap)) {
5725 struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
5726
5727 tap->wt_flags = 0;
5728 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
5729 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
5730 tap->wt_rate = rinfo->rate;
5731 if (k != NULL)
5732 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5733 ieee80211_radiotap_tx(vap, m);
5734 }
5735
5736 /* Encrypt - CCMP via direct HW path, TKIP/WEP indirected openbsd-style for now */
5737 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
5738 k = ieee80211_crypto_get_txkey(ni, m);
5739 if (k == NULL) {
5740 printf("%s: k is NULL!\n", __func__);
5741 m_freem(m);
5742 return (ENOBUFS);
5743 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
5744 k->wk_keytsc++;
5745 } else {
5746 k->wk_cipher->ic_encap(k, m);
5747
5748 /* 802.11 headers may have moved */
5749 wh = mtod(m, struct ieee80211_frame *);
5750 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5751 }
5752 } else
5753 flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
5754
5755 totlen = m->m_pkthdr.len;
5756
5757 if (hdrlen & 3) {
5758 /* First segment length must be a multiple of 4. */
5759 pad = 4 - (hdrlen & 3);
5760 offload_assist |= IWX_TX_CMD_OFFLD_PAD;
5761 } else
5762 pad = 0;
5763
5764 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5765 struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
5766 memset(tx, 0, sizeof(*tx));
5767 tx->len = htole16(totlen);
5768 tx->offload_assist = htole32(offload_assist);
5769 tx->flags = htole16(flags);
5770 tx->rate_n_flags = htole32(rate_n_flags);
5771 memcpy(tx->hdr, wh, hdrlen);
5772 txcmd_size = sizeof(*tx);
5773 } else {
5774 struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
5775 memset(tx, 0, sizeof(*tx));
5776 tx->len = htole16(totlen);
5777 tx->offload_assist = htole16(offload_assist);
5778 tx->flags = htole32(flags);
5779 tx->rate_n_flags = htole32(rate_n_flags);
5780 memcpy(tx->hdr, wh, hdrlen);
5781 txcmd_size = sizeof(*tx);
5782 }
5783
5784 /* Trim 802.11 header. */
5785 m_adj(m, hdrlen);
5786
5787 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
5788 &nsegs, BUS_DMA_NOWAIT);
5789 if (err && err != EFBIG) {
5790 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
5791 m_freem(m);
5792 return err;
5793 }
5794 if (err) {
5795 /* Too many DMA segments, linearize mbuf. */
5796 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
5797 if (m1 == NULL) {
5798 printf("%s: could not defrag mbufs\n", __func__);
5799 m_freem(m);
5800 return (ENOBUFS);
5801 }
5802 m = m1;
5803 err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
5804 segs, &nsegs, BUS_DMA_NOWAIT);
5805 if (err) {
5806 printf("%s: can't map mbuf (error %d)\n", __func__,
5807 err);
5808 m_freem(m);
5809 return (err);
5810 }
5811 }
5812 data->m = m;
5813 data->in = in;
5814
5815 /* Fill TX descriptor. */
5816 num_tbs = 2 + nsegs;
5817 desc->num_tbs = htole16(num_tbs);
5818
5819 desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
5820 paddr = htole64(data->cmd_paddr);
5821 memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
5822 #if __SIZEOF_SIZE_T__ > 4
5823 if (data->cmd_paddr >> 32 != (data->cmd_paddr +
5824 le32toh(desc->tbs[0].tb_len)) >> 32)
5825 DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
5826 #endif
5827 desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
5828 txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
5829 paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
5830 memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
5831
5832 #if __SIZEOF_SIZE_T__ > 4
5833 if (data->cmd_paddr >> 32 != (data->cmd_paddr +
5834 le32toh(desc->tbs[1].tb_len)) >> 32)
5835 DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
5836 #endif
5837
5838 /* Other DMA segments are for data payload. */
5839 for (i = 0; i < nsegs; i++) {
5840 seg = &segs[i];
5841 desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
5842 paddr = htole64(seg->ds_addr);
5843 memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
5844 #if __SIZEOF_SIZE_T__ > 4
5845 if (data->cmd_paddr >> 32 != (data->cmd_paddr +
5846 le32toh(desc->tbs[i + 2].tb_len)) >> 32)
5847 DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__,
5848 i + 2));
5849 #endif
5850 }
5851
5852 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
5853 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
5854 BUS_DMASYNC_PREWRITE);
5855 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5856 BUS_DMASYNC_PREWRITE);
5857
5858 iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
5859
5860 /* Kick TX ring. */
5861 ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5862 ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5863 IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5864
5865 /* Mark TX ring as full if we reach a certain threshold. */
5866 if (++ring->queued > iwx_himark) {
5867 sc->qfullmsk |= 1 << ring->qid;
5868 }
5869
5870 sc->sc_tx_timer[ring->qid] = 15;
5871
5872 return 0;
5873 }
5874
/*
 * Flush all pending frames for the given TIDs of a station and, when the
 * firmware returns a flush response, advance the affected Tx rings to the
 * firmware's post-flush read pointers.  Returns 0 or an errno value.
 */
static int
iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
{
	struct iwx_rx_packet *pkt;
	struct iwx_tx_path_flush_cmd_rsp *resp;
	struct iwx_tx_path_flush_cmd flush_cmd = {
		.sta_id = htole32(sta_id),
		.tid_mask = htole16(tids),
	};
	struct iwx_host_cmd hcmd = {
		.id = IWX_TXPATH_FLUSH,
		.len = { sizeof(flush_cmd), },
		.data = { &flush_cmd, },
		.flags = IWX_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	int err, resp_len, i, num_flushed_queues;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	resp_len = iwx_rx_packet_payload_len(pkt);
	/* Some firmware versions don't provide a response. */
	if (resp_len == 0)
		goto out;
	else if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;

	/* The response must refer to the station we asked about. */
	if (le16toh(resp->sta_id) != sta_id) {
		err = EIO;
		goto out;
	}

	num_flushed_queues = le16toh(resp->num_flushed_queues);
	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
		err = EIO;
		goto out;
	}

	/*
	 * For each queue the firmware flushed, move the driver's ring
	 * bookkeeping forward to match the firmware's read pointer.
	 */
	for (i = 0; i < num_flushed_queues; i++) {
		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
		uint16_t tid = le16toh(queue_info->tid);
		uint16_t read_after = le16toh(queue_info->read_after_flush);
		uint16_t qid = le16toh(queue_info->queue_num);
		struct iwx_tx_ring *txq;

		/* Skip queue numbers outside our ring array. */
		if (qid >= nitems(sc->txq))
			continue;

		txq = &sc->txq[qid];
		/* Skip entries whose TID no longer matches this ring. */
		if (tid != txq->tid)
			continue;

		iwx_txq_advance(sc, txq, read_after);
	}
out:
	iwx_free_resp(sc, &hcmd);
	return err;
}
5945
5946 #define IWX_FLUSH_WAIT_MS 2000
5947
5948 static int
iwx_drain_sta(struct iwx_softc * sc,struct iwx_node * in,int drain)5949 iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
5950 {
5951 struct iwx_add_sta_cmd cmd;
5952 int err;
5953 uint32_t status;
5954
5955 memset(&cmd, 0, sizeof(cmd));
5956 cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
5957 in->in_color));
5958 cmd.sta_id = IWX_STATION_ID;
5959 cmd.add_modify = IWX_STA_MODE_MODIFY;
5960 cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
5961 cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
5962
5963 status = IWX_ADD_STA_SUCCESS;
5964 err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
5965 sizeof(cmd), &cmd, &status);
5966 if (err) {
5967 printf("%s: could not update sta (error %d)\n",
5968 DEVNAME(sc), err);
5969 return err;
5970 }
5971
5972 switch (status & IWX_ADD_STA_STATUS_MASK) {
5973 case IWX_ADD_STA_SUCCESS:
5974 break;
5975 default:
5976 err = EIO;
5977 printf("%s: Couldn't %s draining for station\n",
5978 DEVNAME(sc), drain ? "enable" : "disable");
5979 break;
5980 }
5981
5982 return err;
5983 }
5984
/*
 * Flush the Tx path for all TIDs of the given station.  Draining is
 * enabled around the flush so the firmware discards frames while the
 * queues are being emptied.  Returns 0 or an errno value.
 */
static int
iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err;

	IWX_ASSERT_LOCKED(sc);

	/* Tell our Tx path that a flush is in progress. */
	sc->sc_flags |= IWX_FLAG_TXFLUSH;

	err = iwx_drain_sta(sc, in, 1);
	if (err)
		goto done;

	/* 0xffff = flush every TID of this station. */
	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		goto done;
	}

	/*
	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a no-op in
	 * the FreeBSD driver and it has been replaced in OpenBSD.
	 */

	err = iwx_drain_sta(sc, in, 0);
done:
	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
	return err;
}
6015
6016 #define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
6017
6018 static int
iwx_beacon_filter_send_cmd(struct iwx_softc * sc,struct iwx_beacon_filter_cmd * cmd)6019 iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6020 struct iwx_beacon_filter_cmd *cmd)
6021 {
6022 return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6023 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6024 }
6025
6026 static int
iwx_update_beacon_abort(struct iwx_softc * sc,struct iwx_node * in,int enable)6027 iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6028 {
6029 struct iwx_beacon_filter_cmd cmd = {
6030 IWX_BF_CMD_CONFIG_DEFAULTS,
6031 .bf_enable_beacon_filter = htole32(1),
6032 .ba_enable_beacon_abort = htole32(enable),
6033 };
6034
6035 if (!sc->sc_bf.bf_enabled)
6036 return 0;
6037
6038 sc->sc_bf.ba_enabled = enable;
6039 return iwx_beacon_filter_send_cmd(sc, &cmd);
6040 }
6041
6042 static void
iwx_power_build_cmd(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_power_cmd * cmd)6043 iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6044 struct iwx_mac_power_cmd *cmd)
6045 {
6046 struct ieee80211com *ic = &sc->sc_ic;
6047 struct ieee80211_node *ni = &in->in_ni;
6048 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6049 int dtim_period, dtim_msec, keep_alive;
6050
6051 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6052 in->in_color));
6053 if (vap->iv_dtim_period)
6054 dtim_period = vap->iv_dtim_period;
6055 else
6056 dtim_period = 1;
6057
6058 /*
6059 * Regardless of power management state the driver must set
6060 * keep alive period. FW will use it for sending keep alive NDPs
6061 * immediately after association. Check that keep alive period
6062 * is at least 3 * DTIM.
6063 */
6064 dtim_msec = dtim_period * ni->ni_intval;
6065 keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6066 keep_alive = roundup(keep_alive, 1000) / 1000;
6067 cmd->keep_alive_seconds = htole16(keep_alive);
6068
6069 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6070 cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6071 }
6072
6073 static int
iwx_power_mac_update_mode(struct iwx_softc * sc,struct iwx_node * in)6074 iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6075 {
6076 int err;
6077 int ba_enable;
6078 struct iwx_mac_power_cmd cmd;
6079
6080 memset(&cmd, 0, sizeof(cmd));
6081
6082 iwx_power_build_cmd(sc, in, &cmd);
6083
6084 err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6085 sizeof(cmd), &cmd);
6086 if (err != 0)
6087 return err;
6088
6089 ba_enable = !!(cmd.flags &
6090 htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6091 return iwx_update_beacon_abort(sc, in, ba_enable);
6092 }
6093
6094 static int
iwx_power_update_device(struct iwx_softc * sc)6095 iwx_power_update_device(struct iwx_softc *sc)
6096 {
6097 struct iwx_device_power_cmd cmd = { };
6098 struct ieee80211com *ic = &sc->sc_ic;
6099
6100 if (ic->ic_opmode != IEEE80211_M_MONITOR)
6101 cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6102
6103 return iwx_send_cmd_pdu(sc,
6104 IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6105 }
#if 0
/*
 * Enable beacon filtering for the station.  Disabled (#if 0): nothing
 * in this driver currently calls it, and sc_bf.bf_enabled is therefore
 * never set, which also makes iwx_update_beacon_abort() a no-op.
 */
static int
iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
{
	struct iwx_beacon_filter_cmd cmd = {
		IWX_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
	};
	int err;

	err = iwx_beacon_filter_send_cmd(sc, &cmd);
	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
6124 static int
iwx_disable_beacon_filter(struct iwx_softc * sc)6125 iwx_disable_beacon_filter(struct iwx_softc *sc)
6126 {
6127 struct iwx_beacon_filter_cmd cmd;
6128 int err;
6129
6130 memset(&cmd, 0, sizeof(cmd));
6131
6132 err = iwx_beacon_filter_send_cmd(sc, &cmd);
6133 if (err == 0)
6134 sc->sc_bf.bf_enabled = 0;
6135
6136 return err;
6137 }
6138
/*
 * Add (update == 0) or modify (update != 0) the firmware's station
 * entry for this node, including HT/VHT aggregation and channel-width
 * flags.  Returns 0 on success or an errno value.
 */
static int
iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
{
	struct iwx_add_sta_cmd add_sta_cmd;
	int err, i;
	uint32_t status, aggsize;
	/* Largest A-MPDU size field value the firmware accepts (64K). */
	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
	    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;

	/* Adding twice without an update is a driver state bug. */
	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
		panic("STA already added");

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated general-purpose station slot. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
	} else {
		add_sta_cmd.sta_id = IWX_STATION_ID;
		add_sta_cmd.station_type = IWX_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* The MAC address is only set on initial add, never on update. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etheranyaddr);
		else
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
	}
	DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
	    ether_sprintf(add_sta_cmd.addr)));
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);

	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		add_sta_cmd.station_flags_msk
		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);

		if (iwx_mimo_enabled(sc)) {
			if (ni->ni_flags & IEEE80211_NODE_VHT) {
				add_sta_cmd.station_flags |=
				    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
			} else {
				/*
				 * HT MCS rates above 7 require a second
				 * spatial stream; enable 2x2 MIMO only if
				 * the peer advertises such a rate.
				 */
				int hasmimo = 0;
				for (i = 0; i < htrs->rs_nrates; i++) {
					if (htrs->rs_rates[i] > 7) {
						hasmimo = 1;
						break;
					}
				}
				if (hasmimo) {
					add_sta_cmd.station_flags |=
					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
				}
			}
		}

		if (ni->ni_flags & IEEE80211_NODE_HT &&
		    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			add_sta_cmd.station_flags |= htole32(
			    IWX_STA_FLG_FAT_EN_40MHZ);
		}


		if (ni->ni_flags & IEEE80211_NODE_VHT) {
			if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
				add_sta_cmd.station_flags |= htole32(
				    IWX_STA_FLG_FAT_EN_80MHZ);
			}
			// XXX-misha: TODO get real ampdu size
			aggsize = max_aggsize;
		} else {
			/* Use the peer's advertised max A-MPDU exponent. */
			aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
			    IEEE80211_HTCAP_MAXRXAMPDU);
		}

		if (aggsize > max_aggsize)
			aggsize = max_aggsize;
		add_sta_cmd.station_flags |= htole32((aggsize <<
		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);

		/* Map the peer's A-MPDU density to the firmware encoding. */
		switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
		    IEEE80211_HTCAP_MPDUDENSITY)) {
		case IEEE80211_HTCAP_MPDUDENSITY_2:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_4:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_8:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_HTCAP_MPDUDENSITY_16:
			add_sta_cmd.station_flags
			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
	    &add_sta_cmd, &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
6259
6260 static int
iwx_rm_sta_cmd(struct iwx_softc * sc,struct iwx_node * in)6261 iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6262 {
6263 struct ieee80211com *ic = &sc->sc_ic;
6264 struct iwx_rm_sta_cmd rm_sta_cmd;
6265 int err;
6266
6267 if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6268 panic("sta already removed");
6269
6270 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6271 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6272 rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6273 else
6274 rm_sta_cmd.sta_id = IWX_STATION_ID;
6275
6276 err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6277 &rm_sta_cmd);
6278
6279 return err;
6280 }
6281
/*
 * Tear down the station: flush its Tx path, remove its Tx queues (on
 * firmware with the new SCD_QUEUE_CONFIG API), remove the firmware
 * station entry, and reset all aggregation/block-ack driver state.
 */
static int
iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
{
	int err, i, cmd_ver;

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
	 * before a station gets removed.
	 */
	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_SCD_QUEUE_CONFIG_CMD);
	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
		err = iwx_disable_mgmt_queue(sc);
		if (err)
			return err;
		/* Disable every aggregation queue that is still enabled. */
		for (i = IWX_FIRST_AGG_TX_QUEUE;
		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
			struct iwx_tx_ring *ring = &sc->txq[i];
			if ((sc->qenablemsk & (1 << i)) == 0)
				continue;
			err = iwx_disable_txq(sc, IWX_STATION_ID,
			    ring->qid, ring->tid);
			if (err) {
				printf("%s: could not disable Tx queue %d "
				    "(error %d)\n", DEVNAME(sc), ring->qid,
				    err);
				return err;
			}
		}
	}

	err = iwx_rm_sta_cmd(sc, in);
	if (err) {
		printf("%s: could not remove STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	in->in_flags = 0;

	/* Reset all block-ack and aggregation bookkeeping. */
	sc->sc_rx_ba_sessions = 0;
	sc->ba_rx.start_tidmask = 0;
	sc->ba_rx.stop_tidmask = 0;
	memset(sc->aggqid, 0, sizeof(sc->aggqid));
	sc->ba_tx.start_tidmask = 0;
	sc->ba_tx.stop_tidmask = 0;
	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
		sc->qenablemsk &= ~(1 << i);

#if 0
	for (i = 0; i < IEEE80211_NUM_TID; i++) {
		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
		if (ba->ba_state != IEEE80211_BA_AGREED)
			continue;
		ieee80211_delba_request(ic, ni, 0, 1, i);
	}
#endif
	/* Clear ampdu rx state (GOS-1525) */
	for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
		struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
		ba->ba_flags = 0;
	}

	return 0;
}
6354
/*
 * Fill the UMAC scan channel configuration array from the net80211
 * scan state's channel list, capped at the firmware's supported channel
 * count.  Returns the number of channels written.
 *
 * NOTE(review): the chan_nitems and n_ssids parameters are currently
 * unused here; the loop bound comes from sc_capa_n_scan_channels and
 * the flags are applied verbatim — confirm callers size chan_nitems >=
 * sc_capa_n_scan_channels.
 */
static uint8_t
iwx_umac_scan_fill_channels(struct iwx_softc *sc,
    struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
    int n_ssids, uint32_t channel_cfg_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct ieee80211_channel *c;
	uint8_t nchan;
	int j;

	for (nchan = j = 0;
	    j < ss->ss_last &&
	    nchan < sc->sc_capa_n_scan_channels;
	    j++) {
		uint8_t channel_num;

		c = ss->ss_chans[j];
		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		/* Newer firmware uses the v2 layout with an explicit band. */
		if (isset(sc->sc_ucode_api,
		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
			chan->v2.channel_num = channel_num;
			if (IEEE80211_IS_CHAN_2GHZ(c))
				chan->v2.band = IWX_PHY_BAND_24;
			else
				chan->v2.band = IWX_PHY_BAND_5;
			chan->v2.iter_count = 1;
			chan->v2.iter_interval = 0;
		} else {
			chan->v1.channel_num = channel_num;
			chan->v1.iter_count = 1;
			chan->v1.iter_interval = htole16(0);
		}
		chan->flags |= htole32(channel_cfg_flags);
		chan++;
		nchan++;
	}

	return nchan;
}
6395
6396 static int
iwx_fill_probe_req(struct iwx_softc * sc,struct iwx_scan_probe_req * preq)6397 iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6398 {
6399 struct ieee80211com *ic = &sc->sc_ic;
6400 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6401 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6402 struct ieee80211_rateset *rs;
6403 size_t remain = sizeof(preq->buf);
6404 uint8_t *frm, *pos;
6405
6406 memset(preq, 0, sizeof(*preq));
6407
6408 if (remain < sizeof(*wh) + 2)
6409 return ENOBUFS;
6410
6411 /*
6412 * Build a probe request frame. Most of the following code is a
6413 * copy & paste of what is done in net80211.
6414 */
6415 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6416 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6417 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6418 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6419 IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
6420 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6421 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6422 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6423
6424 frm = (uint8_t *)(wh + 1);
6425 *frm++ = IEEE80211_ELEMID_SSID;
6426 *frm++ = 0;
6427 /* hardware inserts SSID */
6428
6429 /* Tell the firmware where the MAC header is. */
6430 preq->mac_header.offset = 0;
6431 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6432 remain -= frm - (uint8_t *)wh;
6433
6434 /* Fill in 2GHz IEs and tell firmware where they are. */
6435 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6436 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6437 if (remain < 4 + rs->rs_nrates)
6438 return ENOBUFS;
6439 } else if (remain < 2 + rs->rs_nrates)
6440 return ENOBUFS;
6441 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6442 pos = frm;
6443 frm = ieee80211_add_rates(frm, rs);
6444 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6445 frm = ieee80211_add_xrates(frm, rs);
6446 remain -= frm - pos;
6447
6448 if (isset(sc->sc_enabled_capa,
6449 IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6450 if (remain < 3)
6451 return ENOBUFS;
6452 *frm++ = IEEE80211_ELEMID_DSPARMS;
6453 *frm++ = 1;
6454 *frm++ = 0;
6455 remain -= 3;
6456 }
6457 preq->band_data[0].len = htole16(frm - pos);
6458
6459 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6460 /* Fill in 5GHz IEs. */
6461 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6462 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6463 if (remain < 4 + rs->rs_nrates)
6464 return ENOBUFS;
6465 } else if (remain < 2 + rs->rs_nrates)
6466 return ENOBUFS;
6467 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6468 pos = frm;
6469 frm = ieee80211_add_rates(frm, rs);
6470 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6471 frm = ieee80211_add_xrates(frm, rs);
6472 preq->band_data[1].len = htole16(frm - pos);
6473 remain -= frm - pos;
6474 if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
6475 if (remain < 14)
6476 return ENOBUFS;
6477 frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
6478 remain -= frm - pos;
6479 preq->band_data[1].len = htole16(frm - pos);
6480 }
6481 }
6482
6483 /* Send 11n IEs on both 2GHz and 5GHz bands. */
6484 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6485 pos = frm;
6486 if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
6487 if (remain < 28)
6488 return ENOBUFS;
6489 frm = ieee80211_add_htcap(frm, vap->iv_bss);
6490 /* XXX add WME info? */
6491 remain -= frm - pos;
6492 }
6493
6494 preq->common_data.len = htole16(frm - pos);
6495
6496 return 0;
6497 }
6498
6499 static int
iwx_config_umac_scan_reduced(struct iwx_softc * sc)6500 iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6501 {
6502 struct iwx_scan_config scan_cfg;
6503 struct iwx_host_cmd hcmd = {
6504 .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6505 .len[0] = sizeof(scan_cfg),
6506 .data[0] = &scan_cfg,
6507 .flags = 0,
6508 };
6509 int cmdver;
6510
6511 if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
6512 printf("%s: firmware does not support reduced scan config\n",
6513 DEVNAME(sc));
6514 return ENOTSUP;
6515 }
6516
6517 memset(&scan_cfg, 0, sizeof(scan_cfg));
6518
6519 /*
6520 * SCAN_CFG version >= 5 implies that the broadcast
6521 * STA ID field is deprecated.
6522 */
6523 cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
6524 if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
6525 scan_cfg.bcast_sta_id = 0xff;
6526
6527 scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
6528 scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
6529
6530 return iwx_send_cmd(sc, &hcmd);
6531 }
6532
6533 static uint16_t
iwx_scan_umac_flags_v2(struct iwx_softc * sc,int bgscan)6534 iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
6535 {
6536 struct ieee80211com *ic = &sc->sc_ic;
6537 struct ieee80211_scan_state *ss = ic->ic_scan;
6538 uint16_t flags = 0;
6539
6540 if (ss->ss_nssid == 0) {
6541 DPRINTF(("%s: Passive scan started\n", __func__));
6542 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
6543 }
6544
6545 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
6546 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
6547 flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
6548
6549 return flags;
6550 }
6551
6552 #define IWX_SCAN_DWELL_ACTIVE 10
6553 #define IWX_SCAN_DWELL_PASSIVE 110
6554
6555 /* adaptive dwell max budget time [TU] for full scan */
6556 #define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
6557 /* adaptive dwell max budget time [TU] for directed scan */
6558 #define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
6559 /* adaptive dwell default high band APs number */
6560 #define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
6561 /* adaptive dwell default low band APs number */
6562 #define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
6563 /* adaptive dwell default APs number in social channels (1, 6, 11) */
6564 #define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
6565 /* adaptive dwell number of APs override for p2p friendly GO channels */
6566 #define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
6567 /* adaptive dwell number of APs override for social channels */
6568 #define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
6569
6570 static void
iwx_scan_umac_dwell_v10(struct iwx_softc * sc,struct iwx_scan_general_params_v10 * general_params,int bgscan)6571 iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
6572 struct iwx_scan_general_params_v10 *general_params, int bgscan)
6573 {
6574 uint32_t suspend_time, max_out_time;
6575 uint8_t active_dwell, passive_dwell;
6576
6577 active_dwell = IWX_SCAN_DWELL_ACTIVE;
6578 passive_dwell = IWX_SCAN_DWELL_PASSIVE;
6579
6580 general_params->adwell_default_social_chn =
6581 IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
6582 general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
6583 general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
6584
6585 if (bgscan)
6586 general_params->adwell_max_budget =
6587 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
6588 else
6589 general_params->adwell_max_budget =
6590 htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
6591
6592 general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
6593 if (bgscan) {
6594 max_out_time = htole32(120);
6595 suspend_time = htole32(120);
6596 } else {
6597 max_out_time = htole32(0);
6598 suspend_time = htole32(0);
6599 }
6600 general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
6601 htole32(max_out_time);
6602 general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
6603 htole32(suspend_time);
6604 general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
6605 htole32(max_out_time);
6606 general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
6607 htole32(suspend_time);
6608
6609 general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
6610 general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
6611 general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
6612 general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
6613 }
6614
6615 static void
iwx_scan_umac_fill_general_p_v10(struct iwx_softc * sc,struct iwx_scan_general_params_v10 * gp,uint16_t gen_flags,int bgscan)6616 iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
6617 struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
6618 {
6619 iwx_scan_umac_dwell_v10(sc, gp, bgscan);
6620
6621 gp->flags = htole16(gen_flags);
6622
6623 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
6624 gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
6625 if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
6626 gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
6627
6628 gp->scan_start_mac_id = 0;
6629 }
6630
6631 static void
iwx_scan_umac_fill_ch_p_v6(struct iwx_softc * sc,struct iwx_scan_channel_params_v6 * cp,uint32_t channel_cfg_flags,int n_ssid)6632 iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
6633 struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
6634 int n_ssid)
6635 {
6636 cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
6637
6638 cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
6639 nitems(cp->channel_config), n_ssid, channel_cfg_flags);
6640
6641 cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
6642 cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
6643 }
6644
/*
 * Build and send a v14 UMAC scan request from the net80211 scan state.
 * Background scans are sent asynchronously.  Returns 0 or an errno
 * value.
 */
static int
iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct iwx_host_cmd hcmd = {
		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	/* The command is large, so it lives in the softc, not the stack. */
	struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
	struct iwx_scan_req_params_v14 *scan_p;
	int err, async = bgscan, n_ssid = 0;
	uint16_t gen_flags;
	uint32_t bitmap_ssid = 0;

	IWX_ASSERT_LOCKED(sc);

	bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));

	scan_p = &cmd->scan_params;

	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
	cmd->uid = htole32(0);

	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
	    gen_flags, bgscan);

	/* Single scan iteration, no periodic rescheduling. */
	scan_p->periodic_params.schedule[0].interval = htole16(0);
	scan_p->periodic_params.schedule[0].iter_count = 1;

	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
	if (err) {
		printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
		    err);
		return err;
	}

	/* Copy each requested SSID and mark it in the direct-scan bitmap. */
	for (int i=0; i < ss->ss_nssid; i++) {
		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
		scan_p->probe_params.direct_scan[i].len =
		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		DPRINTF(("%s: Active scan started for ssid ", __func__));
		memcpy(scan_p->probe_params.direct_scan[i].ssid,
		    ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
		n_ssid++;
		bitmap_ssid |= (1 << i);
	}
	DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));

	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
	    n_ssid);

	hcmd.len[0] = sizeof(*cmd);
	hcmd.data[0] = (void *)cmd;
	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;

	err = iwx_send_cmd(sc, &hcmd);
	return err;
}
6707
6708 static void
iwx_mcc_update(struct iwx_softc * sc,struct iwx_mcc_chub_notif * notif)6709 iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
6710 {
6711 char alpha2[3];
6712
6713 snprintf(alpha2, sizeof(alpha2), "%c%c",
6714 (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
6715
6716 IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
6717 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
6718
6719 /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
6720 }
6721
6722 uint8_t
iwx_ridx2rate(struct ieee80211_rateset * rs,int ridx)6723 iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
6724 {
6725 int i;
6726 uint8_t rval;
6727
6728 for (i = 0; i < rs->rs_nrates; i++) {
6729 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
6730 if (rval == iwx_rates[ridx].rate)
6731 return rs->rs_rates[i];
6732 }
6733
6734 return 0;
6735 }
6736
6737 static int
iwx_rval2ridx(int rval)6738 iwx_rval2ridx(int rval)
6739 {
6740 int ridx;
6741
6742 for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
6743 if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
6744 continue;
6745 if (rval == iwx_rates[ridx].rate)
6746 break;
6747 }
6748
6749 return ridx;
6750 }
6751
/*
 * Compute the CCK and OFDM basic-rate bitmaps used for control response
 * (ACK/CTS) frames, based on the BSS's basic rate set.  The firmware
 * expects these as bit masks indexed from the first CCK/OFDM rate.
 *
 * Output: *cck_rates and *ofdm_rates receive the final bitmaps.
 */
static void
iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2 GHz (or when the channel is not known). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
			/* Only basic (mandatory-to-receive) rates matter. */
			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		/* OFDM bitmap is re-based so bit 0 == first OFDM rate. */
		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWX_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
	if (IWX_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
6837
6838 static void
iwx_mac_ctxt_cmd_common(struct iwx_softc * sc,struct iwx_node * in,struct iwx_mac_ctx_cmd * cmd,uint32_t action)6839 iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
6840 struct iwx_mac_ctx_cmd *cmd, uint32_t action)
6841 {
6842 #define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6843 struct ieee80211com *ic = &sc->sc_ic;
6844 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6845 struct ieee80211_node *ni = vap->iv_bss;
6846 int cck_ack_rates, ofdm_ack_rates;
6847
6848 cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6849 in->in_color));
6850 cmd->action = htole32(action);
6851
6852 if (action == IWX_FW_CTXT_ACTION_REMOVE)
6853 return;
6854
6855 if (ic->ic_opmode == IEEE80211_M_MONITOR)
6856 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
6857 else if (ic->ic_opmode == IEEE80211_M_STA)
6858 cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
6859 else
6860 panic("unsupported operating mode %d", ic->ic_opmode);
6861 cmd->tsf_id = htole32(IWX_TSF_ID_A);
6862
6863 IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
6864 DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
6865 ether_sprintf(cmd->node_addr)));
6866 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6867 IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
6868 return;
6869 }
6870
6871 IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
6872 DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
6873 ether_sprintf(cmd->bssid_addr)));
6874 iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
6875 cmd->cck_rates = htole32(cck_ack_rates);
6876 cmd->ofdm_rates = htole32(ofdm_ack_rates);
6877
6878 cmd->cck_short_preamble
6879 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6880 ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
6881 cmd->short_slot
6882 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
6883 ? IWX_MAC_FLG_SHORT_SLOT : 0);
6884
6885 struct chanAccParams chp;
6886 ieee80211_wme_vap_getparams(vap, &chp);
6887
6888 for (int i = 0; i < WME_NUM_AC; i++) {
6889 int txf = iwx_ac_to_tx_fifo[i];
6890 cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
6891 cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
6892 cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
6893 cmd->ac[txf].fifos_mask = (1 << txf);
6894 cmd->ac[txf].edca_txop = chp.cap_wmeParams[i].wmep_txopLimit;
6895
6896 cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
6897 }
6898
6899 if (ni->ni_flags & IEEE80211_NODE_QOS) {
6900 DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
6901 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
6902 }
6903
6904 if (ni->ni_flags & IEEE80211_NODE_HT) {
6905 switch (vap->iv_curhtprotmode) {
6906 case IEEE80211_HTINFO_OPMODE_PURE:
6907 break;
6908 case IEEE80211_HTINFO_OPMODE_PROTOPT:
6909 case IEEE80211_HTINFO_OPMODE_MIXED:
6910 cmd->protection_flags |=
6911 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6912 IWX_MAC_PROT_FLG_FAT_PROT);
6913 break;
6914 case IEEE80211_HTINFO_OPMODE_HT20PR:
6915 if (in->in_phyctxt &&
6916 (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
6917 in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
6918 cmd->protection_flags |=
6919 htole32(IWX_MAC_PROT_FLG_HT_PROT |
6920 IWX_MAC_PROT_FLG_FAT_PROT);
6921 }
6922 break;
6923 default:
6924 break;
6925 }
6926 cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
6927 DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
6928 }
6929
6930 if (ic->ic_flags & IEEE80211_F_USEPROT)
6931 cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
6932 cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
6933 #undef IWX_EXP2
6934 }
6935
/*
 * Fill the STA-specific portion of a MAC context command with association
 * state, beacon interval and DTIM timing derived from the BSS node.
 */
static void
iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
    struct iwx_mac_data_sta *sta, int assoc)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t dtim_off;
	uint64_t tsf;
	int dtim_period;

	/* Offset from the beacon TSF to the next DTIM, in microseconds. */
	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
	tsf = le64toh(ni->ni_tstamp.tsf);
	dtim_period = vap->iv_dtim_period;

	sta->is_assoc = htole32(assoc);

	if (assoc) {
		/*
		 * NOTE(review): dtim_time is 32-bit, so tsf + dtim_off is
		 * truncated here while dtim_tsf keeps the full 64 bits —
		 * presumably firmware wants both forms; confirm against the
		 * firmware API definition.
		 */
		sta->dtim_time = htole32(tsf + dtim_off);
		sta->dtim_tsf = htole64(tsf + dtim_off);
		// XXX: unset in iwm
		sta->assoc_beacon_arrive_time = 0;
	}
	sta->bi = htole32(ni->ni_intval);
	sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	sta->data_policy = htole32(0);
	sta->listen_interval = htole32(10);
	sta->assoc_id = htole32(ni->ni_associd);
}
6965
/*
 * Send a MAC context command (add/modify/remove) to the firmware.
 *
 * @param action one of IWX_FW_CTXT_ACTION_{ADD,MODIFY,REMOVE}
 * @param assoc  nonzero once the station is associated
 * @returns 0 on success or an errno from iwx_send_cmd_pdu()
 */
static int
iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
    int assoc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_mac_ctx_cmd cmd;
	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);

	/* Driver state must agree with firmware state. */
	if (action == IWX_FW_CTXT_ACTION_ADD && active)
		panic("MAC already added");
	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
		panic("MAC already removed");

	memset(&cmd, 0, sizeof(cmd));

	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);

	/* REMOVE needs only the common header fields. */
	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
		    sizeof(cmd), &cmd);
	}

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
		    IWX_MAC_FILTER_ACCEPT_GRP |
		    IWX_MAC_FILTER_IN_BEACON |
		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
		    IWX_MAC_FILTER_IN_CRC32);
	// XXX: dtim period is in vap
	} else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
		/*
		 * Allow beacons to pass through as long as we are not
		 * associated or we do not have dtim period information.
		 */
		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
	}
	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
}
7007
7008 static int
iwx_clear_statistics(struct iwx_softc * sc)7009 iwx_clear_statistics(struct iwx_softc *sc)
7010 {
7011 struct iwx_statistics_cmd scmd = {
7012 .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7013 };
7014 struct iwx_host_cmd cmd = {
7015 .id = IWX_STATISTICS_CMD,
7016 .len[0] = sizeof(scmd),
7017 .data[0] = &scmd,
7018 .flags = IWX_CMD_WANT_RESP,
7019 .resp_pkt_len = sizeof(struct iwx_notif_statistics),
7020 };
7021 int err;
7022
7023 err = iwx_send_cmd(sc, &cmd);
7024 if (err)
7025 return err;
7026
7027 iwx_free_resp(sc, &cmd);
7028 return 0;
7029 }
7030
/*
 * Kick off a foreground UMAC scan.  Returns 0 on success or the error
 * from iwx_umac_scan_v14().
 */
static int
iwx_scan(struct iwx_softc *sc)
{
	int rc;

	rc = iwx_umac_scan_v14(sc, 0);
	if (rc != 0)
		printf("%s: could not initiate scan\n", DEVNAME(sc));
	return rc;
}
7043
7044 static int
iwx_bgscan(struct ieee80211com * ic)7045 iwx_bgscan(struct ieee80211com *ic)
7046 {
7047 struct iwx_softc *sc = ic->ic_softc;
7048 int err;
7049
7050 err = iwx_umac_scan_v14(sc, 1);
7051 if (err) {
7052 printf("%s: could not initiate scan\n", DEVNAME(sc));
7053 return err;
7054 }
7055 return 0;
7056 }
7057
7058 static int
iwx_enable_mgmt_queue(struct iwx_softc * sc)7059 iwx_enable_mgmt_queue(struct iwx_softc *sc)
7060 {
7061 int err;
7062
7063 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7064
7065 /*
7066 * Non-QoS frames use the "MGMT" TID and queue.
7067 * Other TIDs and data queues are reserved for QoS data frames.
7068 */
7069 err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7070 IWX_MGMT_TID, IWX_TX_RING_COUNT);
7071 if (err) {
7072 printf("%s: could not enable Tx queue %d (error %d)\n",
7073 DEVNAME(sc), sc->first_data_qid, err);
7074 return err;
7075 }
7076
7077 return 0;
7078 }
7079
7080 static int
iwx_disable_mgmt_queue(struct iwx_softc * sc)7081 iwx_disable_mgmt_queue(struct iwx_softc *sc)
7082 {
7083 int err, cmd_ver;
7084
7085 /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7086 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7087 IWX_SCD_QUEUE_CONFIG_CMD);
7088 if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7089 return 0;
7090
7091 sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7092
7093 err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7094 IWX_MGMT_TID);
7095 if (err) {
7096 printf("%s: could not disable Tx queue %d (error %d)\n",
7097 DEVNAME(sc), sc->first_data_qid, err);
7098 return err;
7099 }
7100
7101 return 0;
7102 }
7103
7104 static int
iwx_rs_rval2idx(uint8_t rval)7105 iwx_rs_rval2idx(uint8_t rval)
7106 {
7107 /* Firmware expects indices which match our 11g rate set. */
7108 const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7109 int i;
7110
7111 for (i = 0; i < rs->rs_nrates; i++) {
7112 if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7113 return i;
7114 }
7115
7116 return -1;
7117 }
7118
7119 static uint16_t
iwx_rs_ht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int rsidx)7120 iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7121 {
7122 uint16_t htrates = 0;
7123 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
7124 int i;
7125
7126 if (rsidx == IEEE80211_HT_RATESET_SISO) {
7127 for (i = 0; i < htrs->rs_nrates; i++) {
7128 if (htrs->rs_rates[i] <= 7)
7129 htrates |= (1 << htrs->rs_rates[i]);
7130 }
7131 } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
7132 for (i = 0; i < htrs->rs_nrates; i++) {
7133 if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
7134 htrates |= (1 << (htrs->rs_rates[i] - 8));
7135 }
7136 } else
7137 panic(("iwx_rs_ht_rates"));
7138
7139 IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
7140 "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
7141
7142 return htrates;
7143 }
7144
7145 uint16_t
iwx_rs_vht_rates(struct iwx_softc * sc,struct ieee80211_node * ni,int num_ss)7146 iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7147 {
7148 uint16_t rx_mcs;
7149 int max_mcs = -1;
7150 #define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
7151 #define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
7152 rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
7153 IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7154 IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7155
7156 switch (rx_mcs) {
7157 case IEEE80211_VHT_MCS_NOT_SUPPORTED:
7158 break;
7159 case IEEE80211_VHT_MCS_SUPPORT_0_7:
7160 max_mcs = 7;
7161 break;
7162 case IEEE80211_VHT_MCS_SUPPORT_0_8:
7163 max_mcs = 8;
7164 break;
7165 case IEEE80211_VHT_MCS_SUPPORT_0_9:
7166 /* Disable VHT MCS 9 for 20MHz-only stations. */
7167 if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
7168 max_mcs = 8;
7169 else
7170 max_mcs = 9;
7171 break;
7172 default:
7173 /* Should not happen; Values above cover the possible range. */
7174 panic("invalid VHT Rx MCS value %u", rx_mcs);
7175 }
7176
7177 return ((1 << (max_mcs + 1)) - 1);
7178 }
7179
/*
 * Configure firmware rate control with version 3 of the TLC config
 * command.
 *
 * NOTE: this path is currently disabled — it panics unconditionally
 * because it has not been tested on this driver port.  The code under
 * the #else branch is the intended implementation, kept for when a
 * v3-firmware device can be validated.
 */
static int
iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
{
#if 1
	panic("iwx: Trying to init rate set on untested version");
#else
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwx_tlc_config_cmd_v3 cfg_cmd;
	uint32_t cmd_id;
	int i;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Legacy rates: translate each to the firmware's 11g-based index. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}

	/* Pick the best supported mode and fill per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
	/* Channel width from the PHY context (80 > 40 > 20 MHz). */
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU length: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ieee80211_node_supports_ht_sgi20(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ieee80211_node_supports_ht_sgi40(ni)) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
	    ieee80211_node_supports_vht_sgi80(ni))
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
#endif
}
7251
/*
 * Configure firmware rate control with version 4 of the TLC config
 * command: legacy rate bitmap, HT/VHT MCS bitmaps per spatial stream,
 * channel width, chain mask, max MPDU length and SGI support.
 *
 * @returns 0 on success, EINVAL for a rate outside the 11g set, or an
 *          errno from iwx_send_cmd_pdu().
 */
static int
iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct ieee80211_htrateset *htrs = &ni->ni_htrates;
	struct iwx_tlc_config_cmd_v4 cfg_cmd;
	uint32_t cmd_id;
	int i;
	int sgi80 = 0;
	size_t cmd_size = sizeof(cfg_cmd);

	memset(&cfg_cmd, 0, sizeof(cfg_cmd));

	/* Legacy rates: translate each to the firmware's 11g-based index. */
	for (i = 0; i < rs->rs_nrates; i++) {
		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
		int idx = iwx_rs_rval2idx(rval);
		if (idx == -1)
			return EINVAL;
		cfg_cmd.non_ht_rates |= (1 << idx);
	}
	for (i = 0; i < htrs->rs_nrates; i++) {
		DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
	}

	/* Pick the best supported mode and fill per-NSS MCS bitmaps. */
	if (ni->ni_flags & IEEE80211_NODE_VHT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 1));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_vht_rates(sc, ni, 2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_SISO));
		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
		    htole16(iwx_rs_ht_rates(sc, ni,
		    IEEE80211_HT_RATESET_MIMO2));

		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
		IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
		    __func__, __LINE__,
		    cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
	} else
		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;

	cfg_cmd.sta_id = IWX_STATION_ID;
#if 0
	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	else
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
#endif
	/* Channel width taken from the BSS channel flags (80 > 40 > 20). */
	if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
	} else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
	} else {
		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
	}

	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
	/* Max A-MSDU length: 3895 for VHT, 3839 otherwise. */
	if (ni->ni_flags & IEEE80211_NODE_VHT)
		cfg_cmd.max_mpdu_len = htole16(3895);
	else
		cfg_cmd.max_mpdu_len = htole16(3839);
	/* Short guard interval support per channel width. */
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
		}
		if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
			cfg_cmd.sgi_ch_width_supp |= (1 <<
			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
		}
	}
	sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
	    IEEE80211_VHTCAP_SHORT_GI_80);
	if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
	}

	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
}
7350
7351 static int
iwx_rs_init(struct iwx_softc * sc,struct iwx_node * in)7352 iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7353 {
7354 int cmd_ver;
7355
7356 cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7357 IWX_TLC_MNG_CONFIG_CMD);
7358 if (cmd_ver == 4)
7359 return iwx_rs_init_v4(sc, in);
7360 else
7361 return iwx_rs_init_v3(sc, in);
7362 }
7363
7364
7365 /**
7366 * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
7367 *
7368 * This populates the given txrate node with the TX rate control notification.
7369 *
7370 * @param sc driver softc
7371 * @param notif firmware notification
7372 * @param ni ieee80211_node update
7373 * @returns true if updated, false if not
7374 */
7375 static bool
iwx_rs_update_node_txrate(struct iwx_softc * sc,const struct iwx_tlc_update_notif * notif,struct ieee80211_node * ni)7376 iwx_rs_update_node_txrate(struct iwx_softc *sc,
7377 const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
7378 {
7379 struct ieee80211com *ic = &sc->sc_ic;
7380 /* XXX TODO: create an inline function in if_iwxreg.h? */
7381 static int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
7382 static int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
7383
7384 uint32_t rate_n_flags;
7385 uint32_t type;
7386
7387 /* Extract the rate and command version */
7388 rate_n_flags = le32toh(notif->rate);
7389
7390 if (sc->sc_rate_n_flags_version != 2) {
7391 net80211_ic_printf(ic,
7392 "%s: unsupported rate_n_flags version (%d)\n",
7393 __func__,
7394 sc->sc_rate_n_flags_version);
7395 return (false);
7396 }
7397
7398 if (sc->sc_debug & IWX_DEBUG_TXRATE)
7399 print_ratenflags(__func__, __LINE__,
7400 rate_n_flags, sc->sc_rate_n_flags_version);
7401
7402 type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7403 switch (type) {
7404 case IWX_RATE_MCS_CCK_MSK:
7405 ieee80211_node_set_txrate_dot11rate(ni,
7406 cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7407 return (true);
7408 case IWX_RATE_MCS_LEGACY_OFDM_MSK:
7409 ieee80211_node_set_txrate_dot11rate(ni,
7410 ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
7411 return (true);
7412 case IWX_RATE_MCS_HT_MSK:
7413 /*
7414 * TODO: the current API doesn't include channel width
7415 * and other flags, so we can't accurately store them yet!
7416 *
7417 * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
7418 * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
7419 * LDPC: (flags & (1 << 16))
7420 */
7421 ieee80211_node_set_txrate_ht_mcsrate(ni,
7422 IWX_RATE_HT_MCS_INDEX(rate_n_flags));
7423 return (true);
7424 case IWX_RATE_MCS_VHT_MSK:
7425 /* TODO: same comment on channel width, etc above */
7426 ieee80211_node_set_txrate_vht_rate(ni,
7427 IWX_RATE_VHT_MCS_CODE(rate_n_flags),
7428 IWX_RATE_VHT_MCS_NSS(rate_n_flags));
7429 return (true);
7430 default:
7431 net80211_ic_printf(ic,
7432 "%s: unsupported chosen rate type in "
7433 "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
7434 type >> IWX_RATE_MCS_MOD_TYPE_POS);
7435 return (false);
7436 }
7437
7438 /* Default: if we get here, we didn't successfully update anything */
7439 return (false);
7440 }
7441
7442 /**
7443 * @brief Process a firmware rate control update and update net80211.
7444 *
7445 * Since firmware is doing rate control, this just needs to update
7446 * the txrate in the ieee80211_node entry.
7447 */
7448 static void
iwx_rs_update(struct iwx_softc * sc,struct iwx_tlc_update_notif * notif)7449 iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7450 {
7451 struct ieee80211com *ic = &sc->sc_ic;
7452 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7453 /* XXX TODO: get a node ref! */
7454 struct ieee80211_node *ni = (void *)vap->iv_bss;
7455
7456 /*
7457 * For now the iwx driver only supports a single vdev with a single
7458 * node; it doesn't yet support ibss/hostap/multiple vdevs.
7459 */
7460 if (notif->sta_id != IWX_STATION_ID ||
7461 (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7462 return;
7463
7464 iwx_rs_update_node_txrate(sc, notif, ni);
7465 }
7466
7467 static int
iwx_phy_send_rlc(struct iwx_softc * sc,struct iwx_phy_ctxt * phyctxt,uint8_t chains_static,uint8_t chains_dynamic)7468 iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
7469 uint8_t chains_static, uint8_t chains_dynamic)
7470 {
7471 struct iwx_rlc_config_cmd cmd;
7472 uint32_t cmd_id;
7473 uint8_t active_cnt, idle_cnt;
7474
7475 memset(&cmd, 0, sizeof(cmd));
7476
7477 idle_cnt = chains_static;
7478 active_cnt = chains_dynamic;
7479
7480 cmd.phy_id = htole32(phyctxt->id);
7481 cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
7482 IWX_PHY_RX_CHAIN_VALID_POS);
7483 cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
7484 cmd.rlc.rx_chain_info |= htole32(active_cnt <<
7485 IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
7486
7487 cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
7488 return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
7489 }
7490
/*
 * Update a PHY context to a new channel and chain configuration.
 *
 * If firmware has the CDB capability, a PHY context cannot be modified
 * across a band change (2 GHz <-> 5 GHz); it must be removed and
 * re-added instead.  Otherwise a simple MODIFY is issued.
 *
 * @returns 0 on success, EIO if chan is IEEE80211_CHAN_ANYC, or an
 *          errno from the firmware commands.
 */
static int
iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
    uint8_t vht_chan_width)
{
	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
	int err;

	if (chan == IEEE80211_CHAN_ANYC) {
		printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
		    DEVNAME(sc));
		return EIO;
	}

	/* Band change with CDB firmware: remove and re-add the context. */
	if (isset(sc->sc_enabled_capa,
	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
	    (phyctxt->channel->ic_flags & band_flags) !=
	    (chan->ic_flags & band_flags)) {
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not remove PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not add PHY context "
			    "(error %d)\n", DEVNAME(sc), err);
			return err;
		}
	} else {
		phyctxt->channel = chan;
		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
		    vht_chan_width);
		if (err) {
			printf("%s: could not update PHY context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Remember the settings the firmware now has. */
	phyctxt->sco = sco;
	phyctxt->vht_chan_width = vht_chan_width;

	DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
	    phyctxt->channel->ic_ieee));
	DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
	DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
	    phyctxt->vht_chan_width));

	/* Newer firmware carries RX chain config in a separate RLC command. */
	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
	    IWX_RLC_CONFIG_CMD) == 2)
		return iwx_phy_send_rlc(sc, phyctxt,
		    chains_static, chains_dynamic);

	return 0;
}
7555
/*
 * Bring the firmware into the AUTH state: set up the PHY context, add
 * the MAC context, binding and station, enable the management queue and
 * protect the session so firmware stays on-channel during association.
 *
 * On failure, partially-created firmware state is torn down in reverse
 * order via the goto-cleanup ladder, but only if no firmware reset
 * (generation bump) happened in the meantime.
 */
static int
iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in;
	struct iwx_vap *ivp = IWX_VAP(vap);
	struct ieee80211_node *ni;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	IWX_ASSERT_LOCKED(sc);

	/*
	 * NOTE(review): a node reference is taken here but the early
	 * return/error paths below do not release it — presumably the
	 * reference is intentionally held for the lifetime of the auth
	 * attempt; confirm against the state-machine teardown path.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWX_NODE(ni);

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	} else {
		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
		if (err)
			return err;
	}
	ivp->phy_ctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
	    ether_sprintf(in->in_macaddr)));

	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		printf("%s: could not add MAC context (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;

	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
	if (err) {
		printf("%s: could not add binding (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;

	err = iwx_add_sta_cmd(sc, in, 0);
	if (err) {
		printf("%s: could not add sta (error %d)\n",
		    DEVNAME(sc), err);
		goto rm_binding;
	}
	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
		    IWX_TX_RING_COUNT);
		if (err)
			goto rm_sta;
		return 0;
	}

	err = iwx_enable_mgmt_queue(sc);
	if (err)
		goto rm_sta;

	err = iwx_clear_statistics(sc);
	if (err)
		goto rm_mgmt_queue;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 9;
	else
		duration = 900;
	return iwx_schedule_session_protection(sc, in, duration);

rm_mgmt_queue:
	/* Only undo firmware state if the firmware was not reset meanwhile. */
	if (generation == sc->sc_generation)
		iwx_disable_mgmt_queue(sc);
rm_sta:
	if (generation == sc->sc_generation) {
		iwx_rm_sta_cmd(sc, in);
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}
rm_binding:
	if (generation == sc->sc_generation) {
		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}
	return err;
}
7660
/*
 * Tear down the firmware AUTH state in reverse order of iwx_auth():
 * session protection, station, binding, then MAC context.  Finally the
 * now-unused PHY context is parked on a default channel.
 */
static int
iwx_deauth(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	int err;

	IWX_ASSERT_LOCKED(sc);

	iwx_unprotect_session(sc, in);

	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
		err = iwx_rm_sta(sc, in);
		if (err)
			return err;
		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
	}

	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
	}

	DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
	    IWX_FLAG_MAC_ACTIVE));
	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	//TODO uncommented in obsd, but stays on the way of auth->auth
	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
	if (err)
		return err;

	return 0;
}
7712
/*
 * Transition into RUN state: apply negotiated HT/VHT settings to the
 * PHY context and the firmware station entry, inform the firmware of
 * the association (associd), then configure smart-fifo, multicast
 * filtering, power management and rate scaling.
 *
 * Returns 0 on success or an errno from the first failing command.
 */
static int
iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	struct iwx_vap *ivp = IWX_VAP(vap);
	int err;

	IWX_ASSERT_LOCKED(sc);

	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* Use two chains only when MIMO is enabled. */
		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		sco = IEEE80211_HTOP0_SCO_SCN;
		/* 80MHz only when the peer is VHT on a VHT80 channel. */
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
		    ivp->phy_ctxt->channel, chains, chains,
		    0, sco, vht_chan_width);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}

	/* Update STA again to apply HT and VHT settings. */
	err = iwx_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwx_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwx_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwx_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Monitor mode needs no rate-scaling state. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	err = iwx_rs_init(sc, in);
	if (err) {
		printf("%s: could not init rate scaling (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	return 0;
}
7809
/*
 * Undo RUN-state configuration before leaving RUN state: flush the Tx
 * path, stop all Rx block-ack sessions, restore smart-fifo defaults,
 * disable beacon filtering and mark the station as disassociated.
 *
 * Returns 0 on success or an errno from the first failing command.
 */
static int
iwx_run_stop(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct ieee80211_node *ni = &in->in_ni;
	int err, i;

	IWX_ASSERT_LOCKED(sc);

	err = iwx_flush_sta(sc, in);
	if (err) {
		printf("%s: could not flush Tx path (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/*
	 * Stop Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
	// TODO agg
	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
			continue;
		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
	}

	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
	if (err)
		return err;

	err = iwx_disable_beacon_filter(sc);
	if (err) {
		printf("%s: could not disable beacon filter (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Mark station as disassociated. */
	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	return 0;
}
7865
7866 static struct ieee80211_node *
iwx_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])7867 iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
7868 {
7869 return malloc(sizeof (struct iwx_node), M_80211_NODE,
7870 M_NOWAIT | M_ZERO);
7871 }
7872
/*
 * Hardware CCMP key management, inherited from OpenBSD but not yet
 * ported to FreeBSD's net80211 crypto API; the whole region is
 * disabled. Kept as a reference for a future port.
 */
#if 0
int
iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_node *in = (void *)ni;
	struct iwx_setkey_task_arg *a;
	int err;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		err = ieee80211_set_key(ic, ni, k);
		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
		return err;
	}

	/* Queue the key for installation by iwx_setkey_task(). */
	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
		return ENOSPC;

	a = &sc->setkey_arg[sc->setkey_cur];
	a->sta_id = IWX_STATION_ID;
	a->ni = ni;
	a->k = k;
	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
	sc->setkey_nkeys++;
	iwx_add_task(sc, systq, &sc->setkey_task);
	return EBUSY;
}

/*
 * Install one CCMP key into the firmware's station key table via
 * IWX_ADD_STA_KEY. On failure the association is torn down.
 */
int
iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_node *in = (void *)ni;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
	int err;

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */
	KASSERT(ni == ic->ic_bss);

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys go into key slot 1, pairwise keys into slot 0. */
	if (k->k_flags & IEEE80211_KEY_GROUP) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else
		cmd.common.key_offset = 0;

	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	cmd.common.sta_id = sta_id;

	cmd.transmit_seq_cnt = htole64(k->k_tsc);

	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
		return ECANCELED;
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		/* Can't run without crypto: deauth and rescan. */
		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
		    IEEE80211_REASON_AUTH_LEAVE);
		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		return err;
	}

	if (k->k_flags & IEEE80211_KEY_GROUP)
		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
	else
		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;

	/* Open the port once both pairwise and group keys are installed. */
	if ((in->in_flags & want_keymask) == want_keymask) {
		DPRINTF(("marking port %s valid\n",
		    ether_sprintf(ni->ni_macaddr)));
		ni->ni_port_valid = 1;
		ieee80211_set_link_state(ic, LINK_STATE_UP);
	}

	return 0;
}

/*
 * Deferred-work handler which drains the setkey_arg ring queued by
 * iwx_set_key(). Stops early on error or shutdown.
 */
void
iwx_setkey_task(void *arg)
{
	struct iwx_softc *sc = arg;
	struct iwx_setkey_task_arg *a;
	int err = 0, s = splnet();

	while (sc->setkey_nkeys > 0) {
		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
			break;
		a = &sc->setkey_arg[sc->setkey_tail];
		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
		a->sta_id = 0;
		a->ni = NULL;
		a->k = NULL;
		sc->setkey_tail = (sc->setkey_tail + 1) %
		    nitems(sc->setkey_arg);
		sc->setkey_nkeys--;
	}

	refcnt_rele_wake(&sc->task_refs);
	splx(s);
}

/*
 * Remove a CCMP key from the firmware's key table. Sent async since
 * there is nothing useful to do on failure.
 */
void
iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
    struct ieee80211_key *k)
{
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;

	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
		/* Fallback to software crypto for other ciphers. */
		ieee80211_delete_key(ic, ni, k);
		return;
	}

	/* Nothing to remove if the firmware station is already gone. */
	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
	if (k->k_flags & IEEE80211_KEY_GROUP)
		cmd.common.key_offset = 1;
	else
		cmd.common.key_offset = 0;
	cmd.common.sta_id = IWX_STATION_ID;

	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
}
#endif
8025
/*
 * Perform the driver-side work of a net80211 state transition.
 * Called from iwx_newstate() with the net80211 lock dropped;
 * takes the driver lock for the duration of the transition.
 *
 * When moving to a lower (or equal) state, firmware state belonging
 * to the current state is torn down first; the switch falls through
 * from RUN towards AUTH so intermediate teardown steps are not
 * skipped.
 */
static int
iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	enum ieee80211_state ostate = vap->iv_state;
	int err = 0;

	IWX_LOCK(sc);

	if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
		switch (ostate) {
		case IEEE80211_S_RUN:
			err = iwx_run_stop(sc);
			if (err)
				goto out;
			/* FALLTHROUGH */
		case IEEE80211_S_ASSOC:
		case IEEE80211_S_AUTH:
			if (nstate <= IEEE80211_S_AUTH) {
				err = iwx_deauth(sc);
				if (err)
					goto out;
			}
			/* FALLTHROUGH */
		case IEEE80211_S_SCAN:
		case IEEE80211_S_INIT:
		default:
			break;
		}
//
//		/* Die now if iwx_stop() was called while we were sleeping. */
//		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
//			refcnt_rele_wake(&sc->task_refs);
//			splx(s);
//			return;
//		}
	}

	/* Only AUTH and RUN require firmware work on the way up. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		err = iwx_auth(vap, sc);
		break;

	case IEEE80211_S_ASSOC:
		break;

	case IEEE80211_S_RUN:
		err = iwx_run(vap, sc);
		break;
	default:
		break;
	}

out:
	IWX_UNLOCK(sc);

	return (err);
}
8091
8092 static int
iwx_newstate(struct ieee80211vap * vap,enum ieee80211_state nstate,int arg)8093 iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
8094 {
8095 struct iwx_vap *ivp = IWX_VAP(vap);
8096 struct ieee80211com *ic = vap->iv_ic;
8097 enum ieee80211_state ostate = vap->iv_state;
8098 int err;
8099
8100 /*
8101 * Prevent attempts to transition towards the same state, unless
8102 * we are scanning in which case a SCAN -> SCAN transition
8103 * triggers another scan iteration. And AUTH -> AUTH is needed
8104 * to support band-steering.
8105 */
8106 if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
8107 nstate != IEEE80211_S_AUTH)
8108 return 0;
8109 IEEE80211_UNLOCK(ic);
8110 err = iwx_newstate_sub(vap, nstate);
8111 IEEE80211_LOCK(ic);
8112 if (err == 0)
8113 err = ivp->iv_newstate(vap, nstate, arg);
8114
8115 return (err);
8116 }
8117
8118 static void
iwx_endscan(struct iwx_softc * sc)8119 iwx_endscan(struct iwx_softc *sc)
8120 {
8121 struct ieee80211com *ic = &sc->sc_ic;
8122 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8123
8124 if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8125 return;
8126
8127 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8128
8129 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
8130 wakeup(&vap->iv_state); /* wake up iwx_newstate */
8131 }
8132
/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration.
 * Row order follows the IWX_SF scenario indices; each row holds
 * { aging timeout, idle timeout } in little-endian form.
 */
static const uint32_t
iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast traffic */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast traffic */
		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block-ack traffic */
		htole32(IWX_SF_BA_AGING_TIMER_DEF),
		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re-attempt traffic */
		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
8160
/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 * Same layout as iwx_sf_full_timeout_def above; used when a station
 * node is known (see iwx_fill_sf_command()).
 */
static const uint32_t
iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast traffic */
		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast traffic */
		htole32(IWX_SF_MCAST_AGING_TIMER),
		htole32(IWX_SF_MCAST_IDLE_TIMER)
	},
	{	/* block-ack traffic */
		htole32(IWX_SF_BA_AGING_TIMER),
		htole32(IWX_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re-attempt traffic */
		htole32(IWX_SF_TX_RE_AGING_TIMER),
		htole32(IWX_SF_TX_RE_IDLE_TIMER)
	},
};
8188
8189 static void
iwx_fill_sf_command(struct iwx_softc * sc,struct iwx_sf_cfg_cmd * sf_cmd,struct ieee80211_node * ni)8190 iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8191 struct ieee80211_node *ni)
8192 {
8193 int i, j, watermark;
8194
8195 sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8196
8197 /*
8198 * If we are in association flow - check antenna configuration
8199 * capabilities of the AP station, and choose the watermark accordingly.
8200 */
8201 if (ni) {
8202 if (ni->ni_flags & IEEE80211_NODE_HT) {
8203 struct ieee80211_htrateset *htrs = &ni->ni_htrates;
8204 int hasmimo = 0;
8205 for (i = 0; i < htrs->rs_nrates; i++) {
8206 if (htrs->rs_rates[i] > 7) {
8207 hasmimo = 1;
8208 break;
8209 }
8210 }
8211 if (hasmimo)
8212 watermark = IWX_SF_W_MARK_MIMO2;
8213 else
8214 watermark = IWX_SF_W_MARK_SISO;
8215 } else {
8216 watermark = IWX_SF_W_MARK_LEGACY;
8217 }
8218 /* default watermark value for unassociated mode. */
8219 } else {
8220 watermark = IWX_SF_W_MARK_MIMO2;
8221 }
8222 sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8223
8224 for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8225 for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8226 sf_cmd->long_delay_timeouts[i][j] =
8227 htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8228 }
8229 }
8230
8231 if (ni) {
8232 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8233 sizeof(iwx_sf_full_timeout));
8234 } else {
8235 memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8236 sizeof(iwx_sf_full_timeout_def));
8237 }
8238
8239 }
8240
8241 static int
iwx_sf_config(struct iwx_softc * sc,int new_state)8242 iwx_sf_config(struct iwx_softc *sc, int new_state)
8243 {
8244 struct ieee80211com *ic = &sc->sc_ic;
8245 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8246 struct ieee80211_node *ni = vap->iv_bss;
8247 struct iwx_sf_cfg_cmd sf_cmd = {
8248 .state = htole32(new_state),
8249 };
8250 int err = 0;
8251
8252 switch (new_state) {
8253 case IWX_SF_UNINIT:
8254 case IWX_SF_INIT_OFF:
8255 iwx_fill_sf_command(sc, &sf_cmd, NULL);
8256 break;
8257 case IWX_SF_FULL_ON:
8258 iwx_fill_sf_command(sc, &sf_cmd, ni);
8259 break;
8260 default:
8261 return EINVAL;
8262 }
8263
8264 err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8265 sizeof(sf_cmd), &sf_cmd);
8266 return err;
8267 }
8268
8269 static int
iwx_send_bt_init_conf(struct iwx_softc * sc)8270 iwx_send_bt_init_conf(struct iwx_softc *sc)
8271 {
8272 struct iwx_bt_coex_cmd bt_cmd;
8273
8274 bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
8275
8276 bt_cmd.mode = htole32(IWX_BT_COEX_NW);
8277 bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
8278 bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
8279
8280
8281 return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8282 &bt_cmd);
8283 }
8284
/*
 * Send the SoC configuration command, describing platform properties
 * (crystal latency, LTR delay, low-latency crystal) to the firmware.
 */
static int
iwx_send_soc_conf(struct iwx_softc *sc)
{
	struct iwx_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask. Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		/* Low-latency flag requires UMAC scan command version >= 2. */
		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
		    IWX_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.flags = htole32(flags);

	cmd.latency = htole32(sc->sc_xtal_latency);

	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
8323
/*
 * Send an MCC (mobile country code) update to the firmware and parse
 * the synchronous response. 'alpha2' is a two-letter country code.
 * Returns 0 on success, EIO if the response is malformed, or an errno
 * from command submission.
 */
static int
iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
{
	struct iwx_mcc_update_cmd mcc_cmd;
	struct iwx_host_cmd hcmd = {
		.id = IWX_MCC_UPDATE_CMD,
		.flags = IWX_CMD_WANT_RESP,
		.data = { &mcc_cmd },
	};
	struct iwx_rx_packet *pkt;
	struct iwx_mcc_update_resp *resp;
	size_t resp_len;
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code is sent as two big-endian ASCII bytes. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;

	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;

	err = iwx_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* Response carries a variable-length channel list; validate size. */
	resp_len = iwx_rx_packet_payload_len(pkt);
	if (resp_len < sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	if (resp_len != sizeof(*resp) +
	    resp->n_channels * sizeof(resp->channels[0])) {
		err = EIO;
		goto out;
	}

	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));

out:
	/* Response buffer must be released whether parsing succeeded. */
	iwx_free_resp(sc, &hcmd);

	return err;
}
8380
8381 static int
iwx_send_temp_report_ths_cmd(struct iwx_softc * sc)8382 iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8383 {
8384 struct iwx_temp_report_ths_cmd cmd;
8385 int err;
8386
8387 /*
8388 * In order to give responsibility for critical-temperature-kill
8389 * and TX backoff to FW we need to send an empty temperature
8390 * reporting command at init time.
8391 */
8392 memset(&cmd, 0, sizeof(cmd));
8393
8394 err = iwx_send_cmd_pdu(sc,
8395 IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8396 0, sizeof(cmd), &cmd);
8397 if (err)
8398 printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8399 DEVNAME(sc), err);
8400
8401 return err;
8402 }
8403
8404 static int
iwx_init_hw(struct iwx_softc * sc)8405 iwx_init_hw(struct iwx_softc *sc)
8406 {
8407 struct ieee80211com *ic = &sc->sc_ic;
8408 int err = 0, i;
8409
8410 err = iwx_run_init_mvm_ucode(sc, 0);
8411 if (err)
8412 return err;
8413
8414 if (!iwx_nic_lock(sc))
8415 return EBUSY;
8416
8417 err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
8418 if (err) {
8419 printf("%s: could not init tx ant config (error %d)\n",
8420 DEVNAME(sc), err);
8421 goto err;
8422 }
8423
8424 if (sc->sc_tx_with_siso_diversity) {
8425 err = iwx_send_phy_cfg_cmd(sc);
8426 if (err) {
8427 printf("%s: could not send phy config (error %d)\n",
8428 DEVNAME(sc), err);
8429 goto err;
8430 }
8431 }
8432
8433 err = iwx_send_bt_init_conf(sc);
8434 if (err) {
8435 printf("%s: could not init bt coex (error %d)\n",
8436 DEVNAME(sc), err);
8437 return err;
8438 }
8439
8440 err = iwx_send_soc_conf(sc);
8441 if (err) {
8442 printf("%s: iwx_send_soc_conf failed\n", __func__);
8443 return err;
8444 }
8445
8446 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
8447 printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
8448 err = iwx_send_dqa_cmd(sc);
8449 if (err) {
8450 printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
8451 "failed (error %d)\n", __func__, err);
8452 return err;
8453 }
8454 }
8455 // TODO phyctxt
8456 for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
8457 /*
8458 * The channel used here isn't relevant as it's
8459 * going to be overwritten in the other flows.
8460 * For now use the first channel we have.
8461 */
8462 sc->sc_phyctxt[i].id = i;
8463 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
8464 err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
8465 IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
8466 if (err) {
8467 printf("%s: could not add phy context %d (error %d)\n",
8468 DEVNAME(sc), i, err);
8469 goto err;
8470 }
8471 if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8472 IWX_RLC_CONFIG_CMD) == 2) {
8473 err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
8474 if (err) {
8475 printf("%s: could not configure RLC for PHY "
8476 "%d (error %d)\n", DEVNAME(sc), i, err);
8477 goto err;
8478 }
8479 }
8480 }
8481
8482 err = iwx_config_ltr(sc);
8483 if (err) {
8484 printf("%s: PCIe LTR configuration failed (error %d)\n",
8485 DEVNAME(sc), err);
8486 }
8487
8488 if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
8489 err = iwx_send_temp_report_ths_cmd(sc);
8490 if (err) {
8491 printf("%s: iwx_send_temp_report_ths_cmd failed\n",
8492 __func__);
8493 goto err;
8494 }
8495 }
8496
8497 err = iwx_power_update_device(sc);
8498 if (err) {
8499 printf("%s: could not send power command (error %d)\n",
8500 DEVNAME(sc), err);
8501 goto err;
8502 }
8503
8504 if (sc->sc_nvm.lar_enabled) {
8505 err = iwx_send_update_mcc_cmd(sc, "ZZ");
8506 if (err) {
8507 printf("%s: could not init LAR (error %d)\n",
8508 DEVNAME(sc), err);
8509 goto err;
8510 }
8511 }
8512
8513 err = iwx_config_umac_scan_reduced(sc);
8514 if (err) {
8515 printf("%s: could not configure scan (error %d)\n",
8516 DEVNAME(sc), err);
8517 goto err;
8518 }
8519
8520 err = iwx_disable_beacon_filter(sc);
8521 if (err) {
8522 printf("%s: could not disable beacon filter (error %d)\n",
8523 DEVNAME(sc), err);
8524 goto err;
8525 }
8526
8527 err:
8528 iwx_nic_unlock(sc);
8529 return err;
8530 }
8531
/* Allow multicast from our BSSID. */
static int
iwx_allow_mcast(struct iwx_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwx_node *in = IWX_NODE(vap->iv_bss);
	struct iwx_mcast_filter_cmd *cmd;
	size_t size;
	int err;

	/* Command size is padded to a multiple of 4 bytes. */
	size = roundup(sizeof(*cmd), 4);
	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL)
		return ENOMEM;
	/* No explicit address list (count 0); pass_all accepts everything. */
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;
	cmd->pass_all = 1;
	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);

	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
	    0, size, cmd);
	free(cmd, M_DEVBUF);
	return err;
}
8558
8559 static int
iwx_init(struct iwx_softc * sc)8560 iwx_init(struct iwx_softc *sc)
8561 {
8562 int err, generation;
8563 generation = ++sc->sc_generation;
8564 iwx_preinit(sc);
8565
8566 err = iwx_start_hw(sc);
8567 if (err) {
8568 printf("%s: iwx_start_hw failed\n", __func__);
8569 return err;
8570 }
8571
8572 err = iwx_init_hw(sc);
8573 if (err) {
8574 if (generation == sc->sc_generation)
8575 iwx_stop_device(sc);
8576 printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
8577 return err;
8578 }
8579
8580 sc->sc_flags |= IWX_FLAG_HW_INITED;
8581 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8582
8583 return 0;
8584 }
8585
8586 static void
iwx_start(struct iwx_softc * sc)8587 iwx_start(struct iwx_softc *sc)
8588 {
8589 struct ieee80211_node *ni;
8590 struct mbuf *m;
8591
8592 IWX_ASSERT_LOCKED(sc);
8593
8594 while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
8595 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
8596 if (iwx_tx(sc, m, ni) != 0) {
8597 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
8598 continue;
8599 }
8600 }
8601 }
8602
8603 static void
iwx_stop(struct iwx_softc * sc)8604 iwx_stop(struct iwx_softc *sc)
8605 {
8606 struct ieee80211com *ic = &sc->sc_ic;
8607 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8608 struct iwx_vap *ivp = IWX_VAP(vap);
8609
8610 iwx_stop_device(sc);
8611
8612 /* Reset soft state. */
8613 sc->sc_generation++;
8614 ivp->phy_ctxt = NULL;
8615
8616 sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8617 sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8618 sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8619 sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8620 sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
8621 sc->sc_flags &= ~IWX_FLAG_HW_ERR;
8622 sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
8623 sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
8624
8625 sc->sc_rx_ba_sessions = 0;
8626 sc->ba_rx.start_tidmask = 0;
8627 sc->ba_rx.stop_tidmask = 0;
8628 memset(sc->aggqid, 0, sizeof(sc->aggqid));
8629 sc->ba_tx.start_tidmask = 0;
8630 sc->ba_tx.stop_tidmask = 0;
8631 }
8632
8633 static void
iwx_watchdog(void * arg)8634 iwx_watchdog(void *arg)
8635 {
8636 struct iwx_softc *sc = arg;
8637 struct ieee80211com *ic = &sc->sc_ic;
8638 int i;
8639
8640 /*
8641 * We maintain a separate timer for each Tx queue because
8642 * Tx aggregation queues can get "stuck" while other queues
8643 * keep working. The Linux driver uses a similar workaround.
8644 */
8645 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
8646 if (sc->sc_tx_timer[i] > 0) {
8647 if (--sc->sc_tx_timer[i] == 0) {
8648 printf("%s: device timeout\n", DEVNAME(sc));
8649
8650 iwx_nic_error(sc);
8651 iwx_dump_driver_status(sc);
8652 ieee80211_restart_all(ic);
8653 return;
8654 }
8655 }
8656 }
8657 callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
8658 }
8659
/*
 * LMAC error log layout, read from device memory by the error dump
 * code when the firmware crashes.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
8713
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Read from the address in sc_uc.uc_umac_error_event_table by
 * iwx_nic_umac_error() below.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwx_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
8738
8739 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
8740 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
8741
/*
 * Dump the UMAC error log to the console after a firmware error.
 * The log is read from device memory at the address the firmware
 * advertised in sc_uc.uc_umac_error_event_table.
 */
static void
iwx_nic_umac_error(struct iwx_softc *sc)
{
	struct iwx_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity check: addresses below 0x400000 are not valid log
	 * pointers for this firmware. */
	if (base < 0x400000) {
		printf("%s: Invalid error log pointer 0x%08x\n",
		    DEVNAME(sc), base);
		return;
	}

	/* iwx_read_mem() takes a length in 32-bit words. */
	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		printf("%s: reading errlog failed\n", DEVNAME(sc));
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
		    sc->sc_flags, table.valid);
	}

	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
	    iwx_desc_lookup(table.error_id));
	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
	    table.frame_pointer);
	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
	    table.stack_pointer);
	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
	    table.nic_isr_pref);
}
8786
/* Top nibble of an error id encodes which CPU asserted; masked off below. */
#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Firmware assert-code to human-readable-name mapping.
 * The last entry is a catch-all and must remain at the end;
 * iwx_desc_lookup() relies on this ordering.
 */
static struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
8813
8814 static const char *
iwx_desc_lookup(uint32_t num)8815 iwx_desc_lookup(uint32_t num)
8816 {
8817 int i;
8818
8819 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
8820 if (advanced_lookup[i].num ==
8821 (num & ~IWX_FW_SYSASSERT_CPU_MASK))
8822 return advanced_lookup[i].name;
8823
8824 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
8825 return advanced_lookup[i].name;
8826 }
8827
8828 /*
8829 * Support for dumping the error log seemed like a good idea ...
8830 * but it's mostly hex junk and the only sensible thing is the
8831 * hw/ucode revision (which we know anyway). Since it's here,
8832 * I'll just leave it in, just in case e.g. the Intel guys want to
8833 * help us decipher some "ADVANCED_SYSASSERT" later.
8834 */
8835 static void
iwx_nic_error(struct iwx_softc * sc)8836 iwx_nic_error(struct iwx_softc *sc)
8837 {
8838 struct iwx_error_event_table table;
8839 uint32_t base;
8840
8841 printf("%s: dumping device error log\n", DEVNAME(sc));
8842 printf("%s: GOS-3758: 1\n", __func__);
8843 base = sc->sc_uc.uc_lmac_error_event_table[0];
8844 printf("%s: GOS-3758: 2\n", __func__);
8845 if (base < 0x400000) {
8846 printf("%s: Invalid error log pointer 0x%08x\n",
8847 DEVNAME(sc), base);
8848 return;
8849 }
8850
8851 printf("%s: GOS-3758: 3\n", __func__);
8852 if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
8853 printf("%s: reading errlog failed\n", DEVNAME(sc));
8854 return;
8855 }
8856
8857 printf("%s: GOS-3758: 4\n", __func__);
8858 if (!table.valid) {
8859 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
8860 return;
8861 }
8862
8863 printf("%s: GOS-3758: 5\n", __func__);
8864 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
8865 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
8866 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
8867 sc->sc_flags, table.valid);
8868 }
8869
8870 printf("%s: GOS-3758: 6\n", __func__);
8871 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
8872 iwx_desc_lookup(table.error_id));
8873 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
8874 table.trm_hw_status0);
8875 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
8876 table.trm_hw_status1);
8877 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
8878 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
8879 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
8880 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
8881 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
8882 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
8883 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
8884 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
8885 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
8886 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
8887 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
8888 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
8889 table.fw_rev_type);
8890 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
8891 table.major);
8892 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
8893 table.minor);
8894 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
8895 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
8896 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
8897 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
8898 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
8899 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
8900 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
8901 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
8902 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
8903 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
8904 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
8905 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
8906 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
8907 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
8908 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
8909 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
8910 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
8911
8912 if (sc->sc_uc.uc_umac_error_event_table)
8913 iwx_nic_umac_error(sc);
8914 }
8915
8916 static void
iwx_dump_driver_status(struct iwx_softc * sc)8917 iwx_dump_driver_status(struct iwx_softc *sc)
8918 {
8919 struct ieee80211com *ic = &sc->sc_ic;
8920 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8921 enum ieee80211_state state = vap->iv_state;
8922 int i;
8923
8924 printf("driver status:\n");
8925 for (i = 0; i < nitems(sc->txq); i++) {
8926 struct iwx_tx_ring *ring = &sc->txq[i];
8927 printf(" tx ring %2d: qid=%-2d cur=%-3d "
8928 "cur_hw=%-3d queued=%-3d\n",
8929 i, ring->qid, ring->cur, ring->cur_hw,
8930 ring->queued);
8931 }
8932 printf(" rx ring: cur=%d\n", sc->rxq.cur);
8933 printf(" 802.11 state %s\n", ieee80211_state_name[state]);
8934 }
8935
/*
 * Sync the Rx DMA map for CPU reads and point _var_ at the response
 * payload, which immediately follows the iwx_rx_packet header.
 * Relies on 'sc' and 'data' being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
8941
8942 static int
iwx_rx_pkt_valid(struct iwx_rx_packet * pkt)8943 iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
8944 {
8945 int qid, idx, code;
8946
8947 qid = pkt->hdr.qid & ~0x80;
8948 idx = pkt->hdr.idx;
8949 code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
8950
8951 return (!(qid == 0 && idx == 0 && code == 0) &&
8952 pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
8953 }
8954
/*
 * Parse and dispatch all firmware notification packets contained in a
 * single Rx buffer.  Pre-AX210 devices may pack several packets into one
 * buffer; AX210 and later ship exactly one packet per buffer.  Handles
 * both unsolicited notifications (qid bit 0x80 set) and direct command
 * responses, for which iwx_cmd_done() is invoked at the end.
 *
 * NOTE(review): the 'ml' parameter is never referenced in this body —
 * presumably a leftover from the OpenBSD mbuf-list API; confirm with
 * callers before removing.
 */
static void
iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwx_rx_packet *pkt, *nextpkt;
	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
	struct mbuf *m0, *m;
	/* Smallest possible packet: length word plus command header. */
	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
	int qid, idx, code, handled = 1;

	m0 = data->m;
	/* Walk every packet that fits in the buffer. */
	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;
		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		if (!iwx_rx_pkt_valid(pkt))
			break;

		/*
		 * XXX Intel inside (tm)
		 * Any commands in the LONG_GROUP could actually be in the
		 * LEGACY group. Firmware API versions >= 50 reject commands
		 * in group 0, forcing us to use this hack.
		 */
		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
			struct iwx_tx_ring *ring = &sc->txq[qid];
			struct iwx_tx_data *txdata = &ring->data[idx];
			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
				code = iwx_cmd_opcode(code);
		}

		/* Total packet size; bail on truncated or oversized packets. */
		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
			break;

		// TODO ???
		/*
		 * On the first MPDU, replenish the Rx slot so the data
		 * mbuf (m0) can be handed up the stack below.
		 */
		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
			/* Take mbuf m0 off the RX ring. */
			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
				break;
			}
			KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
		}

		switch (code) {
		case IWX_REPLY_RX_PHY_CMD:
			/* XXX-THJ: I've not managed to hit this path in testing */
			iwx_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWX_REPLY_RX_MPDU_CMD: {
			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
			nextoff = offset +
			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
			nextpkt = (struct iwx_rx_packet *)
			    (m0->m_data + nextoff);
			/* AX210 devices ship only one packet per Rx buffer. */
			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
			    nextoff + minsz >= IWX_RBUF_SIZE ||
			    !iwx_rx_pkt_valid(nextpkt)) {
				/* No need to copy last frame in buffer. */
				if (offset > 0)
					m_adj(m0, offset);
				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
				m0 = NULL; /* stack owns m0 now; abort loop */
			} else {
				/*
				 * Create an mbuf which points to the current
				 * packet. Always copy from offset zero to
				 * preserve m_pkthdr.
				 */
				m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
				if (m == NULL) {
					m_freem(m0);
					m0 = NULL;
					break;
				}
				m_adj(m, offset);
				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
			}
			break;
		}

//		case IWX_BAR_FRAME_RELEASE:
//			iwx_rx_bar_frame_release(sc, pkt, ml);
//			break;
//
		case IWX_TX_CMD:
			iwx_rx_tx_cmd(sc, pkt, data);
			break;

		case IWX_BA_NOTIF:
			iwx_rx_compressed_ba(sc, pkt);
			break;

		case IWX_MISSED_BEACONS_NOTIFICATION:
			IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
			    "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
			    __func__);
			iwx_rx_bmiss(sc, pkt, data);
			break;

		case IWX_MFUART_LOAD_NOTIFICATION:
			break;

		case IWX_ALIVE: {
			struct iwx_alive_resp_v4 *resp4;
			struct iwx_alive_resp_v5 *resp5;
			struct iwx_alive_resp_v6 *resp6;

			DPRINTF(("%s: firmware alive\n", __func__));
			sc->sc_uc.uc_ok = 0;

			/*
			 * For v5 and above, we can check the version, for older
			 * versions we need to check the size.
			 */
			if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 6) {
				SYNC_RESP_STRUCT(resp6, pkt);
				/* Size mismatch: wake the waiter and give up. */
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp6)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp6->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp6->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp6->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp6->sku_id.data[2]);
				if (resp6->status == IWX_ALIVE_STATUS_OK) {
					sc->sc_uc.uc_ok = 1;
				}
			} else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
			    IWX_ALIVE) == 5) {
				SYNC_RESP_STRUCT(resp5, pkt);
				if (iwx_rx_packet_payload_len(pkt) !=
				    sizeof(*resp5)) {
					sc->sc_uc.uc_intr = 1;
					wakeup(&sc->sc_uc);
					break;
				}
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp5->umac_data.dbg_ptrs.error_info_addr);
				sc->sc_sku_id[0] =
				    le32toh(resp5->sku_id.data[0]);
				sc->sc_sku_id[1] =
				    le32toh(resp5->sku_id.data[1]);
				sc->sc_sku_id[2] =
				    le32toh(resp5->sku_id.data[2]);
				if (resp5->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
				SYNC_RESP_STRUCT(resp4, pkt);
				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
				sc->sc_uc.uc_log_event_table = le32toh(
				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
				sc->sc_uc.uc_umac_error_event_table = le32toh(
				    resp4->umac_data.dbg_ptrs.error_info_addr);
				if (resp4->status == IWX_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
			} else
				printf("unknown payload version");

			/* Wake whoever is sleeping on the ALIVE response. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWX_STATISTICS_NOTIFICATION: {
			struct iwx_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwx_get_noise(&stats->rx.general);
			break;
		}

		case IWX_DTS_MEASUREMENT_NOTIFICATION:
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
			break;

		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
		    IWX_CT_KILL_NOTIFICATION): {
			struct iwx_ct_kill_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			printf("%s: device at critical temperature (%u degC), "
			    "stopping device\n",
			    DEVNAME(sc), le16toh(notif->temperature));
			sc->sc_flags |= IWX_FLAG_HW_ERR;
			ieee80211_restart_all(ic);
			break;
		}

		/*
		 * Plain command acknowledgements: copy the response packet
		 * for a waiting iwx_send_cmd() caller, if any.
		 */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_SCD_QUEUE_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_CMD):
		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_GET_INFO):
		case IWX_ADD_STA_KEY:
		case IWX_PHY_CONFIGURATION_CMD:
		case IWX_TX_ANT_CONFIGURATION_CMD:
		case IWX_ADD_STA:
		case IWX_MAC_CONTEXT_CMD:
		case IWX_REPLY_SF_CFG_CMD:
		case IWX_POWER_TABLE_CMD:
		case IWX_LTR_CONFIG:
		case IWX_PHY_CONTEXT_CMD:
		case IWX_BINDING_CONTEXT_CMD:
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
		case IWX_REPLY_BEACON_FILTERING_CMD:
		case IWX_MAC_PM_POWER_TABLE:
		case IWX_TIME_QUOTA_CMD:
		case IWX_REMOVE_STA:
		case IWX_TXPATH_FLUSH:
		case IWX_BT_CONFIG:
		case IWX_MCC_UPDATE_CMD:
		case IWX_TIME_EVENT_CMD:
		case IWX_STATISTICS_CMD:
		case IWX_SCD_QUEUE_CFG: {
			size_t pkt_len;

			if (sc->sc_cmd_resp_pkt[idx] == NULL)
				break;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			pkt_len = sizeof(pkt->len_n_flags) +
			    iwx_rx_packet_len(pkt);

			/* Drop failed or ill-sized responses. */
			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
			    pkt_len < sizeof(*pkt) ||
			    pkt_len > sc->sc_cmd_resp_len[idx]) {
				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
				sc->sc_cmd_resp_pkt[idx] = NULL;
				break;
			}

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
			break;
		}

		case IWX_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete |= IWX_INIT_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		case IWX_SCAN_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
			struct iwx_umac_scan_complete *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
			    notif->status));
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			iwx_endscan(sc);
			break;
		}

		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
			DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
			    __func__));
			struct iwx_umac_scan_iter_complete_notif *notif __attribute__((unused));
			SYNC_RESP_STRUCT(notif, pkt);
			DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
			    notif->status));
			iwx_endscan(sc);
			break;
		}

		case IWX_MCC_CHUB_UPDATE_CMD: {
			struct iwx_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwx_mcc_update(sc, notif);
			break;
		}

		case IWX_REPLY_ERROR: {
			struct iwx_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			printf("%s: firmware error 0x%x, cmd 0x%x\n",
			    DEVNAME(sc), le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWX_TIME_EVENT_NOTIFICATION: {
			struct iwx_time_event_notif *notif;
			uint32_t action;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Ignore notifications for time events we don't own. */
			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
				break;
			action = le32toh(notif->action);
			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
		    IWX_SESSION_PROTECTION_NOTIF): {
			struct iwx_session_prot_notif *notif;
			uint32_t status, start, conf_id;

			SYNC_RESP_STRUCT(notif, pkt);

			status = le32toh(notif->status);
			start = le32toh(notif->start);
			conf_id = le32toh(notif->conf_id);
			/* Check for end of successful PROTECT_CONF_ASSOC. */
			if (status == 1 && start == 0 &&
			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
			break;
		}

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
			break;

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWX_DEBUG_LOG_MSG:
			break;

		case IWX_MCAST_FILTER_CMD:
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
			break;

		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_NVM_ACCESS_COMPLETE):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
			break; /* happens in monitor mode; ignore for now */

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
			break;

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
		    IWX_TLC_MNG_UPDATE_NOTIF): {
			struct iwx_tlc_update_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			(void)notif;
			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
				iwx_rs_update(sc, notif);
			break;
		}

		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
			break;

		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
			break;

		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
		    IWX_PNVM_INIT_COMPLETE):
			DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
			wakeup(&sc->sc_init_complete);
			break;

		default:
			handled = 0;
			/* XXX wulf: Get rid of bluetooth-related spam */
			if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
			    (code == 0xce && pkt->len_n_flags == 0x2000002c))
				break;
			printf("%s: unhandled firmware response 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    DEVNAME(sc), code, pkt->len_n_flags,
			    (qid & ~0x80), idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWX_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (handled && !(qid & (1 << 7))) {
			iwx_cmd_done(sc, qid, idx, code);
		}

		/* Advance to the next aligned packet in this buffer. */
		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);

		/* AX210 devices ship only one packet per Rx buffer. */
		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
			break;
	}

	/* Free m0 unless ownership passed to the net80211 stack or the ring. */
	if (m0 && m0 != data->m)
		m_freem(m0);
}
9392
/*
 * Service the Rx notification ring: read the hardware's closed-buffer
 * index from the status page and process every buffer up to it, then
 * write the updated ring index back to the device.
 *
 * NOTE(review): the stack 'struct mbuf m' passed to iwx_rx_pkt() is
 * never initialized or read there — looks vestigial; confirm before
 * removing.
 */
static void
iwx_notif_intr(struct iwx_softc *sc)
{
	struct mbuf m;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* The closed-buffer index lives at a different spot on AX210+. */
	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
		uint16_t *status = sc->rxq.stat_dma.vaddr;
		hw = le16toh(*status) & 0xfff;
	} else
		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	hw &= (IWX_RX_MQ_RING_COUNT - 1);
	while (sc->rxq.cur != hw) {
		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		iwx_rx_pkt(sc, data, &m);
		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
}
9425
#if 0
/*
 * Legacy INTx/ICT interrupt handler inherited from OpenBSD.  Currently
 * compiled out: this port uses MSI-X exclusively via iwx_intr_msix().
 * Kept for reference in case legacy interrupt support is ever needed.
 */
int
iwx_intr(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	int r1, r2, rv = 0;

	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWX_READ(sc, IWX_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
#if 0
		int i;
		/* Firmware has now configured the RFH. */
		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}


	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
		iwx_check_rfkill(sc);
		rv = 1;
		goto out_ena;
	}

	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
		if (ifp->if_flags & IFF_DEBUG) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		rv = 1;
		goto out;

	}

	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		iwx_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);

		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
		}
		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
			    IWX_CSR_INT_PERIODIC_ENA);

		iwx_notif_intr(sc);
	}

	rv = 1;

 out_ena:
	iwx_restore_interrupts(sc);
 out:
	return rv;
}
#endif
9554
/*
 * MSI-X interrupt handler.  Reads and acknowledges the FH (flow handler)
 * and HW cause registers, dispatches Rx notifications, firmware-load
 * completions, rfkill changes, and fatal error handling, then re-enables
 * the vector via the automask register.
 */
static void
iwx_intr_msix(void *arg)
{
	struct iwx_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t inta_fh, inta_hw;
	int vector = 0;

	IWX_LOCK(sc);

	/* Read causes, then ack by writing them back (W1C). */
	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	inta_fh &= sc->sc_fh_mask;
	inta_hw &= sc->sc_hw_mask;

	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
		iwx_notif_intr(sc);
	}

	/* firmware chunk loaded */
	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* Fatal firmware error: dump state and restart the stack. */
	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		if (sc->sc_debug) {
			iwx_nic_error(sc);
			iwx_dump_driver_status(sc);
		}
		printf("%s: fatal firmware error\n", DEVNAME(sc));
		ieee80211_restart_all(ic);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		iwx_check_rfkill(sc);
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
		sc->sc_flags |= IWX_FLAG_HW_ERR;
		iwx_stop(sc);
		goto out;
	}

	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
		    "%s:%d WARNING: Skipping rx desc update\n",
		    __func__, __LINE__);
#if 0
		/*
		 * XXX-THJ: we don't have the dma segment handy. This is hacked
		 * out in the fc release, return to it if we ever get this
		 * warning.
		 */
		/* Firmware has now configured the RFH. */
		for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
			iwx_update_rx_desc(sc, &sc->rxq, i);
#endif
		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
 out:
	IWX_UNLOCK(sc);
	return;
}
9636
9637 /*
9638 * The device info table below contains device-specific config overrides.
9639 * The most important parameter derived from this table is the name of the
9640 * firmware image to load.
9641 *
9642 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
9643 * The "old" table matches devices based on PCI vendor/product IDs only.
9644 * The "new" table extends this with various device parameters derived
9645 * from MAC type, and RF type.
9646 *
9647 * In iwlwifi "old" and "new" tables share the same array, where "old"
9648 * entries contain dummy values for data defined only for "new" entries.
9649 * As of 2022, Linux developers are still in the process of moving entries
 * from "old" to "new" style and it looks like this effort has stalled
 * in some work-in-progress state for quite a while. Linux commits moving
9652 * entries from "old" to "new" have at times been reverted due to regressions.
9653 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
9654 * devices in the same driver.
9655 *
9656 * Our table below contains mostly "new" entries declared in iwlwifi
9657 * with the _IWL_DEV_INFO() macro (with a leading underscore).
9658 * Other devices are matched based on PCI vendor/product ID as usual,
9659 * unless matching specific PCI subsystem vendor/product IDs is required.
9660 *
9661 * Some "old"-style entries are required to identify the firmware image to use.
9662 * Others might be used to print a specific marketing name into Linux dmesg,
9663 * but we can't be sure whether the corresponding devices would be matched
9664 * correctly in the absence of their entries. So we include them just in case.
9665 */
9666
/*
 * One device-match entry; see the table comment above.  PCI IDs are
 * matched first, then the MAC/RF parameters (any of which may be the
 * IWX_CFG_ANY wildcard).
 */
struct iwx_dev_info {
	uint16_t device;	/* PCI device ID */
	uint16_t subdevice;	/* PCI subsystem device ID */
	uint16_t mac_type;	/* MAC silicon type */
	uint16_t rf_type;	/* RF silicon type */
	uint8_t mac_step;	/* MAC silicon stepping */
	uint8_t rf_id;		/* RF ID */
	uint8_t no_160;		/* set if 160 MHz channels unsupported */
	uint8_t cores;		/* number of RF cores */
	uint8_t cdb;		/* set for CDB (dual-band) devices */
	uint8_t jacket;
	const struct iwx_device_cfg *cfg;	/* config overrides to apply */
};
9680
/* "New"-style entry: match on PCI IDs plus MAC/RF parameters. */
#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
	  .mac_type = _mac_type, .rf_type = _rf_type,	\
	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,	\
	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }

/* "Old"-style entry: match on PCI vendor/product IDs only. */
#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	\
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
9692
9693 /*
9694 * When adding entries to this table keep in mind that entries must
9695 * be listed in the same order as in the Linux driver. Code walks this
9696 * table backwards and uses the first matching entry it finds.
9697 * Device firmware must be available in fw_update(8).
9698 */
static const struct iwx_dev_info iwx_dev_info_table[] = {
	/* So with HR */
	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */

	/* So with GF2 */
	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */

	/* Qu with Jf, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */

	/* QuZ with Jf */
	/*
	 * NOTE(review): the first two entries below use RF_TYPE_JF2 but
	 * are labelled 9461; verify against the upstream iwlwifi table.
	 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY,
		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */

	/* Qu with Hr, B step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_b0_hr_b0), /* AX203 */

	/* Qu with Hr, C step */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_qu_c0_hr_b0), /* AX201 */

	/* QuZ with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_quz_a0_hr1_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_quz_a0_hr_b0), /* AX203 */

	/* SoF with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* SoF with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */

	/* So with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* ax201 */

	/* So-F with Hr */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX203 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX101 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_cfg_so_a0_hr_b0), /* AX201 */

	/* So-F with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with GF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */

	/* So with JF2 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */

	/* So with JF */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
};
9997
9998 static int
iwx_preinit(struct iwx_softc * sc)9999 iwx_preinit(struct iwx_softc *sc)
10000 {
10001 struct ieee80211com *ic = &sc->sc_ic;
10002 int err;
10003
10004 err = iwx_prepare_card_hw(sc);
10005 if (err) {
10006 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10007 return err;
10008 }
10009
10010 if (sc->attached) {
10011 return 0;
10012 }
10013
10014 err = iwx_start_hw(sc);
10015 if (err) {
10016 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10017 return err;
10018 }
10019
10020 err = iwx_run_init_mvm_ucode(sc, 1);
10021 iwx_stop_device(sc);
10022 if (err) {
10023 printf("%s: failed to stop device\n", DEVNAME(sc));
10024 return err;
10025 }
10026
10027 /* Print version info and MAC address on first successful fw load. */
10028 sc->attached = 1;
10029 if (sc->sc_pnvm_ver) {
10030 printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10031 "address %s\n",
10032 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10033 sc->sc_fwver, sc->sc_pnvm_ver,
10034 ether_sprintf(sc->sc_nvm.hw_addr));
10035 } else {
10036 printf("%s: hw rev 0x%x, fw %s, address %s\n",
10037 DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10038 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10039 }
10040
10041 /* not all hardware can do 5GHz band */
10042 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10043 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10044 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10045
10046 return 0;
10047 }
10048
/*
 * Deferred attach hook run via the config intrhook mechanism (see the
 * config_intrhook_disestablish() call below).  Loads firmware through
 * iwx_preinit() and, on success, completes net80211 attachment:
 * channel map, com callbacks, A-MPDU/ADDBA interception, and radiotap.
 * The intrhook is disestablished on all paths, including failure.
 */
static void
iwx_attach_hook(void *self)
{
	struct iwx_softc *sc = (void *)self;
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/* iwx_preinit() expects the softc lock to be held. */
	IWX_LOCK(sc);
	err = iwx_preinit(sc);
	IWX_UNLOCK(sc);
	if (err != 0)
		goto out;

	iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwx_vap_create;
	ic->ic_vap_delete = iwx_vap_delete;
	ic->ic_raw_xmit = iwx_raw_xmit;
	ic->ic_node_alloc = iwx_node_alloc;
	ic->ic_scan_start = iwx_scan_start;
	ic->ic_scan_end = iwx_scan_end;
	ic->ic_update_mcast = iwx_update_mcast;
	ic->ic_getradiocaps = iwx_init_channel_map;

	ic->ic_set_channel = iwx_set_channel;
	ic->ic_scan_curchan = iwx_scan_curchan;
	ic->ic_scan_mindwell = iwx_scan_mindwell;
	ic->ic_wme.wme_update = iwx_wme_update;
	ic->ic_parent = iwx_parent;
	ic->ic_transmit = iwx_transmit;

	/*
	 * Save net80211's A-MPDU RX handlers before overriding them so
	 * our wrappers can chain back to the originals.
	 */
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;

	/* Likewise for the ADDBA (TX aggregation) handlers. */
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwx_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwx_addba_response;

	iwx_radiotap_attach(sc);
	ieee80211_announce(ic);
out:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
}
10097
10098 const struct iwx_device_cfg *
iwx_find_device_cfg(struct iwx_softc * sc)10099 iwx_find_device_cfg(struct iwx_softc *sc)
10100 {
10101 uint16_t sdev_id, mac_type, rf_type;
10102 uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10103 int i;
10104
10105 sdev_id = pci_get_subdevice(sc->sc_dev);
10106 mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10107 mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10108 rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10109 cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10110 jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10111
10112 rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10113 no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10114 cores = IWX_SUBDEVICE_CORES(sdev_id);
10115
10116 for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10117 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10118
10119 if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10120 dev_info->device != sc->sc_pid)
10121 continue;
10122
10123 if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10124 dev_info->subdevice != sdev_id)
10125 continue;
10126
10127 if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10128 dev_info->mac_type != mac_type)
10129 continue;
10130
10131 if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10132 dev_info->mac_step != mac_step)
10133 continue;
10134
10135 if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10136 dev_info->rf_type != rf_type)
10137 continue;
10138
10139 if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10140 dev_info->cdb != cdb)
10141 continue;
10142
10143 if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10144 dev_info->jacket != jacket)
10145 continue;
10146
10147 if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10148 dev_info->rf_id != rf_id)
10149 continue;
10150
10151 if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10152 dev_info->no_160 != no_160)
10153 continue;
10154
10155 if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10156 dev_info->cores != cores)
10157 continue;
10158
10159 return dev_info->cfg;
10160 }
10161
10162 return NULL;
10163 }
10164
10165 static int
iwx_probe(device_t dev)10166 iwx_probe(device_t dev)
10167 {
10168 int i;
10169
10170 for (i = 0; i < nitems(iwx_devices); i++) {
10171 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
10172 pci_get_device(dev) == iwx_devices[i].device) {
10173 device_set_desc(dev, iwx_devices[i].name);
10174
10175 /*
10176 * Due to significant existing deployments using
10177 * iwlwifi lower the priority of iwx.
10178 *
10179 * This inverts the advice in bus.h where drivers
10180 * supporting newer hardware should return
10181 * BUS_PROBE_DEFAULT and drivers for older devices
10182 * return BUS_PROBE_LOW_PRIORITY.
10183 *
10184 */
10185 return (BUS_PROBE_LOW_PRIORITY);
10186 }
10187 }
10188
10189 return (ENXIO);
10190 }
10191
10192 static int
iwx_attach(device_t dev)10193 iwx_attach(device_t dev)
10194 {
10195 struct iwx_softc *sc = device_get_softc(dev);
10196 struct ieee80211com *ic = &sc->sc_ic;
10197 const struct iwx_device_cfg *cfg;
10198 int err;
10199 int txq_i, i, j;
10200 size_t ctxt_info_size;
10201 int rid;
10202 int count;
10203 int error;
10204 sc->sc_dev = dev;
10205 sc->sc_pid = pci_get_device(dev);
10206 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
10207
10208 TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
10209 IWX_LOCK_INIT(sc);
10210 mbufq_init(&sc->sc_snd, ifqmaxlen);
10211 TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
10212 TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
10213 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
10214 taskqueue_thread_enqueue, &sc->sc_tq);
10215 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
10216 if (error != 0) {
10217 device_printf(dev, "can't start taskq thread, error %d\n",
10218 error);
10219 return (ENXIO);
10220 }
10221
10222 pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
10223 if (sc->sc_cap_off == 0) {
10224 device_printf(dev, "PCIe capability structure not found!\n");
10225 return (ENXIO);
10226 }
10227
10228 /*
10229 * We disable the RETRY_TIMEOUT register (0x41) to keep
10230 * PCI Tx retries from interfering with C3 CPU state.
10231 */
10232 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10233
10234 if (pci_msix_count(dev)) {
10235 sc->sc_msix = 1;
10236 } else {
10237 device_printf(dev, "no MSI-X found\n");
10238 return (ENXIO);
10239 }
10240
10241 pci_enable_busmaster(dev);
10242 rid = PCIR_BAR(0);
10243 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
10244 RF_ACTIVE);
10245 if (sc->sc_mem == NULL) {
10246 device_printf(sc->sc_dev, "can't map mem space\n");
10247 return (ENXIO);
10248 }
10249 sc->sc_st = rman_get_bustag(sc->sc_mem);
10250 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
10251
10252 count = 1;
10253 rid = 0;
10254 if (pci_alloc_msix(dev, &count) == 0)
10255 rid = 1;
10256 DPRINTF(("%s: count=%d\n", __func__, count));
10257 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
10258 (rid != 0 ? 0 : RF_SHAREABLE));
10259 if (sc->sc_irq == NULL) {
10260 device_printf(dev, "can't map interrupt\n");
10261 return (ENXIO);
10262 }
10263 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
10264 NULL, iwx_intr_msix, sc, &sc->sc_ih);
10265 if (error != 0) {
10266 device_printf(dev, "can't establish interrupt\n");
10267 return (ENXIO);
10268 }
10269
10270 /* Clear pending interrupts. */
10271 IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10272 IWX_WRITE(sc, IWX_CSR_INT, ~0);
10273 IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
10274
10275 sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
10276 DPRINTF(("%s: sc->sc_hw_rev=%d\n", __func__, sc->sc_hw_rev));
10277 sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
10278 DPRINTF(("%s: sc->sc_hw_rf_id =%d\n", __func__, sc->sc_hw_rf_id));
10279
10280 /*
10281 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
10282 * changed, and now the revision step also includes bit 0-1 (no more
10283 * "dash" value). To keep hw_rev backwards compatible - we'll store it
10284 * in the old format.
10285 */
10286 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
10287 (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
10288
10289 switch (sc->sc_pid) {
10290 case PCI_PRODUCT_INTEL_WL_22500_1:
10291 sc->sc_fwname = IWX_CC_A_FW;
10292 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10293 sc->sc_integrated = 0;
10294 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10295 sc->sc_low_latency_xtal = 0;
10296 sc->sc_xtal_latency = 0;
10297 sc->sc_tx_with_siso_diversity = 0;
10298 sc->sc_uhb_supported = 0;
10299 break;
10300 case PCI_PRODUCT_INTEL_WL_22500_2:
10301 case PCI_PRODUCT_INTEL_WL_22500_5:
10302 /* These devices should be QuZ only. */
10303 if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
10304 device_printf(dev, "unsupported AX201 adapter\n");
10305 return (ENXIO);
10306 }
10307 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10308 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10309 sc->sc_integrated = 1;
10310 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10311 sc->sc_low_latency_xtal = 0;
10312 sc->sc_xtal_latency = 500;
10313 sc->sc_tx_with_siso_diversity = 0;
10314 sc->sc_uhb_supported = 0;
10315 break;
10316 case PCI_PRODUCT_INTEL_WL_22500_3:
10317 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10318 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10319 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10320 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10321 else
10322 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10323 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10324 sc->sc_integrated = 1;
10325 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
10326 sc->sc_low_latency_xtal = 0;
10327 sc->sc_xtal_latency = 500;
10328 sc->sc_tx_with_siso_diversity = 0;
10329 sc->sc_uhb_supported = 0;
10330 break;
10331 case PCI_PRODUCT_INTEL_WL_22500_4:
10332 case PCI_PRODUCT_INTEL_WL_22500_7:
10333 case PCI_PRODUCT_INTEL_WL_22500_8:
10334 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10335 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10336 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10337 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10338 else
10339 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10340 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10341 sc->sc_integrated = 1;
10342 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
10343 sc->sc_low_latency_xtal = 0;
10344 sc->sc_xtal_latency = 1820;
10345 sc->sc_tx_with_siso_diversity = 0;
10346 sc->sc_uhb_supported = 0;
10347 break;
10348 case PCI_PRODUCT_INTEL_WL_22500_6:
10349 if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
10350 sc->sc_fwname = IWX_QU_C_HR_B_FW;
10351 else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
10352 sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
10353 else
10354 sc->sc_fwname = IWX_QU_B_HR_B_FW;
10355 sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
10356 sc->sc_integrated = 1;
10357 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10358 sc->sc_low_latency_xtal = 1;
10359 sc->sc_xtal_latency = 12000;
10360 sc->sc_tx_with_siso_diversity = 0;
10361 sc->sc_uhb_supported = 0;
10362 break;
10363 case PCI_PRODUCT_INTEL_WL_22500_9:
10364 case PCI_PRODUCT_INTEL_WL_22500_10:
10365 case PCI_PRODUCT_INTEL_WL_22500_11:
10366 case PCI_PRODUCT_INTEL_WL_22500_13:
10367 /* _14 is an MA device, not yet supported */
10368 case PCI_PRODUCT_INTEL_WL_22500_15:
10369 case PCI_PRODUCT_INTEL_WL_22500_16:
10370 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10371 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10372 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10373 sc->sc_integrated = 0;
10374 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
10375 sc->sc_low_latency_xtal = 0;
10376 sc->sc_xtal_latency = 0;
10377 sc->sc_tx_with_siso_diversity = 0;
10378 sc->sc_uhb_supported = 1;
10379 break;
10380 case PCI_PRODUCT_INTEL_WL_22500_12:
10381 case PCI_PRODUCT_INTEL_WL_22500_17:
10382 sc->sc_fwname = IWX_SO_A_GF_A_FW;
10383 sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
10384 sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
10385 sc->sc_integrated = 1;
10386 sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
10387 sc->sc_low_latency_xtal = 1;
10388 sc->sc_xtal_latency = 12000;
10389 sc->sc_tx_with_siso_diversity = 0;
10390 sc->sc_uhb_supported = 0;
10391 sc->sc_imr_enabled = 1;
10392 break;
10393 default:
10394 device_printf(dev, "unknown adapter type\n");
10395 return (ENXIO);
10396 }
10397
10398 cfg = iwx_find_device_cfg(sc);
10399 DPRINTF(("%s: cfg=%p\n", __func__, cfg));
10400 if (cfg) {
10401 sc->sc_fwname = cfg->fw_name;
10402 sc->sc_pnvm_name = cfg->pnvm_name;
10403 sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
10404 sc->sc_uhb_supported = cfg->uhb_supported;
10405 if (cfg->xtal_latency) {
10406 sc->sc_xtal_latency = cfg->xtal_latency;
10407 sc->sc_low_latency_xtal = cfg->low_latency_xtal;
10408 }
10409 }
10410
10411 sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
10412
10413 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10414 sc->sc_umac_prph_offset = 0x300000;
10415 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
10416 } else
10417 sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
10418
10419 /* Allocate DMA memory for loading firmware. */
10420 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10421 ctxt_info_size = sizeof(struct iwx_context_info_gen3);
10422 else
10423 ctxt_info_size = sizeof(struct iwx_context_info);
10424 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
10425 ctxt_info_size, 1);
10426 if (err) {
10427 device_printf(dev,
10428 "could not allocate memory for loading firmware\n");
10429 return (ENXIO);
10430 }
10431
10432 if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10433 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
10434 sizeof(struct iwx_prph_scratch), 1);
10435 if (err) {
10436 device_printf(dev,
10437 "could not allocate prph scratch memory\n");
10438 goto fail1;
10439 }
10440
10441 /*
10442 * Allocate prph information. The driver doesn't use this.
10443 * We use the second half of this page to give the device
10444 * some dummy TR/CR tail pointers - which shouldn't be
10445 * necessary as we don't use this, but the hardware still
10446 * reads/writes there and we can't let it go do that with
10447 * a NULL pointer.
10448 */
10449 KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
10450 ("iwx_prph_info has wrong size"));
10451 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
10452 PAGE_SIZE, 1);
10453 if (err) {
10454 device_printf(dev,
10455 "could not allocate prph info memory\n");
10456 goto fail1;
10457 }
10458 }
10459
10460 /* Allocate interrupt cause table (ICT).*/
10461 err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
10462 IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
10463 if (err) {
10464 device_printf(dev, "could not allocate ICT table\n");
10465 goto fail1;
10466 }
10467
10468 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
10469 err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
10470 if (err) {
10471 device_printf(dev, "could not allocate TX ring %d\n",
10472 txq_i);
10473 goto fail4;
10474 }
10475 }
10476
10477 err = iwx_alloc_rx_ring(sc, &sc->rxq);
10478 if (err) {
10479 device_printf(sc->sc_dev, "could not allocate RX ring\n");
10480 goto fail4;
10481 }
10482
10483 #ifdef IWX_DEBUG
10484 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10485 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
10486 CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
10487
10488 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10489 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
10490 CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
10491 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10492 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
10493 CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
10494
10495 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10496 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
10497 CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
10498
10499 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10500 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
10501 CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
10502 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10503 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
10504 CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
10505 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10506 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
10507 CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
10508 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10509 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
10510 CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
10511 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10512 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
10513 CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
10514 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10515 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
10516 CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
10517 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10518 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
10519 CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
10520 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
10521 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
10522 CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
10523 #endif
10524 ic->ic_softc = sc;
10525 ic->ic_name = device_get_nameunit(sc->sc_dev);
10526 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
10527 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
10528
10529 /* Set device capabilities. */
10530 ic->ic_caps =
10531 IEEE80211_C_STA |
10532 IEEE80211_C_MONITOR |
10533 IEEE80211_C_WPA | /* WPA/RSN */
10534 IEEE80211_C_WME |
10535 IEEE80211_C_PMGT |
10536 IEEE80211_C_SHSLOT | /* short slot time supported */
10537 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
10538 IEEE80211_C_BGSCAN /* capable of bg scanning */
10539 ;
10540 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
10541 /* Enable seqno offload */
10542 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
10543 /* Don't send null data frames; let firmware do it */
10544 ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
10545
10546 ic->ic_txstream = 2;
10547 ic->ic_rxstream = 2;
10548 ic->ic_htcaps |= IEEE80211_HTC_HT
10549 | IEEE80211_HTCAP_SMPS_OFF
10550 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
10551 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
10552 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
10553 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
10554 // | IEEE80211_HTC_RX_AMSDU_AMPDU /* TODO: hw reorder */
10555 | IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
10556
10557 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
10558
10559 /*
10560 * XXX: setupcurchan() expects vhtcaps to be non-zero
10561 * https://bugs.freebsd.org/274156
10562 */
10563 ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
10564 | IEEE80211_VHTCAP_SHORT_GI_80
10565 | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
10566 | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
10567 | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
10568
10569 ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
10570 int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
10571 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
10572 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
10573 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
10574 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
10575 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
10576 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
10577 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
10578 ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
10579 ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
10580
10581 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
10582 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10583 struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
10584 rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
10585 rxba->sc = sc;
10586 for (j = 0; j < nitems(rxba->entries); j++)
10587 mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
10588 }
10589
10590 sc->sc_preinit_hook.ich_func = iwx_attach_hook;
10591 sc->sc_preinit_hook.ich_arg = sc;
10592 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
10593 device_printf(dev,
10594 "config_intrhook_establish failed\n");
10595 goto fail4;
10596 }
10597
10598 return (0);
10599
10600 fail4:
10601 while (--txq_i >= 0)
10602 iwx_free_tx_ring(sc, &sc->txq[txq_i]);
10603 iwx_free_rx_ring(sc, &sc->rxq);
10604 if (sc->ict_dma.vaddr != NULL)
10605 iwx_dma_contig_free(&sc->ict_dma);
10606
10607 fail1:
10608 iwx_dma_contig_free(&sc->ctxt_info_dma);
10609 iwx_dma_contig_free(&sc->prph_scratch_dma);
10610 iwx_dma_contig_free(&sc->prph_info_dma);
10611 return (ENXIO);
10612 }
10613
static int
iwx_detach(device_t dev)
{
	struct iwx_softc *sc = device_get_softc(dev);
	int txq_i;

	/*
	 * Device detach: quiesce the hardware first, then unwind the
	 * attach-time allocations in roughly reverse order.
	 */
	iwx_stop_device(sc);

	/* Let any queued driver tasks finish before the taskqueue dies. */
	taskqueue_drain_all(sc->sc_tq);
	taskqueue_free(sc->sc_tq);

	/* Detach from net80211; destroys any remaining vaps. */
	ieee80211_ifdetach(&sc->sc_ic);

	callout_drain(&sc->watchdog_to);

	/* Release every TX ring and the single RX ring. */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
	iwx_free_rx_ring(sc, &sc->rxq);

	/* Drop the firmware image reference, if one was loaded. */
	if (sc->sc_fwp != NULL) {
		firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
		sc->sc_fwp = NULL;
	}

	/* Likewise for the platform NVM (PNVM) image. */
	if (sc->sc_pnvm != NULL) {
		firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
		sc->sc_pnvm = NULL;
	}

	/* Tear down the interrupt handler and MSI allocation. */
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	/* Release the memory-mapped register BAR. */
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);

	IWX_LOCK_DESTROY(sc);

	return (0);
}
10657
10658 static void
iwx_radiotap_attach(struct iwx_softc * sc)10659 iwx_radiotap_attach(struct iwx_softc *sc)
10660 {
10661 struct ieee80211com *ic = &sc->sc_ic;
10662
10663 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10664 "->%s begin\n", __func__);
10665
10666 ieee80211_radiotap_attach(ic,
10667 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
10668 IWX_TX_RADIOTAP_PRESENT,
10669 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
10670 IWX_RX_RADIOTAP_PRESENT);
10671
10672 IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
10673 "->%s end\n", __func__);
10674 }
10675
10676 struct ieee80211vap *
iwx_vap_create(struct ieee80211com * ic,const char name[IFNAMSIZ],int unit,enum ieee80211_opmode opmode,int flags,const uint8_t bssid[IEEE80211_ADDR_LEN],const uint8_t mac[IEEE80211_ADDR_LEN])10677 iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
10678 enum ieee80211_opmode opmode, int flags,
10679 const uint8_t bssid[IEEE80211_ADDR_LEN],
10680 const uint8_t mac[IEEE80211_ADDR_LEN])
10681 {
10682 struct iwx_vap *ivp;
10683 struct ieee80211vap *vap;
10684
10685 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
10686 return NULL;
10687 ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
10688 vap = &ivp->iv_vap;
10689 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
10690 vap->iv_bmissthreshold = 10; /* override default */
10691 /* Override with driver methods. */
10692 ivp->iv_newstate = vap->iv_newstate;
10693 vap->iv_newstate = iwx_newstate;
10694
10695 ivp->id = IWX_DEFAULT_MACID;
10696 ivp->color = IWX_DEFAULT_COLOR;
10697
10698 ivp->have_wme = TRUE;
10699 ivp->ps_disabled = FALSE;
10700
10701 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
10702 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
10703
10704 /* h/w crypto support */
10705 vap->iv_key_alloc = iwx_key_alloc;
10706 vap->iv_key_delete = iwx_key_delete;
10707 vap->iv_key_set = iwx_key_set;
10708 vap->iv_key_update_begin = iwx_key_update_begin;
10709 vap->iv_key_update_end = iwx_key_update_end;
10710
10711 ieee80211_ratectl_init(vap);
10712 /* Complete setup. */
10713 ieee80211_vap_attach(vap, ieee80211_media_change,
10714 ieee80211_media_status, mac);
10715 ic->ic_opmode = opmode;
10716
10717 return vap;
10718 }
10719
10720 static void
iwx_vap_delete(struct ieee80211vap * vap)10721 iwx_vap_delete(struct ieee80211vap *vap)
10722 {
10723 struct iwx_vap *ivp = IWX_VAP(vap);
10724
10725 ieee80211_ratectl_deinit(vap);
10726 ieee80211_vap_detach(vap);
10727 free(ivp, M_80211_VAP);
10728 }
10729
10730 static void
iwx_parent(struct ieee80211com * ic)10731 iwx_parent(struct ieee80211com *ic)
10732 {
10733 struct iwx_softc *sc = ic->ic_softc;
10734 IWX_LOCK(sc);
10735
10736 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10737 iwx_stop(sc);
10738 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10739 } else {
10740 iwx_init(sc);
10741 ieee80211_start_all(ic);
10742 }
10743 IWX_UNLOCK(sc);
10744 }
10745
10746 static int
iwx_suspend(device_t dev)10747 iwx_suspend(device_t dev)
10748 {
10749 struct iwx_softc *sc = device_get_softc(dev);
10750 struct ieee80211com *ic = &sc->sc_ic;
10751
10752 /*
10753 * Suspend everything first, then shutdown hardware if it's
10754 * still up.
10755 */
10756 ieee80211_suspend_all(ic);
10757
10758 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10759 iwx_stop(sc);
10760 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10761 }
10762 return (0);
10763 }
10764
10765 static int
iwx_resume(device_t dev)10766 iwx_resume(device_t dev)
10767 {
10768 struct iwx_softc *sc = device_get_softc(dev);
10769 struct ieee80211com *ic = &sc->sc_ic;
10770
10771 /*
10772 * We disable the RETRY_TIMEOUT register (0x41) to keep
10773 * PCI Tx retries from interfering with C3 CPU state.
10774 */
10775 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
10776
10777 IWX_LOCK(sc);
10778
10779 /* Stop the hardware here if it's still thought of as "up" */
10780 if (sc->sc_flags & IWX_FLAG_HW_INITED) {
10781 iwx_stop(sc);
10782 sc->sc_flags &= ~IWX_FLAG_HW_INITED;
10783 }
10784
10785 IWX_UNLOCK(sc);
10786
10787 /* Start the VAPs, which will bring the hardware back up again */
10788 ieee80211_resume_all(ic);
10789 return (0);
10790 }
10791
10792 static void
iwx_scan_start(struct ieee80211com * ic)10793 iwx_scan_start(struct ieee80211com *ic)
10794 {
10795 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
10796 struct iwx_softc *sc = ic->ic_softc;
10797 int err;
10798
10799 IWX_LOCK(sc);
10800 if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
10801 err = iwx_scan(sc);
10802 else
10803 err = iwx_bgscan(ic);
10804 IWX_UNLOCK(sc);
10805 if (err)
10806 ieee80211_cancel_scan(vap);
10807
10808 return;
10809 }
10810
/* Multicast filter updates are not implemented; firmware handles RX filtering. */
static void
iwx_update_mcast(struct ieee80211com *ic)
{
}
10815
/* No-op: channel dwelling during scans is managed entirely by firmware. */
static void
iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
10820
/* No-op: minimum-dwell handling is managed by the firmware scan offload. */
static void
iwx_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
10825
/* net80211 scan-end hook: tell the firmware to stop any running scan. */
static void
iwx_scan_end(struct ieee80211com *ic)
{
	iwx_endscan(ic->ic_softc);
}
10831
/*
 * net80211 set-channel hook.  Currently a no-op: the disabled code below
 * sketches a phy context update, but channel changes are presently driven
 * through the firmware state machine elsewhere in the driver.
 */
static void
iwx_set_channel(struct ieee80211com *ic)
{
#if 0
	struct iwx_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	IWX_DPRINTF(sc, IWX_DEBUG_NI , "%s:%d NOT IMPLEMENTED\n", __func__, __LINE__);
	iwx_phy_ctxt_task((void *)sc);
#endif
}
10843
10844 static void
iwx_endscan_cb(void * arg,int pending)10845 iwx_endscan_cb(void *arg, int pending)
10846 {
10847 struct iwx_softc *sc = arg;
10848 struct ieee80211com *ic = &sc->sc_ic;
10849
10850 DPRINTF(("scan ended\n"));
10851 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
10852 }
10853
/* WME parameter updates are a no-op here; EDCA parameters are pushed to
 * firmware elsewhere.  Always reports success. */
static int
iwx_wme_update(struct ieee80211com *ic)
{
	return 0;
}
10859
10860 static int
iwx_raw_xmit(struct ieee80211_node * ni,struct mbuf * m,const struct ieee80211_bpf_params * params)10861 iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
10862 const struct ieee80211_bpf_params *params)
10863 {
10864 struct ieee80211com *ic = ni->ni_ic;
10865 struct iwx_softc *sc = ic->ic_softc;
10866 int err;
10867
10868 IWX_LOCK(sc);
10869 if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
10870 err = iwx_tx(sc, m, ni);
10871 IWX_UNLOCK(sc);
10872 return err;
10873 } else {
10874 IWX_UNLOCK(sc);
10875 return EIO;
10876 }
10877 }
10878
10879 static int
iwx_transmit(struct ieee80211com * ic,struct mbuf * m)10880 iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
10881 {
10882 struct iwx_softc *sc = ic->ic_softc;
10883 int error;
10884
10885 // TODO: mbufq_enqueue in iwm
10886 // TODO dequeue in iwm_start, counters, locking
10887 IWX_LOCK(sc);
10888 error = mbufq_enqueue(&sc->sc_snd, m);
10889 if (error) {
10890 IWX_UNLOCK(sc);
10891 return (error);
10892 }
10893
10894 iwx_start(sc);
10895 IWX_UNLOCK(sc);
10896 return (0);
10897 }
10898
10899 static int
iwx_ampdu_rx_start(struct ieee80211_node * ni,struct ieee80211_rx_ampdu * rap,int baparamset,int batimeout,int baseqctl)10900 iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
10901 int baparamset, int batimeout, int baseqctl)
10902 {
10903 struct ieee80211com *ic = ni->ni_ic;
10904 struct iwx_softc *sc = ic->ic_softc;
10905 int tid;
10906
10907 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10908 sc->ni_rx_ba[tid].ba_winstart =
10909 _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
10910 sc->ni_rx_ba[tid].ba_winsize =
10911 _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
10912 sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
10913
10914 if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
10915 tid >= IWX_MAX_TID_COUNT)
10916 return ENOSPC;
10917
10918 if (sc->ba_rx.start_tidmask & (1 << tid)) {
10919 DPRINTF(("%s: tid %d already added\n", __func__, tid));
10920 return EBUSY;
10921 }
10922 DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
10923
10924 sc->ba_rx.start_tidmask |= (1 << tid);
10925 DPRINTF(("%s: tid=%i\n", __func__, tid));
10926 DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
10927 DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
10928 DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
10929
10930 taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
10931
10932 // TODO:misha move to ba_task (serialize)
10933 sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
10934
10935 return (0);
10936 }
10937
/* No-op: RX A-MPDU teardown is left to firmware/deferred task handling. */
static void
iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	return;
}
10943
10944 /**
10945 * @brief Called by net80211 to request an A-MPDU session be established.
10946 *
10947 * This is called by net80211 to see if an A-MPDU session can be established.
10948 * However, the iwx(4) firmware will take care of establishing the BA
10949 * session for us. net80211 doesn't have to send any action frames here;
10950 * it just needs to plumb up the ampdu session once the BA has been sent.
10951 *
10952 * If we return 0 here then the firmware will set up the state but net80211
10953 * will not; so it's on us to actually complete it via a call to
 * ieee80211_ampdu_tx_request_active_ext().
10955 *
10956 * @param ni ieee80211_node to establish A-MPDU session for
10957 * @param tap pointer to the per-TID state struct
10958 * @param dialogtoken dialogtoken field from the BA request
10959 * @param baparamset baparamset field from the BA request
10960 * @param batimeout batimeout field from the BA request
10961 *
10962 * @returns 0 so net80211 doesn't send the BA action frame to establish A-MPDU.
10963 */
10964 static int
iwx_addba_request(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap,int dialogtoken,int baparamset,int batimeout)10965 iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
10966 int dialogtoken, int baparamset, int batimeout)
10967 {
10968 struct iwx_softc *sc = ni->ni_ic->ic_softc;
10969 int tid;
10970
10971 tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
10972 IWX_DPRINTF(sc, IWX_DEBUG_AMPDU_MGMT,
10973 "%s: queuing AMPDU start on tid %i\n", __func__, tid);
10974
10975 /* There's no nice way right now to tell net80211 that we're in the
10976 * middle of an asynchronous ADDBA setup session. So, bump the timeout
10977 * to hz ticks, hopefully we'll get a response by then.
10978 */
10979 tap->txa_nextrequest = ticks + hz;
10980
10981 IWX_LOCK(sc);
10982 sc->ba_tx.start_tidmask |= (1 << tid);
10983 IWX_UNLOCK(sc);
10984
10985 taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
10986
10987 return (0);
10988 }
10989
10990
/* No-op ADDBA response handler: firmware completes the BA handshake, so
 * there is nothing to do when net80211 reports the peer's response. */
static int
iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	return 0;
}
10997
/* No-op: key updates need no driver-side serialization here. */
static void
iwx_key_update_begin(struct ieee80211vap *vap)
{
	return;
}
11003
/* No-op counterpart to iwx_key_update_begin(). */
static void
iwx_key_update_end(struct ieee80211vap *vap)
{
	return;
}
11009
11010 static int
iwx_key_alloc(struct ieee80211vap * vap,struct ieee80211_key * k,ieee80211_keyix * keyix,ieee80211_keyix * rxkeyix)11011 iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
11012 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
11013 {
11014
11015 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
11016 return (1);
11017 }
11018
11019 if (ieee80211_is_key_unicast(vap, k)) {
11020 *keyix = 0; /* NB: use key index 0 for ucast key */
11021 } else if (ieee80211_is_key_global(vap, k)) {
11022 *keyix = ieee80211_crypto_get_key_wepidx(vap, k);
11023 } else {
11024 net80211_vap_printf(vap, "%s: invalid crypto key type\n",
11025 __func__);
11026 return (0);
11027 }
11028 *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
11029 return (1);
11030 }
11031
/*
 * Program a CCMP pairwise or group key into the firmware via the
 * ADD_STA_KEY command.  Non-CCMP ciphers are handed back to net80211's
 * software crypto by returning 1 without touching the hardware.
 *
 * Returns 1 on success (net80211 convention for key_set), a non-zero
 * errno-style value on firmware command failure.
 */
static int
iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct iwx_softc *sc = ic->ic_softc;
	struct iwx_add_sta_key_cmd cmd;
	uint32_t status;
	int err;
	int id;

	/* Only CCMP is offloaded; everything else stays in software. */
	if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
		return 1;
	}

	/*
	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
	 * Currently we only implement station mode where 'ni' is always
	 * ic->ic_bss so there is no need to validate arguments beyond this:
	 */

	memset(&cmd, 0, sizeof(cmd));

	/* Pick the firmware key id: WEP-style index for group keys, 0 for
	 * the unicast key (net80211 only supports unicast key 0 today). */
	if (ieee80211_is_key_global(vap, k)) {
		id = ieee80211_crypto_get_key_wepidx(vap, k);
		IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding group key\n",
		    __func__);
	} else if (ieee80211_is_key_unicast(vap, k)) {
		IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: adding key\n",
		    __func__);
		id = 0; /* net80211 currently only supports unicast key 0 */
	} else {
		net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
		return (ENXIO);
	}

	IWX_LOCK(sc);

	/* Build the key flags: CCM cipher, key-map entry, key id field. */
	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
	    ((id << IWX_STA_KEY_FLG_KEYID_POS) &
	    IWX_STA_KEY_FLG_KEYID_MSK));
	/* Group keys go in firmware key offset 1, the unicast key in 0. */
	if (ieee80211_is_key_global(vap, k)) {
		cmd.common.key_offset = 1;
		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
	} else if (ieee80211_is_key_unicast(vap, k)) {
		cmd.common.key_offset = 0;
	} else {
		net80211_vap_printf(vap, "%s: unknown key type\n", __func__);
		IWX_UNLOCK(sc);
		return (ENXIO);
	}
	/* Copy in the key material, clamped to the command buffer size. */
	memcpy(cmd.common.key, ieee80211_crypto_get_key_data(k),
	    MIN(sizeof(cmd.common.key), ieee80211_crypto_get_key_len(k)));
	IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: key: id=%d, len=%i, key=%*D\n",
	    __func__, id,
	    ieee80211_crypto_get_key_len(k),
	    ieee80211_crypto_get_key_len(k),
	    (const unsigned char *) ieee80211_crypto_get_key_data(k), "");
	cmd.common.sta_id = IWX_STATION_ID;

	/* Seed the firmware's PN/TSC with the key's current transmit TSC. */
	cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
	IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT, "%s: k->wk_keytsc=%" PRIu64 "\n",
	    __func__, k->wk_keytsc);

	/* Issue the command and check both the call and firmware status. */
	status = IWX_ADD_STA_SUCCESS;
	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
	    &status);
	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
		err = EIO;
	if (err) {
		net80211_vap_printf(vap,
		    "%s: can't set wpa2 keys (error %d)\n", __func__, err);
		IWX_UNLOCK(sc);
		return err;
	} else
		IWX_DPRINTF(sc, IWX_DEBUG_KEYMGMT,
		    "%s: key added successfully\n", __func__);
	IWX_UNLOCK(sc);
	return (1);
}
11112
/* net80211 key-delete hook; returns 1 (success) unconditionally. */
static int
iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/*
	 * Note: since there's no key allocations to track - it's either
	 * the 4 static WEP keys or the single unicast key - there's nothing
	 * else to do here.
	 *
	 * This would need some further work to support IBSS/mesh/AP modes.
	 */
	return (1);
}
11125
/* newbus device method table wiring the driver entry points. */
static device_method_t iwx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwx_probe),
	DEVMETHOD(device_attach,	iwx_attach),
	DEVMETHOD(device_detach,	iwx_detach),
	DEVMETHOD(device_suspend,	iwx_suspend),
	DEVMETHOD(device_resume,	iwx_resume),

	DEVMETHOD_END
};

static driver_t iwx_pci_driver = {
	"iwx",
	iwx_pci_methods,
	sizeof (struct iwx_softc)
};

/* Register with the PCI bus and export PNP ids for devmatch(8). */
DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
    iwx_devices, nitems(iwx_devices));
MODULE_DEPEND(iwx, firmware, 1, 1, 1);
MODULE_DEPEND(iwx, pci, 1, 1, 1);
MODULE_DEPEND(iwx, wlan, 1, 1, 1);
11149