1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33 #include <sys/cdefs.h>
34 /*
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
36 */
37
38 #include "opt_inet.h"
39 #include "opt_mwl.h"
40 #include "opt_wlan.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
54 #include <sys/bus.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
58
59 #include <machine/bus.h>
60
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
69
70 #include <net/bpf.h>
71
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_input.h>
74 #include <net80211/ieee80211_regdomain.h>
75
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #endif /* INET */
80
81 #include <dev/mwl/if_mwlvar.h>
82 #include <dev/mwl/mwldiag.h>
83
84 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
85 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
86 const uint8_t [IEEE80211_ADDR_LEN],
87 const uint8_t [IEEE80211_ADDR_LEN]);
88 static void mwl_vap_delete(struct ieee80211vap *);
89 static int mwl_setupdma(struct mwl_softc *);
90 static int mwl_hal_reset(struct mwl_softc *sc);
91 static int mwl_init(struct mwl_softc *);
92 static void mwl_parent(struct ieee80211com *);
93 static int mwl_reset(struct ieee80211vap *, u_long);
94 static void mwl_stop(struct mwl_softc *);
95 static void mwl_start(struct mwl_softc *);
96 static int mwl_transmit(struct ieee80211com *, struct mbuf *);
97 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
98 const struct ieee80211_bpf_params *);
99 static int mwl_media_change(if_t);
100 static void mwl_watchdog(void *);
101 static int mwl_ioctl(struct ieee80211com *, u_long, void *);
102 static void mwl_radar_proc(void *, int);
103 static void mwl_chanswitch_proc(void *, int);
104 static void mwl_bawatchdog_proc(void *, int);
105 static int mwl_key_alloc(struct ieee80211vap *,
106 struct ieee80211_key *,
107 ieee80211_keyix *, ieee80211_keyix *);
108 static int mwl_key_delete(struct ieee80211vap *,
109 const struct ieee80211_key *);
110 static int mwl_key_set(struct ieee80211vap *,
111 const struct ieee80211_key *);
112 static int _mwl_key_set(struct ieee80211vap *,
113 const struct ieee80211_key *,
114 const uint8_t mac[IEEE80211_ADDR_LEN]);
115 static int mwl_mode_init(struct mwl_softc *);
116 static void mwl_update_mcast(struct ieee80211com *);
117 static void mwl_update_promisc(struct ieee80211com *);
118 static void mwl_updateslot(struct ieee80211com *);
119 static int mwl_beacon_setup(struct ieee80211vap *);
120 static void mwl_beacon_update(struct ieee80211vap *, int);
121 #ifdef MWL_HOST_PS_SUPPORT
122 static void mwl_update_ps(struct ieee80211vap *, int);
123 static int mwl_set_tim(struct ieee80211_node *, int);
124 #endif
125 static int mwl_dma_setup(struct mwl_softc *);
126 static void mwl_dma_cleanup(struct mwl_softc *);
127 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
128 const uint8_t [IEEE80211_ADDR_LEN]);
129 static void mwl_node_cleanup(struct ieee80211_node *);
130 static void mwl_node_drain(struct ieee80211_node *);
131 static void mwl_node_getsignal(const struct ieee80211_node *,
132 int8_t *, int8_t *);
133 static void mwl_node_getmimoinfo(const struct ieee80211_node *,
134 struct ieee80211_mimo_info *);
135 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
136 static void mwl_rx_proc(void *, int);
137 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
138 static int mwl_tx_setup(struct mwl_softc *, int, int);
139 static int mwl_wme_update(struct ieee80211com *);
140 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
141 static void mwl_tx_cleanup(struct mwl_softc *);
142 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
143 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
144 struct mwl_txbuf *, struct mbuf *);
145 static void mwl_tx_proc(void *, int);
146 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
147 static void mwl_draintxq(struct mwl_softc *);
148 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
149 static int mwl_recv_action(struct ieee80211_node *,
150 const struct ieee80211_frame *,
151 const uint8_t *, const uint8_t *);
152 static int mwl_addba_request(struct ieee80211_node *,
153 struct ieee80211_tx_ampdu *, int dialogtoken,
154 int baparamset, int batimeout);
155 static int mwl_addba_response(struct ieee80211_node *,
156 struct ieee80211_tx_ampdu *, int status,
157 int baparamset, int batimeout);
158 static void mwl_addba_stop(struct ieee80211_node *,
159 struct ieee80211_tx_ampdu *);
160 static int mwl_startrecv(struct mwl_softc *);
161 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
162 struct ieee80211_channel *);
163 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
164 static void mwl_scan_start(struct ieee80211com *);
165 static void mwl_scan_end(struct ieee80211com *);
166 static void mwl_set_channel(struct ieee80211com *);
167 static int mwl_peerstadb(struct ieee80211_node *,
168 int aid, int staid, MWL_HAL_PEERINFO *pi);
169 static int mwl_localstadb(struct ieee80211vap *);
170 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
171 static int allocstaid(struct mwl_softc *sc, int aid);
172 static void delstaid(struct mwl_softc *sc, int staid);
173 static void mwl_newassoc(struct ieee80211_node *, int);
174 static void mwl_agestations(void *);
175 static int mwl_setregdomain(struct ieee80211com *,
176 struct ieee80211_regdomain *, int,
177 struct ieee80211_channel []);
178 static void mwl_getradiocaps(struct ieee80211com *, int, int *,
179 struct ieee80211_channel []);
180 static int mwl_getchannels(struct mwl_softc *);
181
182 static void mwl_sysctlattach(struct mwl_softc *);
183 static void mwl_announce(struct mwl_softc *);
184
185 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
186 "Marvell driver parameters");
187
188 static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
189 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
190 0, "rx descriptors allocated");
191 static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
192 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
193 0, "rx buffers allocated");
194 static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
195 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
196 0, "tx buffers allocated");
197 static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
198 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
199 0, "tx buffers to send at once");
200 static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
201 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
202 0, "max rx buffers to process per interrupt");
203 static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
204 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
205 0, "min free rx buffers before restarting traffic");
206
207 #ifdef MWL_DEBUG
208 static int mwl_debug = 0;
209 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
210 0, "control debugging printfs");
211 enum {
212 MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
213 MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
214 MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */
215 MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
216 MWL_DEBUG_RESET = 0x00000010, /* reset processing */
217 MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */
218 MWL_DEBUG_INTR = 0x00000040, /* ISR */
219 MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
220 MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
221 MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */
222 MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
223 MWL_DEBUG_NODE = 0x00000800, /* node management */
224 MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
225 MWL_DEBUG_TSO = 0x00002000, /* TSO processing */
226 MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
227 MWL_DEBUG_ANY = 0xffffffff
228 };
229 #define IFF_DUMPPKTS_RECV(sc, wh) \
230 ((sc->sc_debug & MWL_DEBUG_RECV) && \
231 ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh)))
232 #define IFF_DUMPPKTS_XMIT(sc) \
233 (sc->sc_debug & MWL_DEBUG_XMIT)
234
235 #define DPRINTF(sc, m, fmt, ...) do { \
236 if (sc->sc_debug & (m)) \
237 printf(fmt, __VA_ARGS__); \
238 } while (0)
239 #define KEYPRINTF(sc, hk, mac) do { \
240 if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \
241 mwl_keyprint(sc, __func__, hk, mac); \
242 } while (0)
243 static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
244 static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
245 #else
246 #define IFF_DUMPPKTS_RECV(sc, wh) 0
247 #define IFF_DUMPPKTS_XMIT(sc) 0
248 #define DPRINTF(sc, m, fmt, ...) do { (void )sc; } while (0)
249 #define KEYPRINTF(sc, k, mac) do { (void )sc; } while (0)
250 #endif
251
252 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
253
254 /*
255 * Each packet has fixed front matter: a 2-byte length
256 * of the payload, followed by a 4-address 802.11 header
257 * (regardless of the actual header and always w/o any
258 * QoS header). The payload then follows.
259 */
struct mwltxrec {
	uint16_t fwlen;			/* payload length handed to the f/w */
	struct ieee80211_frame_addr4 wh; /* 4-address 802.11 header (no QoS) */
} __packed;
264
265 /*
266 * Read/Write shorthands for accesses to BAR 0. Note
267 * that all BAR 1 operations are done in the "hal" and
268 * there should be no reference to them here.
269 */
270 #ifdef MWL_DEBUG
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
	/* Read the 32-bit BAR 0 register at byte offset 'off'. */
	return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
276 #endif
277
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
	/* Write 'val' to the 32-bit BAR 0 register at byte offset 'off'. */
	bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
283
/*
 * Device attach: bring up the HAL, load firmware, allocate DMA
 * resources and h/w tx queues, then register with net80211.
 * Returns 0 on success or an errno; on failure all partially
 * acquired resources are released via the bad* labels (in reverse
 * order of acquisition) and sc_invalid is set so the ISR ignores
 * the device.
 */
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_hal *mh;
	int error = 0;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	/*
	 * Setup the RX free list lock early, so it can be consistently
	 * removed.
	 */
	MWL_RXFREE_INIT(sc);

	mh = mwl_hal_attach(sc->sc_dev, devid,
	    sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
	if (mh == NULL) {
		device_printf(sc->sc_dev, "unable to attach HAL\n");
		error = EIO;
		goto bad;
	}
	sc->sc_mh = mh;
	/*
	 * Load firmware so we can get setup.  We arbitrarily
	 * pick station firmware; we'll re-load firmware as
	 * needed so setting up the wrong mode isn't a big deal.
	 */
	if (mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
		error = EIO;
		goto bad1;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		error = EIO;
		goto bad1;
	}
	error = mwl_getchannels(sc);
	if (error != 0)
		goto bad1;

	sc->sc_txantenna = 0;		/* h/w default */
	sc->sc_rxantenna = 0;		/* h/w default */
	sc->sc_invalid = 0;		/* ready to go, enable int handling */
	sc->sc_ageinterval = MWL_AGEINTERVAL;

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 * We immediately push the information to the firmware
	 * as otherwise it gets upset.
	 */
	error = mwl_dma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
		    error);
		goto bad1;
	}
	error = mwl_setupdma(sc);	/* push to firmware */
	if (error != 0)			/* NB: mwl_setupdma prints msg */
		goto bad1;

	callout_init(&sc->sc_timer, 1);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
	mbufq_init(&sc->sc_snd, ifqmaxlen);

	/*
	 * Single taskqueue thread services all deferred work (rx, tx,
	 * radar, channel switch, BA watchdog).
	 * NOTE(review): taskqueue_create may return NULL with M_NOWAIT;
	 * the result is not checked here — confirm this cannot fail in
	 * practice or add a check.
	 */
	sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", device_get_nameunit(sc->sc_dev));

	NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
	TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
	TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
	TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);

	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		     ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
	    !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
	    !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
#if 0
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
#endif
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_WDS		/* WDS supported */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WME		/* WME/WMM supported */
		| IEEE80211_C_BURST		/* xmit bursting supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		| IEEE80211_C_TXPMGT		/* capable of txpow mgt */
		| IEEE80211_C_DFS		/* DFS supported */
		;

	ic->ic_htcaps =
		  IEEE80211_HTCAP_SMPS_ENA	/* SM PS mode enabled */
		| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width */
		| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
		| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
		| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
		| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
		| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#if 0
		| IEEE80211_HTCAP_PSMP		/* PSMP supported */
		| IEEE80211_HTCAP_40INTOLERANT	/* 40MHz intolerant */
#endif
		/* s/w capabilities */
		| IEEE80211_HTC_HT		/* HT operation */
		| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
		| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
		| IEEE80211_HTC_SMPS		/* SMPS available */
		;

	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

	/*
	 * Mark h/w crypto support.
	 * XXX no way to query h/w support.
	 */
	ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
			  |  IEEE80211_CRYPTO_AES_CCM
			  |  IEEE80211_CRYPTO_TKIP
			  |  IEEE80211_CRYPTO_TKIPMIC
			  ;
	/*
	 * Transmit requires space in the packet for a special
	 * format transmit record and optional padding between
	 * this record and the payload.  Ask the net80211 layer
	 * to arrange this when encapsulating packets so we can
	 * add it efficiently.
	 */
	ic->ic_headroom = sizeof(struct mwltxrec) -
		sizeof(struct ieee80211_frame);

	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = mwl_setregdomain;
	ic->ic_getradiocaps = mwl_getradiocaps;
	/* override default methods */
	ic->ic_raw_xmit = mwl_raw_xmit;
	ic->ic_newassoc = mwl_newassoc;
	ic->ic_updateslot = mwl_updateslot;
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
	ic->ic_wme.wme_update = mwl_wme_update;
	ic->ic_transmit = mwl_transmit;
	ic->ic_ioctl = mwl_ioctl;
	ic->ic_parent = mwl_parent;

	/* save net80211's default handlers so ours can chain to them */
	ic->ic_node_alloc = mwl_node_alloc;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = mwl_node_cleanup;
	sc->sc_node_drain = ic->ic_node_drain;
	ic->ic_node_drain = mwl_node_drain;
	ic->ic_node_getsignal = mwl_node_getsignal;
	ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;

	ic->ic_scan_start = mwl_scan_start;
	ic->ic_scan_end = mwl_scan_end;
	ic->ic_set_channel = mwl_set_channel;

	sc->sc_recv_action = ic->ic_recv_action;
	ic->ic_recv_action = mwl_recv_action;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = mwl_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = mwl_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = mwl_addba_stop;

	ic->ic_vap_create = mwl_vap_create;
	ic->ic_vap_delete = mwl_vap_delete;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		MWL_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		MWL_RX_RADIOTAP_PRESENT);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	mwl_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	mwl_announce(sc);
	return 0;
bad2:
	mwl_dma_cleanup(sc);
bad1:
	mwl_hal_detach(mh);
bad:
	MWL_RXFREE_DESTROY(sc);
	sc->sc_invalid = 1;
	return error;
}
522
/*
 * Device detach: stop the hardware, then tear everything down in
 * the order documented below.  Always returns 0.
 */
int
mwl_detach(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(ic);
	callout_drain(&sc->sc_watchdog);
	mwl_dma_cleanup(sc);
	MWL_RXFREE_DESTROY(sc);
	mwl_tx_cleanup(sc);
	mwl_hal_detach(sc->sc_mh);
	mbufq_drain(&sc->sc_snd);

	return 0;
}
553
554 /*
555 * MAC address handling for multiple BSS on the same radio.
556 * The first vap uses the MAC address from the EEPROM. For
557 * subsequent vap's we set the U/L bit (bit 1) in the MAC
558 * address and use the next six bits as an index.
559 */
560 static void
assign_address(struct mwl_softc * sc,uint8_t mac[IEEE80211_ADDR_LEN],int clone)561 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
562 {
563 int i;
564
565 if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
566 /* NB: we only do this if h/w supports multiple bssid */
567 for (i = 0; i < 32; i++)
568 if ((sc->sc_bssidmask & (1<<i)) == 0)
569 break;
570 if (i != 0)
571 mac[0] |= (i << 2)|0x2;
572 } else
573 i = 0;
574 sc->sc_bssidmask |= 1<<i;
575 if (i == 0)
576 sc->sc_nbssid0++;
577 }
578
579 static void
reclaim_address(struct mwl_softc * sc,const uint8_t mac[IEEE80211_ADDR_LEN])580 reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
581 {
582 int i = mac[0] >> 2;
583 if (i != 0 || --sc->sc_nbssid0 == 0)
584 sc->sc_bssidmask &= ~(1<<i);
585 }
586
/*
 * Create a vap of the requested opmode.  HOSTAP/MBSS/STA vaps get
 * their own MAC address (unless IEEE80211_CLONE_MACADDR) and a hal
 * vap; WDS vaps piggyback on an existing AP vap's hal handle;
 * MONITOR vaps have no hal vap at all.  Returns NULL on failure,
 * releasing any address that was assigned.
 */
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct ieee80211vap *vap, *apvap;
	struct mwl_hal_vap *hvap;
	struct mwl_vap *mvp;
	uint8_t mac[IEEE80211_ADDR_LEN];

	IEEE80211_ADDR_COPY(mac, mac0);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		break;
	case IEEE80211_M_STA:
		if ((flags & IEEE80211_CLONE_MACADDR) == 0)
			assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
		if (hvap == NULL) {
			if ((flags & IEEE80211_CLONE_MACADDR) == 0)
				reclaim_address(sc, mac);
			return NULL;
		}
		/* no h/w beacon miss support; always use s/w */
		flags |= IEEE80211_CLONE_NOBEACONS;
		break;
	case IEEE80211_M_WDS:
		hvap = NULL;		/* NB: we use associated AP vap */
		if (sc->sc_napvaps == 0)
			return NULL;	/* no existing AP vap */
		break;
	case IEEE80211_M_MONITOR:
		hvap = NULL;
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	default:
		return NULL;
	}

	mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	mvp->mv_hvap = hvap;
	if (opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vaps must have an associated AP vap; find one.
		 * XXX not right.
		 */
		TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
			if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
				mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
				break;
			}
		KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
	}
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override with driver methods; stash originals for chaining */
	mvp->mv_newstate = vap->iv_newstate;
	vap->iv_newstate = mwl_newstate;
	vap->iv_max_keyix = 0;	/* XXX */
	vap->iv_key_alloc = mwl_key_alloc;
	vap->iv_key_delete = mwl_key_delete;
	vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		vap->iv_update_ps = mwl_update_ps;
		mvp->mv_set_tim = vap->iv_set_tim;
		vap->iv_set_tim = mwl_set_tim;
	}
#endif
	vap->iv_reset = mwl_reset;
	vap->iv_update_beacon = mwl_beacon_update;

	/* override max aid so sta's cannot assoc when we're out of sta id's */
	vap->iv_max_aid = MWL_MAXSTAID;
	/* override default A-MPDU rx parameters */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;

	/* complete setup */
	ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
	    mac);

	/* update per-opmode vap counts used for mode selection below */
	switch (vap->iv_opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		/*
		 * Setup sta db entry for local address.
		 */
		mwl_localstadb(vap);
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps++;
		else
			sc->sc_nstavaps++;
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps++;
		break;
	default:
		break;
	}
	/*
	 * Setup overall operating mode.
	 */
	if (sc->sc_napvaps)
		ic->ic_opmode = IEEE80211_M_HOSTAP;
	else if (sc->sc_nstavaps)
		ic->ic_opmode = IEEE80211_M_STA;
	else
		ic->ic_opmode = opmode;

	return vap;
}
714
/*
 * Destroy a vap: quiesce interrupts while the vap is torn out of
 * net80211 and the hal, update the per-opmode vap counts, release
 * the vap's MAC address slot, and reclaim any queued tx frames.
 */
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	/* XXX disallow ap vap delete if WDS still present */
	if (sc->sc_running) {
		/* quiesce h/w while we remove the vap */
		mwl_hal_intrset(mh, 0);		/* disable interrupts */
	}
	ieee80211_vap_detach(vap);
	switch (opmode) {
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
	case IEEE80211_M_STA:
		KASSERT(hvap != NULL, ("no hal vap handle"));
		(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
		mwl_hal_delvap(hvap);
		if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
			sc->sc_napvaps--;
		else
			sc->sc_nstavaps--;
		/* XXX don't do it for IEEE80211_CLONE_MACADDR */
		reclaim_address(sc, vap->iv_myaddr);
		break;
	case IEEE80211_M_WDS:
		sc->sc_nwdsvaps--;
		break;
	default:
		break;
	}
	mwl_cleartxq(sc, vap);
	free(mvp, M_80211_VAP);
	if (sc->sc_running)
		mwl_hal_intrset(mh, sc->sc_imask);	/* re-enable ints */
}
755
/* Power-management suspend hook: stop the hardware under the lock. */
void
mwl_suspend(struct mwl_softc *sc)
{

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
764
/*
 * Power-management resume hook: re-init the hardware if any
 * interface was running, then restart all vaps on success.
 * EDOOFUS is a sentinel meaning "nothing was running, don't start".
 */
void
mwl_resume(struct mwl_softc *sc)
{
	int error = EDOOFUS;

	MWL_LOCK(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		error = mwl_init(sc);
	MWL_UNLOCK(sc);

	if (error == 0)
		ieee80211_start_all(&sc->sc_ic);	/* start all vap's */
}
778
/* System shutdown hook: stop the hardware under the lock. */
void
mwl_shutdown(void *arg)
{
	struct mwl_softc *sc = arg;

	MWL_LOCK(sc);
	mwl_stop(sc);
	MWL_UNLOCK(sc);
}
788
789 /*
790 * Interrupt handler. Most of the actual processing is deferred.
791 */
/*
 * Interrupt handler.  Most of the actual processing is deferred:
 * rx/tx/BA-watchdog/radar/channel-switch work is queued to the
 * driver taskqueue; only command completion is handled inline.
 */
void
mwl_intr(void *arg)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	uint32_t status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.
	 */
	mwl_hal_getisr(mh, &status);		/* NB: clears ISR too */
	if (status == 0)			/* must be a shared irq */
		return;

	DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
	    __func__, status, sc->sc_imask);
	if (status & MACREG_A2HRIC_BIT_RX_RDY)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
	if (status & MACREG_A2HRIC_BIT_TX_DONE)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
		taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
	if (status & MACREG_A2HRIC_BIT_OPC_DONE)
		mwl_hal_cmddone(mh);
	if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
		;	/* MAC event: intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
		/* TKIP ICV error */
		sc->sc_stats.mst_rx_badtkipicv++;
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
		/* 11n aggregation queue is empty, re-fill */
		;	/* not implemented */
	}
	if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
		;	/* intentionally ignored */
	}
	if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
		/* radar detected, process event */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
	}
	if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
		/* DFS channel switch */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
	}
}
847
/*
 * Deferred radar-detect handler: bump the statistic and notify
 * net80211's DFS machinery about radar on the current channel.
 */
static void
mwl_radar_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
	    __func__, pending);

	sc->sc_stats.mst_radardetect++;
	/* XXX stop h/w BA streams? */

	IEEE80211_LOCK(ic);
	ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
	IEEE80211_UNLOCK(ic);
}
864
/*
 * Deferred channel-switch handler: clear the pending CSA flag and
 * tell net80211 the switch completed.
 */
static void
mwl_chanswitch_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
	    __func__, pending);

	IEEE80211_LOCK(ic);
	sc->sc_csapending = 0;
	ieee80211_csa_completeswitch(ic);
	IEEE80211_UNLOCK(ic);
}
879
/*
 * Tear down a single stalled BA stream: data[0] carries the node,
 * data[1] the tx A-MPDU state (as stashed by the stream setup code —
 * NOTE(review): confirm against mwl_hal_bastream usage elsewhere).
 */
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
	struct ieee80211_node *ni = sp->data[0];

	/* send DELBA and drop the stream */
	ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
888
/*
 * Deferred BA-watchdog handler.  The f/w reports which BA stream
 * stalled via a bitmap: 0xff means "all streams" (tear down every
 * stream 0-7), 0xaa appears to be a no-op sentinel that is ignored
 * (NOTE(review): 0xaa semantics not visible here — confirm against
 * firmware docs), any other value names a single stream.
 */
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
	struct mwl_softc *sc = arg;
	struct mwl_hal *mh = sc->sc_mh;
	const MWL_HAL_BASTREAM *sp;
	uint8_t bitmap, n;

	sc->sc_stats.mst_bawatchdog++;

	if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: could not get bitmap\n", __func__);
		sc->sc_stats.mst_bawatchdog_failed++;
		return;
	}
	DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
	if (bitmap == 0xff) {
		n = 0;
		/* disable all ba streams */
		for (bitmap = 0; bitmap < 8; bitmap++) {
			sp = mwl_hal_bastream_lookup(mh, bitmap);
			if (sp != NULL) {
				mwl_bawatchdog(sp);
				n++;
			}
		}
		if (n == 0) {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA streams found\n", __func__);
			sc->sc_stats.mst_bawatchdog_empty++;
		}
	} else if (bitmap != 0xaa) {
		/* disable a single ba stream */
		sp = mwl_hal_bastream_lookup(mh, bitmap);
		if (sp != NULL) {
			mwl_bawatchdog(sp);
		} else {
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no BA stream %d\n", __func__, bitmap);
			sc->sc_stats.mst_bawatchdog_notfound++;
		}
	}
}
933
934 /*
935 * Convert net80211 channel to a HAL channel.
936 */
937 static void
mwl_mapchan(MWL_HAL_CHANNEL * hc,const struct ieee80211_channel * chan)938 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
939 {
940 hc->channel = chan->ic_ieee;
941
942 *(uint32_t *)&hc->channelFlags = 0;
943 if (IEEE80211_IS_CHAN_2GHZ(chan))
944 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
945 else if (IEEE80211_IS_CHAN_5GHZ(chan))
946 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
947 if (IEEE80211_IS_CHAN_HT40(chan)) {
948 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
949 if (IEEE80211_IS_CHAN_HT40U(chan))
950 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
951 else
952 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
953 } else
954 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
955 /* XXX 10MHz channels */
956 }
957
958 /*
959 * Inform firmware of our tx/rx dma setup. The BAR 0
960 * writes below are for compatibility with older firmware.
961 * For current firmware we send this information with a
962 * cmd block via mwl_hal_sethwdma.
963 */
/*
 * Inform firmware of our tx/rx dma setup.  The BAR 0
 * writes below are for compatibility with older firmware.
 * For current firmware we send this information with a
 * cmd block via mwl_hal_sethwdma.
 */
static int
mwl_setupdma(struct mwl_softc *sc)
{
	int error, i;

	/* Point the h/w at the rx descriptor ring. */
	sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
	WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
	WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);

	/* NB: the trailing ack queues get no WCB ring of their own */
	for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
		struct mwl_txq *txq = &sc->sc_txq[i];
		sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
		WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
	}
	sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;	/* tx buffers per queue */
	sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;

	error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to setup tx/rx dma; hal status %u\n", error);
		/* XXX */
	}
	return error;
}
989
990 /*
991 * Inform firmware of tx rate parameters.
992 * Called after a channel change.
993 */
994 static int
mwl_setcurchanrates(struct mwl_softc * sc)995 mwl_setcurchanrates(struct mwl_softc *sc)
996 {
997 struct ieee80211com *ic = &sc->sc_ic;
998 const struct ieee80211_rateset *rs;
999 MWL_HAL_TXRATE rates;
1000
1001 memset(&rates, 0, sizeof(rates));
1002 rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1003 /* rate used to send management frames */
1004 rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1005 /* rate used to send multicast frames */
1006 rates.McastRate = rates.MgtRate;
1007
1008 return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1009 }
1010
1011 /*
1012 * Inform firmware of tx rate parameters. Called whenever
1013 * user-settable params change and after a channel change.
1014 */
1015 static int
mwl_setrates(struct ieee80211vap * vap)1016 mwl_setrates(struct ieee80211vap *vap)
1017 {
1018 struct mwl_vap *mvp = MWL_VAP(vap);
1019 struct ieee80211_node *ni = vap->iv_bss;
1020 const struct ieee80211_txparam *tp = ni->ni_txparms;
1021 MWL_HAL_TXRATE rates;
1022
1023 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1024
1025 /*
1026 * Update the h/w rate map.
1027 * NB: 0x80 for MCS is passed through unchanged
1028 */
1029 memset(&rates, 0, sizeof(rates));
1030 /* rate used to send management frames */
1031 rates.MgtRate = tp->mgmtrate;
1032 /* rate used to send multicast frames */
1033 rates.McastRate = tp->mcastrate;
1034
1035 /* while here calculate EAPOL fixed rate cookie */
1036 mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1037
1038 return mwl_hal_settxrate(mvp->mv_hvap,
1039 tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1040 RATE_FIXED : RATE_AUTO, &rates);
1041 }
1042
1043 /*
1044 * Setup a fixed xmit rate cookie for EAPOL frames.
1045 */
1046 static void
mwl_seteapolformat(struct ieee80211vap * vap)1047 mwl_seteapolformat(struct ieee80211vap *vap)
1048 {
1049 struct mwl_vap *mvp = MWL_VAP(vap);
1050 struct ieee80211_node *ni = vap->iv_bss;
1051 enum ieee80211_phymode mode;
1052 uint8_t rate;
1053
1054 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1055
1056 mode = ieee80211_chan2mode(ni->ni_chan);
1057 /*
1058 * Use legacy rates when operating a mixed HT+non-HT bss.
1059 * NB: this may violate POLA for sta and wds vap's.
1060 */
1061 if (mode == IEEE80211_MODE_11NA &&
1062 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1063 rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1064 else if (mode == IEEE80211_MODE_11NG &&
1065 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1066 rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1067 else
1068 rate = vap->iv_txparms[mode].mgmtrate;
1069
1070 mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1071 }
1072
1073 /*
1074 * Map SKU+country code to region code for radar bin'ing.
1075 */
1076 static int
mwl_map2regioncode(const struct ieee80211_regdomain * rd)1077 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1078 {
1079 switch (rd->regdomain) {
1080 case SKU_FCC:
1081 case SKU_FCC3:
1082 return DOMAIN_CODE_FCC;
1083 case SKU_CA:
1084 return DOMAIN_CODE_IC;
1085 case SKU_ETSI:
1086 case SKU_ETSI2:
1087 case SKU_ETSI3:
1088 if (rd->country == CTRY_SPAIN)
1089 return DOMAIN_CODE_SPAIN;
1090 if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1091 return DOMAIN_CODE_FRANCE;
1092 /* XXX force 1.3.1 radar type */
1093 return DOMAIN_CODE_ETSI_131;
1094 case SKU_JAPAN:
1095 return DOMAIN_CODE_MKK;
1096 case SKU_ROW:
1097 return DOMAIN_CODE_DGT; /* Taiwan */
1098 case SKU_APAC:
1099 case SKU_APAC2:
1100 case SKU_APAC3:
1101 return DOMAIN_CODE_AUS; /* Australia */
1102 }
1103 /* XXX KOREA? */
1104 return DOMAIN_CODE_FCC; /* XXX? */
1105 }
1106
1107 static int
mwl_hal_reset(struct mwl_softc * sc)1108 mwl_hal_reset(struct mwl_softc *sc)
1109 {
1110 struct ieee80211com *ic = &sc->sc_ic;
1111 struct mwl_hal *mh = sc->sc_mh;
1112
1113 mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
1114 mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
1115 mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
1116 mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
1117 mwl_chan_set(sc, ic->ic_curchan);
1118 /* NB: RF/RA performance tuned for indoor mode */
1119 mwl_hal_setrateadaptmode(mh, 0);
1120 mwl_hal_setoptimizationlevel(mh,
1121 (ic->ic_flags & IEEE80211_F_BURST) != 0);
1122
1123 mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
1124
1125 mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */
1126 mwl_hal_setcfend(mh, 0); /* XXX */
1127
1128 return 1;
1129 }
1130
/*
 * (Re)start the hardware: tear down previous state, push
 * vap-independent state to the firmware, start the receive path
 * and enable interrupts.  Caller must hold the softc lock.
 * Returns 0 on success or an errno.
 */
static int
mwl_init(struct mwl_softc *sc)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error = 0;

	MWL_LOCK_ASSERT(sc);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	mwl_stop(sc);

	/*
	 * Push vap-independent state to the firmware.
	 */
	if (!mwl_hal_reset(sc)) {
		device_printf(sc->sc_dev, "unable to reset hardware\n");
		return EIO;
	}

	/*
	 * Setup recv (once); transmit is already good to go.
	 */
	error = mwl_startrecv(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		return error;
	}

	/*
	 * Enable interrupts.
	 * NOTE(review): MACREQ_A2HRIC_BIT_TX_ACK (vs MACREG_) appears
	 * to be the spelling used by the register header — confirm.
	 */
	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
		     | MACREG_A2HRIC_BIT_TX_DONE
		     | MACREG_A2HRIC_BIT_OPC_DONE
#if 0
		     | MACREG_A2HRIC_BIT_MAC_EVENT
#endif
		     | MACREG_A2HRIC_BIT_ICV_ERROR
		     | MACREG_A2HRIC_BIT_RADAR_DETECT
		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
		     | MACREQ_A2HRIC_BIT_TX_ACK
		     ;

	sc->sc_running = 1;
	mwl_hal_intrset(mh, sc->sc_imask);
	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);

	return 0;
}
1187
1188 static void
mwl_stop(struct mwl_softc * sc)1189 mwl_stop(struct mwl_softc *sc)
1190 {
1191
1192 MWL_LOCK_ASSERT(sc);
1193 if (sc->sc_running) {
1194 /*
1195 * Shutdown the hardware and driver.
1196 */
1197 sc->sc_running = 0;
1198 callout_stop(&sc->sc_watchdog);
1199 sc->sc_tx_timer = 0;
1200 mwl_draintxq(sc);
1201 }
1202 }
1203
/*
 * Push per-vap state to the firmware for the given net80211
 * state: tx rates (RUN only), RTS threshold, short-GI and HT
 * protection settings; for ap-style vaps in RUN also reprogram
 * ap mode and rebuild the beacon.  Returns 0 or an error from
 * mwl_beacon_setup.
 */
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;

	if (state == IEEE80211_S_RUN)
		mwl_setrates(vap);
	/* XXX off by 1? */
	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
	/* XXX auto? 20/40 split? */
	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
	    HTPROTECT_NONE : HTPROTECT_AUTO);
	/* XXX txpower cap */

	/* re-setup beacons */
	if (state == IEEE80211_S_RUN &&
	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	     vap->iv_opmode == IEEE80211_M_MBSS ||
	     vap->iv_opmode == IEEE80211_M_IBSS)) {
		mwl_setapmode(vap, vap->iv_bss->ni_chan);
		mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
		    ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
		return mwl_beacon_setup(vap);
	}
	return 0;
}
1233
1234 /*
1235 * Reset the hardware w/o losing operational state.
1236 * Used to reset or reload hardware state for a vap.
1237 */
1238 static int
mwl_reset(struct ieee80211vap * vap,u_long cmd)1239 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1240 {
1241 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1242 int error = 0;
1243
1244 if (hvap != NULL) { /* WDS, MONITOR, etc. */
1245 struct ieee80211com *ic = vap->iv_ic;
1246 struct mwl_softc *sc = ic->ic_softc;
1247 struct mwl_hal *mh = sc->sc_mh;
1248
1249 /* XXX handle DWDS sta vap change */
1250 /* XXX do we need to disable interrupts? */
1251 mwl_hal_intrset(mh, 0); /* disable interrupts */
1252 error = mwl_reset_vap(vap, vap->iv_state);
1253 mwl_hal_intrset(mh, sc->sc_imask);
1254 }
1255 return error;
1256 }
1257
1258 /*
1259 * Allocate a tx buffer for sending a frame. The
1260 * packet is assumed to have the WME AC stored so
1261 * we can use it to select the appropriate h/w queue.
1262 */
1263 static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc * sc,struct mwl_txq * txq)1264 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1265 {
1266 struct mwl_txbuf *bf;
1267
1268 /*
1269 * Grab a TX buffer and associated resources.
1270 */
1271 MWL_TXQ_LOCK(txq);
1272 bf = STAILQ_FIRST(&txq->free);
1273 if (bf != NULL) {
1274 STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1275 txq->nfree--;
1276 }
1277 MWL_TXQ_UNLOCK(txq);
1278 if (bf == NULL)
1279 DPRINTF(sc, MWL_DEBUG_XMIT,
1280 "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1281 return bf;
1282 }
1283
1284 /*
1285 * Return a tx buffer to the queue it came from. Note there
1286 * are two cases because we must preserve the order of buffers
1287 * as it reflects the fixed order of descriptors in memory
1288 * (the firmware pre-fetches descriptors so we cannot reorder).
1289 */
1290 static void
mwl_puttxbuf_head(struct mwl_txq * txq,struct mwl_txbuf * bf)1291 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1292 {
1293 bf->bf_m = NULL;
1294 bf->bf_node = NULL;
1295 MWL_TXQ_LOCK(txq);
1296 STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1297 txq->nfree++;
1298 MWL_TXQ_UNLOCK(txq);
1299 }
1300
1301 static void
mwl_puttxbuf_tail(struct mwl_txq * txq,struct mwl_txbuf * bf)1302 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1303 {
1304 bf->bf_m = NULL;
1305 bf->bf_node = NULL;
1306 MWL_TXQ_LOCK(txq);
1307 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1308 txq->nfree++;
1309 MWL_TXQ_UNLOCK(txq);
1310 }
1311
1312 static int
mwl_transmit(struct ieee80211com * ic,struct mbuf * m)1313 mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
1314 {
1315 struct mwl_softc *sc = ic->ic_softc;
1316 int error;
1317
1318 MWL_LOCK(sc);
1319 if (!sc->sc_running) {
1320 MWL_UNLOCK(sc);
1321 return (ENXIO);
1322 }
1323 error = mbufq_enqueue(&sc->sc_snd, m);
1324 if (error) {
1325 MWL_UNLOCK(sc);
1326 return (error);
1327 }
1328 mwl_start(sc);
1329 MWL_UNLOCK(sc);
1330 return (0);
1331 }
1332
/*
 * Drain the staging queue (sc_snd) and hand frames to the h/w
 * transmit path.  Caller must hold the softc lock.  Frames are
 * batched: the firmware is poked every mwl_txcoalesce frames
 * and once more at the end if anything was queued.
 */
static void
mwl_start(struct mwl_softc *sc)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	struct mbuf *m;
	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
	int nqueued;

	MWL_LOCK_ASSERT(sc);
	if (!sc->sc_running || sc->sc_invalid)
		return;
	nqueued = 0;
	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/*
		 * Grab the node for the destination.
		 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni != NULL, ("no node"));
		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
		/*
		 * Grab a TX buffer and associated resources.
		 * We honor the classification by the 802.11 layer.
		 */
		txq = sc->sc_ac2q[M_WME_GETAC(m)];
		bf = mwl_gettxbuf(sc, txq);
		if (bf == NULL) {
			m_freem(m);
			ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
			/* out of buffers: stop, leave the rest staged */
			sc->sc_stats.mst_tx_qstop++;
			break;
#else
			DPRINTF(sc, MWL_DEBUG_XMIT,
			    "%s: tail drop on q %d\n", __func__, txq->qnum);
			sc->sc_stats.mst_tx_qdrop++;
			continue;
#endif /* MWL_TX_NODROP */
		}

		/*
		 * Pass the frame to the h/w for transmission.
		 * NOTE(review): on failure we reclaim bf and the node
		 * ref but not m — assumes mwl_tx_start consumed the
		 * mbuf on its error paths; confirm against mwl_tx_start.
		 */
		if (mwl_tx_start(sc, ni, bf, m)) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			mwl_puttxbuf_head(txq, bf);
			ieee80211_free_node(ni);
			continue;
		}
		nqueued++;
		if (nqueued >= mwl_txcoalesce) {
			/*
			 * Poke the firmware to process queued frames;
			 * see below about (lack of) locking.
			 */
			nqueued = 0;
			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
		}
	}
	if (nqueued) {
		/*
		 * NB: We don't need to lock against tx done because
		 * this just prods the firmware to check the transmit
		 * descriptors.  The firmware will also start fetching
		 * descriptors by itself if it notices new ones are
		 * present when it goes to deliver a tx done interrupt
		 * to the host.  So if we race with tx done processing
		 * it's ok.  Delivering the kick here rather than in
		 * mwl_tx_start is an optimization to avoid poking the
		 * firmware for each packet.
		 *
		 * NB: the queue id isn't used so 0 is ok.
		 */
		mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	}
}
1410
/*
 * net80211 raw transmit entry point (bpf-injected and mgt
 * frames).  Returns 0 on success or an errno; on error the
 * caller (net80211) releases the node reference.
 */
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_txbuf *bf;
	struct mwl_txq *txq;

	if (!sc->sc_running || sc->sc_invalid) {
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * Note that we depend on the classification
	 * by the 802.11 layer to get to the right h/w
	 * queue.  Management frames must ALWAYS go on
	 * queue 1 but we cannot just force that here
	 * because we may receive non-mgt frames.
	 */
	txq = sc->sc_ac2q[M_WME_GETAC(m)];
	bf = mwl_gettxbuf(sc, txq);
	if (bf == NULL) {
		sc->sc_stats.mst_tx_qstop++;
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * Pass the frame to the h/w for transmission.
	 * NOTE(review): m is not freed on this path — assumes
	 * mwl_tx_start consumed it on failure; confirm.
	 */
	if (mwl_tx_start(sc, ni, bf, m)) {
		mwl_puttxbuf_head(txq, bf);

		return EIO;		/* XXX */
	}
	/*
	 * NB: We don't need to lock against tx done because
	 * this just prods the firmware to check the transmit
	 * descriptors.  The firmware will also start fetching
	 * descriptors by itself if it notices new ones are
	 * present when it goes to deliver a tx done interrupt
	 * to the host.  So if we race with tx done processing
	 * it's ok.  Delivering the kick here rather than in
	 * mwl_tx_start is an optimization to avoid poking the
	 * firmware for each packet.
	 *
	 * NB: the queue id isn't used so 0 is ok.
	 */
	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
	return 0;
}
1463
1464 static int
mwl_media_change(if_t ifp)1465 mwl_media_change(if_t ifp)
1466 {
1467 struct ieee80211vap *vap;
1468 int error;
1469
1470 /* NB: only the fixed rate can change and that doesn't need a reset */
1471 error = ieee80211_media_change(ifp);
1472 if (error != 0)
1473 return (error);
1474
1475 vap = if_getsoftc(ifp);
1476 mwl_setrates(vap);
1477 return (0);
1478 }
1479
1480 #ifdef MWL_DEBUG
1481 static void
mwl_keyprint(struct mwl_softc * sc,const char * tag,const MWL_HAL_KEYVAL * hk,const uint8_t mac[IEEE80211_ADDR_LEN])1482 mwl_keyprint(struct mwl_softc *sc, const char *tag,
1483 const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1484 {
1485 static const char *ciphers[] = {
1486 "WEP",
1487 "TKIP",
1488 "AES-CCM",
1489 };
1490 int i, n;
1491
1492 printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1493 for (i = 0, n = hk->keyLen; i < n; i++)
1494 printf(" %02x", hk->key.aes[i]);
1495 printf(" mac %s", ether_sprintf(mac));
1496 if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1497 printf(" %s", "rxmic");
1498 for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1499 printf(" %02x", hk->key.tkip.rxMic[i]);
1500 printf(" txmic");
1501 for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1502 printf(" %02x", hk->key.tkip.txMic[i]);
1503 }
1504 printf(" flags 0x%x\n", hk->keyFlags);
1505 }
1506 #endif
1507
1508 /*
1509 * Allocate a key cache slot for a unicast key. The
1510 * firmware handles key allocation and every station is
1511 * guaranteed key space so we are always successful.
1512 */
1513 static int
mwl_key_alloc(struct ieee80211vap * vap,struct ieee80211_key * k,ieee80211_keyix * keyix,ieee80211_keyix * rxkeyix)1514 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1515 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1516 {
1517 struct mwl_softc *sc = vap->iv_ic->ic_softc;
1518
1519 if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1520 (k->wk_flags & IEEE80211_KEY_GROUP)) {
1521 if (!ieee80211_is_key_global(vap, k)) {
1522 /* should not happen */
1523 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1524 "%s: bogus group key\n", __func__);
1525 return 0;
1526 }
1527 /* give the caller what they requested */
1528 *keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k);
1529 } else {
1530 /*
1531 * Firmware handles key allocation.
1532 */
1533 *keyix = *rxkeyix = 0;
1534 }
1535 return 1;
1536 }
1537
1538 /*
1539 * Delete a key entry allocated by mwl_key_alloc.
1540 */
1541 static int
mwl_key_delete(struct ieee80211vap * vap,const struct ieee80211_key * k)1542 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1543 {
1544 struct mwl_softc *sc = vap->iv_ic->ic_softc;
1545 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1546 MWL_HAL_KEYVAL hk;
1547 const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1548 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1549
1550 if (hvap == NULL) {
1551 if (vap->iv_opmode != IEEE80211_M_WDS) {
1552 /* XXX monitor mode? */
1553 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1554 "%s: no hvap for opmode %d\n", __func__,
1555 vap->iv_opmode);
1556 return 0;
1557 }
1558 hvap = MWL_VAP(vap)->mv_ap_hvap;
1559 }
1560
1561 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1562 __func__, k->wk_keyix);
1563
1564 memset(&hk, 0, sizeof(hk));
1565 hk.keyIndex = k->wk_keyix;
1566 switch (k->wk_cipher->ic_cipher) {
1567 case IEEE80211_CIPHER_WEP:
1568 hk.keyTypeId = KEY_TYPE_ID_WEP;
1569 break;
1570 case IEEE80211_CIPHER_TKIP:
1571 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1572 break;
1573 case IEEE80211_CIPHER_AES_CCM:
1574 hk.keyTypeId = KEY_TYPE_ID_AES;
1575 break;
1576 default:
1577 /* XXX should not happen */
1578 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1579 __func__, k->wk_cipher->ic_cipher);
1580 return 0;
1581 }
1582 return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/
1583 }
1584
1585 static __inline int
addgroupflags(MWL_HAL_KEYVAL * hk,const struct ieee80211_key * k)1586 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1587 {
1588 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1589 if (k->wk_flags & IEEE80211_KEY_XMIT)
1590 hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1591 if (k->wk_flags & IEEE80211_KEY_RECV)
1592 hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1593 return 1;
1594 } else
1595 return 0;
1596 }
1597
1598 /*
1599 * Set the key cache contents for the specified key. Key cache
1600 * slot(s) must already have been allocated by mwl_key_alloc.
1601 */
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	/*
	 * NB: use the mac address stored with the key; _mwl_key_set
	 * chooses the proper sta db address for sta/wds/group keys.
	 */
	return (_mwl_key_set(vap, k, k->wk_macaddr));
}
1607
/*
 * Plumb a key into the firmware's sta db: build a MWL_HAL_KEYVAL
 * from the net80211 key and pick the correct sta db address for
 * the vap's operating mode.  Returns 1 on success, 0 on failure.
 */
static int
_mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
	const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define	IEEE80211_IS_STATICKEY(k) \
	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
	 (GRPXMIT|IEEE80211_KEY_RECV))
	struct mwl_softc *sc = vap->iv_ic->ic_softc;
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	const uint8_t *macaddr;
	MWL_HAL_KEYVAL hk;

	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
	     ("s/w crypto set?"));

	if (hvap == NULL) {
		if (vap->iv_opmode != IEEE80211_M_WDS) {
			/* XXX monitor mode? */
			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
			    "%s: no hvap for opmode %d\n", __func__,
			    vap->iv_opmode);
			return 0;
		}
		/* WDS vaps borrow the parent ap's hal vap */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	}
	memset(&hk, 0, sizeof(hk));
	hk.keyIndex = k->wk_keyix;
	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
		hk.keyTypeId = KEY_TYPE_ID_WEP;
		hk.keyLen = ieee80211_crypto_get_key_len(k);
		if (k->wk_keyix == vap->iv_def_txkey)
			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
		if (!IEEE80211_IS_STATICKEY(k)) {
			/* NB: WEP is never used for the PTK */
			(void) addgroupflags(&hk, k);
		}
		memcpy(hk.key.aes, ieee80211_crypto_get_key_data(k),
		    ieee80211_crypto_get_key_len(k));
		break;
	case IEEE80211_CIPHER_TKIP:
		hk.keyTypeId = KEY_TYPE_ID_TKIP;
		/* split the 48-bit TSC into the HAL's high/low fields */
		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
		hk.keyLen = ieee80211_crypto_get_key_len(k)
		    + IEEE80211_MICBUF_SIZE;
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;

		/* Copy in TKIP MIC after the 16 byte main key */
		memcpy(hk.key.aes, ieee80211_crypto_get_key_data(k),
		    ieee80211_crypto_get_key_len(k));
		memcpy(hk.key.aes + IEEE80211_KEYBUF_SIZE,
		    ieee80211_crypto_get_key_txmic_data(k),
		    8);
		memcpy(hk.key.aes + IEEE80211_KEYBUF_SIZE + 8,
		    ieee80211_crypto_get_key_rxmic_data(k),
		    8);
		break;
	case IEEE80211_CIPHER_AES_CCM:
		hk.keyTypeId = KEY_TYPE_ID_AES;
		hk.keyLen = ieee80211_crypto_get_key_len(k);
		if (!addgroupflags(&hk, k))
			hk.keyFlags |= KEY_FLAG_PAIRWISE;
		memcpy(hk.key.aes, ieee80211_crypto_get_key_data(k),
		    ieee80211_crypto_get_key_len(k));
		break;
	default:
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
		    __func__, k->wk_cipher->ic_cipher);
		return 0;
	}

	/*
	 * Locate address of sta db entry for writing key;
	 * the convention unfortunately is somewhat different
	 * than how net80211, hostapd, and wpa_supplicant think.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		/*
		 * NB: keys plumbed before the sta reaches AUTH state
		 * will be discarded or written to the wrong sta db
		 * entry because iv_bss is meaningless.  This is ok
		 * (right now) because we handle deferred plumbing of
		 * WEP keys when the sta reaches AUTH state.
		 */
		macaddr = vap->iv_bss->ni_bssid;
		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
			/* XXX plumb to local sta db too for static key wep */
			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
		}
	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
	    vap->iv_state != IEEE80211_S_RUN) {
		/*
		 * Prior to RUN state a WDS vap will not have its BSS
		 * node setup so we will plumb the key to the wrong mac
		 * address (it'll be our local address).  Workaround
		 * this for the moment by grabbing the correct address.
		 */
		macaddr = vap->iv_des_bssid;
	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
		macaddr = vap->iv_myaddr;
	else
		macaddr = mac;
	KEYPRINTF(sc, &hk, macaddr);
	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
1722
1723 /*
1724 * Set the multicast filter contents into the hardware.
1725 * XXX f/w has no support; just defer to the os.
1726 */
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
	/*
	 * NB: the disabled code below predates the if_t conversion
	 * and references an `ifp' that is not in scope here; it
	 * would need rework before it could be re-enabled.
	 */
#if 0
	struct ether_multi *enm;
	struct ether_multistep estep;
	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
	uint8_t *mp;
	int nmc;

	mp = macs;
	nmc = 0;
	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* XXX Punt on ranges. */
		if (nmc == MWL_HAL_MCAST_MAX ||
		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
			if_setflagsbit(ifp, IFF_ALLMULTI, 0);
			return;
		}
		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
		mp += IEEE80211_ADDR_LEN, nmc++;
		ETHER_NEXT_MULTI(estep, enm);
	}
	if_setflagsbit(ifp, 0, IFF_ALLMULTI);
	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
#endif
}
1755
1756 static int
mwl_mode_init(struct mwl_softc * sc)1757 mwl_mode_init(struct mwl_softc *sc)
1758 {
1759 struct ieee80211com *ic = &sc->sc_ic;
1760 struct mwl_hal *mh = sc->sc_mh;
1761
1762 mwl_hal_setpromisc(mh, ic->ic_promisc > 0);
1763 mwl_setmcastfilter(sc);
1764
1765 return 0;
1766 }
1767
1768 /*
1769 * Callback from the 802.11 layer after a multicast state change.
1770 */
1771 static void
mwl_update_mcast(struct ieee80211com * ic)1772 mwl_update_mcast(struct ieee80211com *ic)
1773 {
1774 struct mwl_softc *sc = ic->ic_softc;
1775
1776 mwl_setmcastfilter(sc);
1777 }
1778
1779 /*
1780 * Callback from the 802.11 layer after a promiscuous mode change.
1781 * Note this interface does not check the operating mode as this
1782 * is an internal callback and we are expected to honor the current
1783 * state (e.g. this is used for setting the interface in promiscuous
1784 * mode when operating in hostap mode to do ACS).
1785 */
1786 static void
mwl_update_promisc(struct ieee80211com * ic)1787 mwl_update_promisc(struct ieee80211com *ic)
1788 {
1789 struct mwl_softc *sc = ic->ic_softc;
1790
1791 mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
1792 }
1793
1794 /*
1795 * Callback from the 802.11 layer to update the slot time
1796 * based on the current setting. We use it to notify the
1797 * firmware of ERP changes and the f/w takes care of things
1798 * like slot time and preamble.
1799 */
1800 static void
mwl_updateslot(struct ieee80211com * ic)1801 mwl_updateslot(struct ieee80211com *ic)
1802 {
1803 struct mwl_softc *sc = ic->ic_softc;
1804 struct mwl_hal *mh = sc->sc_mh;
1805 int prot;
1806
1807 /* NB: can be called early; suppress needless cmds */
1808 if (!sc->sc_running)
1809 return;
1810
1811 /*
1812 * Calculate the ERP flags. The firmware will use
1813 * this to carry out the appropriate measures.
1814 */
1815 prot = 0;
1816 if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1817 if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1818 prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1819 if (ic->ic_flags & IEEE80211_F_USEPROT)
1820 prot |= IEEE80211_ERP_USE_PROTECTION;
1821 if (ic->ic_flags & IEEE80211_F_USEBARKER)
1822 prot |= IEEE80211_ERP_LONG_PREAMBLE;
1823 }
1824
1825 DPRINTF(sc, MWL_DEBUG_RESET,
1826 "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1827 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1828 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1829 ic->ic_flags);
1830
1831 mwl_hal_setgprot(mh, prot);
1832 }
1833
1834 /*
1835 * Setup the beacon frame.
1836 */
1837 static int
mwl_beacon_setup(struct ieee80211vap * vap)1838 mwl_beacon_setup(struct ieee80211vap *vap)
1839 {
1840 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1841 struct ieee80211_node *ni = vap->iv_bss;
1842 struct mbuf *m;
1843
1844 m = ieee80211_beacon_alloc(ni);
1845 if (m == NULL)
1846 return ENOBUFS;
1847 mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1848 m_free(m);
1849
1850 return 0;
1851 }
1852
1853 /*
1854 * Update the beacon frame in response to a change.
1855 */
1856 static void
mwl_beacon_update(struct ieee80211vap * vap,int item)1857 mwl_beacon_update(struct ieee80211vap *vap, int item)
1858 {
1859 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1860 struct ieee80211com *ic = vap->iv_ic;
1861
1862 KASSERT(hvap != NULL, ("no beacon"));
1863 switch (item) {
1864 case IEEE80211_BEACON_ERP:
1865 mwl_updateslot(ic);
1866 break;
1867 case IEEE80211_BEACON_HTINFO:
1868 mwl_hal_setnprotmode(hvap, _IEEE80211_MASKSHIFT(
1869 ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1870 break;
1871 case IEEE80211_BEACON_CAPS:
1872 case IEEE80211_BEACON_WME:
1873 case IEEE80211_BEACON_APPIE:
1874 case IEEE80211_BEACON_CSA:
1875 break;
1876 case IEEE80211_BEACON_TIM:
1877 /* NB: firmware always forms TIM */
1878 return;
1879 }
1880 /* XXX retain beacon frame and update */
1881 mwl_beacon_setup(vap);
1882 }
1883
1884 static void
mwl_load_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)1885 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1886 {
1887 bus_addr_t *paddr = (bus_addr_t*) arg;
1888 KASSERT(error == 0, ("error %u on bus_dma callback", error));
1889 *paddr = segs->ds_addr;
1890 }
1891
1892 #ifdef MWL_HOST_PS_SUPPORT
1893 /*
1894 * Handle power save station occupancy changes.
1895 */
1896 static void
mwl_update_ps(struct ieee80211vap * vap,int nsta)1897 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1898 {
1899 struct mwl_vap *mvp = MWL_VAP(vap);
1900
1901 if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1902 mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1903 mvp->mv_last_ps_sta = nsta;
1904 }
1905
1906 /*
1907 * Handle associated station power save state changes.
1908 */
1909 static int
mwl_set_tim(struct ieee80211_node * ni,int set)1910 mwl_set_tim(struct ieee80211_node *ni, int set)
1911 {
1912 struct ieee80211vap *vap = ni->ni_vap;
1913 struct mwl_vap *mvp = MWL_VAP(vap);
1914
1915 if (mvp->mv_set_tim(ni, set)) { /* NB: state change */
1916 mwl_hal_setpowersave_sta(mvp->mv_hvap,
1917 IEEE80211_AID(ni->ni_associd), set);
1918 return 1;
1919 } else
1920 return 0;
1921 }
1922 #endif /* MWL_HOST_PS_SUPPORT */
1923
1924 static int
mwl_desc_setup(struct mwl_softc * sc,const char * name,struct mwl_descdma * dd,int nbuf,size_t bufsize,int ndesc,size_t descsize)1925 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1926 struct mwl_descdma *dd,
1927 int nbuf, size_t bufsize, int ndesc, size_t descsize)
1928 {
1929 uint8_t *ds;
1930 int error;
1931
1932 DPRINTF(sc, MWL_DEBUG_RESET,
1933 "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
1934 __func__, name, nbuf, (uintmax_t) bufsize,
1935 ndesc, (uintmax_t) descsize);
1936
1937 dd->dd_name = name;
1938 dd->dd_desc_len = nbuf * ndesc * descsize;
1939
1940 /*
1941 * Setup DMA descriptor area.
1942 */
1943 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1944 PAGE_SIZE, 0, /* alignment, bounds */
1945 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1946 BUS_SPACE_MAXADDR, /* highaddr */
1947 NULL, NULL, /* filter, filterarg */
1948 dd->dd_desc_len, /* maxsize */
1949 1, /* nsegments */
1950 dd->dd_desc_len, /* maxsegsize */
1951 BUS_DMA_ALLOCNOW, /* flags */
1952 NULL, /* lockfunc */
1953 NULL, /* lockarg */
1954 &dd->dd_dmat);
1955 if (error != 0) {
1956 device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
1957 return error;
1958 }
1959
1960 /* allocate descriptors */
1961 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
1962 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
1963 &dd->dd_dmamap);
1964 if (error != 0) {
1965 device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
1966 "error %u\n", nbuf * ndesc, dd->dd_name, error);
1967 goto fail1;
1968 }
1969
1970 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
1971 dd->dd_desc, dd->dd_desc_len,
1972 mwl_load_cb, &dd->dd_desc_paddr,
1973 BUS_DMA_NOWAIT);
1974 if (error != 0) {
1975 device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
1976 dd->dd_name, error);
1977 goto fail2;
1978 }
1979
1980 ds = dd->dd_desc;
1981 memset(ds, 0, dd->dd_desc_len);
1982 DPRINTF(sc, MWL_DEBUG_RESET,
1983 "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
1984 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
1985 (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
1986
1987 return 0;
1988 fail2:
1989 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
1990 fail1:
1991 bus_dma_tag_destroy(dd->dd_dmat);
1992 memset(dd, 0, sizeof(*dd));
1993 return error;
1994 #undef DS2PHYS
1995 }
1996
/*
 * Undo mwl_desc_setup: unload the map, free the descriptor memory,
 * destroy the tag (in that order), then clear the bookkeeping
 * structure so dd_desc_len == 0 marks it torn down.
 */
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	memset(dd, 0, sizeof(*dd));
}
2006
2007 /*
2008 * Construct a tx q's free list. The order of entries on
2009 * the list must reflect the physical layout of tx descriptors
2010 * because the firmware pre-fetches descriptors.
2011 *
2012 * XXX might be better to use indices into the buffer array.
2013 */
2014 static void
mwl_txq_reset(struct mwl_softc * sc,struct mwl_txq * txq)2015 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2016 {
2017 struct mwl_txbuf *bf;
2018 int i;
2019
2020 bf = txq->dma.dd_bufptr;
2021 STAILQ_INIT(&txq->free);
2022 for (i = 0; i < mwl_txbuf; i++, bf++)
2023 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2024 txq->nfree = i;
2025 }
2026
2027 #define DS2PHYS(_dd, _ds) \
2028 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2029
/*
 * Allocate the descriptor ring and per-buffer state for a tx queue:
 * one contiguous ring of mwl_txbuf*MWL_TXDESC hardware descriptors,
 * a parallel array of mwl_txbuf software buffers (each owning
 * MWL_TXDESC descriptors and a dmamap), then seed the free list.
 *
 * NB: error paths return without unwinding; the caller
 * (mwl_dma_setup) invokes mwl_dma_cleanup on failure, which
 * tolerates the partially-constructed state — TODO confirm.
 */
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	int error, bsize, i;
	struct mwl_txbuf *bf;
	struct mwl_txdesc *ds;

	error = mwl_desc_setup(sc, "tx", &txq->dma,
			mwl_txbuf, sizeof(struct mwl_txbuf),
			MWL_TXDESC, sizeof(struct mwl_txdesc));
	if (error != 0)
		return error;

	/* allocate and setup tx buffers */
	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
		    mwl_txbuf);
		return ENOMEM;
	}
	txq->dma.dd_bufptr = bf;

	/* carve the descriptor ring into per-buffer slices */
	ds = txq->dma.dd_desc;
	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
		bf->bf_desc = ds;
		/* bus address of this buffer's first descriptor */
		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "unable to create dmamap for tx "
				"buffer %u, error %u\n", i, error);
			return error;
		}
	}
	mwl_txq_reset(sc, txq);
	return 0;
}
2068
/*
 * Reclaim all tx queue dma resources: per-buffer dmamaps, the
 * software buffer array, and finally the descriptor ring.  Buffers
 * must not be holding an mbuf or node reference at this point.
 */
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i;

	bf = txq->dma.dd_bufptr;
	for (i = 0; i < mwl_txbuf; i++, bf++) {
		KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
		KASSERT(bf->bf_node == NULL, ("node on free list"));
		/* NB: dmamap may be NULL if mwl_txdma_setup failed midway */
		if (bf->bf_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
	}
	STAILQ_INIT(&txq->free);
	txq->nfree = 0;
	if (txq->dma.dd_bufptr != NULL) {
		free(txq->dma.dd_bufptr, M_MWLDEV);
		txq->dma.dd_bufptr = NULL;
	}
	/* dd_desc_len != 0 means the descriptor ring was set up */
	if (txq->dma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &txq->dma);
}
2091
2092 static int
mwl_rxdma_setup(struct mwl_softc * sc)2093 mwl_rxdma_setup(struct mwl_softc *sc)
2094 {
2095 int error, jumbosize, bsize, i;
2096 struct mwl_rxbuf *bf;
2097 struct mwl_jumbo *rbuf;
2098 struct mwl_rxdesc *ds;
2099 caddr_t data;
2100
2101 error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2102 mwl_rxdesc, sizeof(struct mwl_rxbuf),
2103 1, sizeof(struct mwl_rxdesc));
2104 if (error != 0)
2105 return error;
2106
2107 /*
2108 * Receive is done to a private pool of jumbo buffers.
2109 * This allows us to attach to mbuf's and avoid re-mapping
2110 * memory on each rx we post. We allocate a large chunk
2111 * of memory and manage it in the driver. The mbuf free
2112 * callback method is used to reclaim frames after sending
2113 * them up the stack. By default we allocate 2x the number of
2114 * rx descriptors configured so we have some slop to hold
2115 * us while frames are processed.
2116 */
2117 if (mwl_rxbuf < 2*mwl_rxdesc) {
2118 device_printf(sc->sc_dev,
2119 "too few rx dma buffers (%d); increasing to %d\n",
2120 mwl_rxbuf, 2*mwl_rxdesc);
2121 mwl_rxbuf = 2*mwl_rxdesc;
2122 }
2123 jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2124 sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2125
2126 error = bus_dma_tag_create(sc->sc_dmat, /* parent */
2127 PAGE_SIZE, 0, /* alignment, bounds */
2128 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2129 BUS_SPACE_MAXADDR, /* highaddr */
2130 NULL, NULL, /* filter, filterarg */
2131 sc->sc_rxmemsize, /* maxsize */
2132 1, /* nsegments */
2133 sc->sc_rxmemsize, /* maxsegsize */
2134 BUS_DMA_ALLOCNOW, /* flags */
2135 NULL, /* lockfunc */
2136 NULL, /* lockarg */
2137 &sc->sc_rxdmat);
2138 if (error != 0) {
2139 device_printf(sc->sc_dev, "could not create rx DMA tag\n");
2140 return error;
2141 }
2142
2143 error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2144 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2145 &sc->sc_rxmap);
2146 if (error != 0) {
2147 device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
2148 (uintmax_t) sc->sc_rxmemsize);
2149 return error;
2150 }
2151
2152 error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2153 sc->sc_rxmem, sc->sc_rxmemsize,
2154 mwl_load_cb, &sc->sc_rxmem_paddr,
2155 BUS_DMA_NOWAIT);
2156 if (error != 0) {
2157 device_printf(sc->sc_dev, "could not load rx DMA map\n");
2158 return error;
2159 }
2160
2161 /*
2162 * Allocate rx buffers and set them up.
2163 */
2164 bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2165 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2166 if (bf == NULL) {
2167 device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
2168 return error;
2169 }
2170 sc->sc_rxdma.dd_bufptr = bf;
2171
2172 STAILQ_INIT(&sc->sc_rxbuf);
2173 ds = sc->sc_rxdma.dd_desc;
2174 for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2175 bf->bf_desc = ds;
2176 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2177 /* pre-assign dma buffer */
2178 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2179 /* NB: tail is intentional to preserve descriptor order */
2180 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2181 }
2182
2183 /*
2184 * Place remainder of dma memory buffers on the free list.
2185 */
2186 SLIST_INIT(&sc->sc_rxfree);
2187 for (; i < mwl_rxbuf; i++) {
2188 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2189 rbuf = MWL_JUMBO_DATA2BUF(data);
2190 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2191 sc->sc_nrxfree++;
2192 }
2193 return 0;
2194 }
2195 #undef DS2PHYS
2196
/*
 * Undo mwl_rxdma_setup, tolerating partially-constructed state:
 * unload the jumbo-pool map, free the pool memory, free the rx
 * buffer array, then tear down the descriptor ring.  Each step is
 * guarded so this is safe to call from any failure point.
 */
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
	if (sc->sc_rxmem_paddr != 0) {
		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
		sc->sc_rxmem_paddr = 0;
	}
	if (sc->sc_rxmem != NULL) {
		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
		sc->sc_rxmem = NULL;
	}
	if (sc->sc_rxdma.dd_bufptr != NULL) {
		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
		sc->sc_rxdma.dd_bufptr = NULL;
	}
	/* dd_desc_len != 0 means the descriptor ring was set up */
	if (sc->sc_rxdma.dd_desc_len != 0)
		mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
2215
2216 static int
mwl_dma_setup(struct mwl_softc * sc)2217 mwl_dma_setup(struct mwl_softc *sc)
2218 {
2219 int error, i;
2220
2221 error = mwl_rxdma_setup(sc);
2222 if (error != 0) {
2223 mwl_rxdma_cleanup(sc);
2224 return error;
2225 }
2226
2227 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2228 error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2229 if (error != 0) {
2230 mwl_dma_cleanup(sc);
2231 return error;
2232 }
2233 }
2234 return 0;
2235 }
2236
2237 static void
mwl_dma_cleanup(struct mwl_softc * sc)2238 mwl_dma_cleanup(struct mwl_softc *sc)
2239 {
2240 int i;
2241
2242 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2243 mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2244 mwl_rxdma_cleanup(sc);
2245 }
2246
2247 static struct ieee80211_node *
mwl_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])2248 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2249 {
2250 struct ieee80211com *ic = vap->iv_ic;
2251 struct mwl_softc *sc = ic->ic_softc;
2252 const size_t space = sizeof(struct mwl_node);
2253 struct mwl_node *mn;
2254
2255 mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2256 if (mn == NULL) {
2257 /* XXX stat+msg */
2258 return NULL;
2259 }
2260 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2261 return &mn->mn_node;
2262 }
2263
/*
 * net80211 node cleanup hook: if a firmware station db entry was
 * installed (mn_staid != 0), delete it and release the station id,
 * then chain to the saved net80211 cleanup method.
 */
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_node *mn = MWL_NODE(ni);

	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
	    __func__, ni, ni->ni_ic, mn->mn_staid);

	if (mn->mn_staid != 0) {
		struct ieee80211vap *vap = ni->ni_vap;

		/* NB: the else-if below pairs with this NULL check */
		if (mn->mn_hvap != NULL) {
			/* sta mode entries are keyed by our own address */
			if (vap->iv_opmode == IEEE80211_M_STA)
				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
			else
				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
		}
		/*
		 * NB: legacy WDS peer sta db entry is installed using
		 *     the associate ap's hvap; use it again to delete it.
		 *     XXX can vap be NULL?
		 */
		else if (vap->iv_opmode == IEEE80211_M_WDS &&
		    MWL_VAP(vap)->mv_ap_hvap != NULL)
			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
			    ni->ni_macaddr);
		delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	sc->sc_node_cleanup(ni);
}
2297
/*
 * Reclaim rx dma buffers from packets sitting on the ampdu
 * reorder queue for a station.  We replace buffers with a
 * system cluster (if available).
 *
 * NB: the implementation is entirely compiled out (#if 0); the
 * function is currently a no-op stub kept so callers link.  The
 * disabled body uses non-FreeBSD primitives (pool_cache_get_paddr,
 * MEXTREMOVE) and would need porting before being enabled.
 */
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
#if 0
	int i, n, off;
	struct mbuf *m;
	void *cl;

	n = rap->rxa_qframes;
	for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
		m = rap->rxa_m[i];
		if (m == NULL)
			continue;
		n--;
		/* our dma buffers have a well-known free routine */
		if ((m->m_flags & M_EXT) == 0 ||
		    m->m_ext.ext_free != mwl_ext_free)
			continue;
		/*
		 * Try to allocate a cluster and move the data.
		 */
		off = m->m_data - m->m_ext.ext_buf;
		if (off + m->m_pkthdr.len > MCLBYTES) {
			/* XXX no AMSDU for now */
			continue;
		}
		cl = pool_cache_get_paddr(&mclpool_cache, 0,
		    &m->m_ext.ext_paddr);
		if (cl != NULL) {
			/*
			 * Copy the existing data to the cluster, remove
			 * the rx dma buffer, and attach the cluster in
			 * its place.  Note we preserve the offset to the
			 * data so frames being bridged can still prepend
			 * their headers without adding another mbuf.
			 */
			memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
			MEXTREMOVE(m);
			MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
			/* setup mbuf like _MCLGET does */
			m->m_flags |= M_CLUSTER | M_EXT_RW;
			_MOWNERREF(m, M_EXT | M_CLUSTER);
			/* NB: m_data is clobbered by MEXTADDR, adjust */
			m->m_data += off;
		}
	}
#endif
}
2351
2352 /*
2353 * Callback to reclaim resources. We first let the
2354 * net80211 layer do it's thing, then if we are still
2355 * blocked by a lack of rx dma buffers we walk the ampdu
2356 * reorder q's to reclaim buffers by copying to a system
2357 * cluster.
2358 */
2359 static void
mwl_node_drain(struct ieee80211_node * ni)2360 mwl_node_drain(struct ieee80211_node *ni)
2361 {
2362 struct ieee80211com *ic = ni->ni_ic;
2363 struct mwl_softc *sc = ic->ic_softc;
2364 struct mwl_node *mn = MWL_NODE(ni);
2365
2366 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2367 __func__, ni, ni->ni_vap, mn->mn_staid);
2368
2369 /* NB: call up first to age out ampdu q's */
2370 sc->sc_node_drain(ni);
2371
2372 /* XXX better to not check low water mark? */
2373 if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2374 (ni->ni_flags & IEEE80211_NODE_HT)) {
2375 uint8_t tid;
2376 /*
2377 * Walk the reorder q and reclaim rx dma buffers by copying
2378 * the packet contents into clusters.
2379 */
2380 for (tid = 0; tid < WME_NUM_TID; tid++) {
2381 struct ieee80211_rx_ampdu *rap;
2382
2383 rap = &ni->ni_rx_ampdu[tid];
2384 if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2385 continue;
2386 if (rap->rxa_qframes)
2387 mwl_ampdu_rxdma_reclaim(rap);
2388 }
2389 }
2390 }
2391
/*
 * net80211 signal report hook: rssi comes from the net80211
 * smoothed estimate; the noise floor is a fixed placeholder
 * (the antenna-info path that would supply a measured value
 * is compiled out).
 */
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
	*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
	/* XXX need to smooth data */
	*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
	*noise = -95;		/* XXX */
#endif
#else
	*noise = -95;		/* XXX */
#endif
}
2407
/*
 * Convert hardware per-antenna rssi info to common format:
 * Let a1, a2, a3 represent the amplitudes per chain
 * Let amax represent max[a1, a2, a3]
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
 * We store a table that is 4*20*log10(idx) - the extra 4 is to
 * maintain some extra precision.
 *
 * Values are stored in .5 db format capped at 127.
 */
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
	struct ieee80211_mimo_info *mi)
{
	/*
	 * CVT computes rssi + (20*log10(a) - 20*log10(amax)) using the
	 * table (scaled by 4, hence >> 2), then converts dB to .5 dB
	 * units (<< 1) with saturation at 127.
	 * NB: _src is used as a table index; assumes values < 32 —
	 * TODO confirm hardware guarantees this range.
	 */
#define	CVT(_dst, _src) do {						\
	(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2);	\
	(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1);			\
} while (0)
	/* 4*20*log10(idx) table, idx 0..31 */
	static const int8_t logdbtbl[32] = {
	       0,   0,  24,  38,  48,  56,  62,  68,
	      72,  76,  80,  83,  86,  89,  92,  94,
	      96,  98, 100, 102, 104, 106, 107, 109,
	     110, 112, 113, 115, 116, 117, 118, 119
	};
	const struct mwl_node *mn = MWL_NODE_CONST(ni);
	uint8_t rssi = mn->mn_ai.rsvd1/2;		/* XXX */
	uint32_t rssi_max;

	/* pick the strongest chain as the reference amplitude */
	rssi_max = mn->mn_ai.rssi_a;
	if (mn->mn_ai.rssi_b > rssi_max)
		rssi_max = mn->mn_ai.rssi_b;
	if (mn->mn_ai.rssi_c > rssi_max)
		rssi_max = mn->mn_ai.rssi_c;

	CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a);
	CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b);
	CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c);

	mi->ch[0].noise[0] = mn->mn_ai.nf_a;
	mi->ch[1].noise[0] = mn->mn_ai.nf_b;
	mi->ch[2].noise[0] = mn->mn_ai.nf_c;
#undef CVT
}
2452
2453 static __inline void *
mwl_getrxdma(struct mwl_softc * sc)2454 mwl_getrxdma(struct mwl_softc *sc)
2455 {
2456 struct mwl_jumbo *buf;
2457 void *data;
2458
2459 /*
2460 * Allocate from jumbo pool.
2461 */
2462 MWL_RXFREE_LOCK(sc);
2463 buf = SLIST_FIRST(&sc->sc_rxfree);
2464 if (buf == NULL) {
2465 DPRINTF(sc, MWL_DEBUG_ANY,
2466 "%s: out of rx dma buffers\n", __func__);
2467 sc->sc_stats.mst_rx_nodmabuf++;
2468 data = NULL;
2469 } else {
2470 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2471 sc->sc_nrxfree--;
2472 data = MWL_JUMBO_BUF2DATA(buf);
2473 }
2474 MWL_RXFREE_UNLOCK(sc);
2475 return data;
2476 }
2477
2478 static __inline void
mwl_putrxdma(struct mwl_softc * sc,void * data)2479 mwl_putrxdma(struct mwl_softc *sc, void *data)
2480 {
2481 struct mwl_jumbo *buf;
2482
2483 /* XXX bounds check data */
2484 MWL_RXFREE_LOCK(sc);
2485 buf = MWL_JUMBO_DATA2BUF(data);
2486 SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2487 sc->sc_nrxfree++;
2488 MWL_RXFREE_UNLOCK(sc);
2489 }
2490
/*
 * (Re)arm an rx descriptor for the firmware.  If the buffer has no
 * dma memory attached, try to get one from the jumbo pool; failing
 * that, mark the descriptor OS-owned so the firmware skips it and
 * return ENOMEM.  Field write order matters: RxControl is set to
 * DRIVER_OWN last, after all other descriptor fields are in place.
 */
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
	struct mwl_rxdesc *ds;

	ds = bf->bf_desc;
	if (bf->bf_data == NULL) {
		bf->bf_data = mwl_getrxdma(sc);
		if (bf->bf_data == NULL) {
			/* mark descriptor to be skipped */
			ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
			/* NB: don't need PREREAD */
			MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
			sc->sc_stats.mst_rxbuf_failed++;
			return ENOMEM;
		}
	}
	/*
	 * NB: DMA buffer contents is known to be unmodified
	 *     so there's no need to flush the data cache.
	 */

	/*
	 * Setup descriptor.
	 */
	ds->QosCtrl = 0;
	ds->RSSI = 0;
	ds->Status = EAGLE_RXD_STATUS_IDLE;
	ds->Channel = 0;
	ds->PktLen = htole16(MWL_AGGR_SIZE);
	ds->SQ2 = 0;
	ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
	/* NB: don't touch pPhysNext, set once */
	/* hand the descriptor to the hardware last */
	ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
	MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
2529
/*
 * External-storage free callback for rx mbufs: return the jumbo
 * dma buffer to the pool and, if rx was blocked for lack of
 * buffers, re-enable rx interrupts once the pool refills past
 * mwl_rxdmalow.
 */
static void
mwl_ext_free(struct mbuf *m)
{
	struct mwl_softc *sc = m->m_ext.ext_arg1;

	/* XXX bounds check data */
	mwl_putrxdma(sc, m->m_ext.ext_buf);
	/*
	 * If we were previously blocked by a lack of rx dma buffers
	 * check if we now have enough to restart rx interrupt handling.
	 * NOTE(review): sc_rxblocked/sc_nrxfree are read here without
	 * the rxfree lock — presumably benign (worst case a delayed
	 * re-enable); confirm against the interrupt path.
	 */
	if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
		sc->sc_rxblocked = 0;
		mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
	}
}
2546
/*
 * 802.11 BlockAckReq (BAR) control frame layout, used by
 * mwl_anyhdrsize to size BAR headers (net80211 has no equivalent).
 */
struct mwl_frame_bar {
	u_int8_t	i_fc[2];		/* frame control */
	u_int8_t	i_dur[2];		/* duration */
	u_int8_t	i_ra[IEEE80211_ADDR_LEN];	/* receiver address */
	u_int8_t	i_ta[IEEE80211_ADDR_LEN];	/* transmitter address */
	/* ctl, seq, FCS */
} __packed;
2554
2555 /*
2556 * Like ieee80211_anyhdrsize, but handles BAR frames
2557 * specially so the logic below to piece the 802.11
2558 * header together works.
2559 */
2560 static __inline int
mwl_anyhdrsize(const void * data)2561 mwl_anyhdrsize(const void *data)
2562 {
2563 const struct ieee80211_frame *wh = data;
2564
2565 if (IEEE80211_IS_CTL(wh)) {
2566 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2567 case IEEE80211_FC0_SUBTYPE_CTS:
2568 case IEEE80211_FC0_SUBTYPE_ACK:
2569 return sizeof(struct ieee80211_frame_ack);
2570 case IEEE80211_FC0_SUBTYPE_BAR:
2571 return sizeof(struct mwl_frame_bar);
2572 }
2573 return sizeof(struct ieee80211_frame_min);
2574 } else
2575 return ieee80211_hdrsize(data);
2576 }
2577
/*
 * Report a TKIP Michael MIC failure to net80211.  The rx buffer
 * carries a 16-bit prefix before the 802.11 header (see the rx
 * path), hence the sizeof(uint16_t) offset.
 */
static void
mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
{
	const struct ieee80211_frame *wh;
	struct ieee80211_node *ni;

	wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
	if (ni != NULL) {
		ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
		ieee80211_free_node(ni);
	}
}
2591
/*
 * Convert hardware signal strength to rssi.  The value
 * provided by the device has the noise floor added in;
 * we need to compensate for this but we don't have that
 * so we use a fixed value.
 *
 * The offset of 8 is good for both 2.4 and 5GHz.  The LNA
 * offset is already set as part of the initial gain.  This
 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
 */
static __inline int
cvtrssi(uint8_t ssi)
{
	/* XXX hack guess until we have a real noise floor */
	int v = 2 * (87 - ((int) ssi + 8));	/* NB: .5 dBm units */

	if (v < 0)
		return 0;
	if (v > 127)
		return 127;
	return v;
}
2610
/*
 * Deferred (taskqueue) receive processing.  Drain up to mwl_rxquota
 * frames from the rx descriptor ring: for each host-owned descriptor
 * we detach its jumbo dma buffer, wrap it in an mbuf, rebuild the
 * 802.11 header from the partial header the firmware provides, and
 * hand the frame to net80211.  Each descriptor is then re-armed with
 * a replacement buffer; if the pool runs dry, rx interrupts are
 * disabled until mwl_ext_free refills it.
 */
static void
mwl_rx_proc(void *arg, int npending)
{
	struct mwl_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct mwl_rxbuf *bf;
	struct mwl_rxdesc *ds;
	struct mbuf *m;
	struct ieee80211_qosframe *wh;
	struct ieee80211_node *ni;
	struct mwl_node *mn;
	int off, len, hdrlen, pktlen, rssi, ntodo;
	uint8_t *data, status;
	void *newdata;
	int16_t nf;

	DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
	    __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
	    RD4(sc, sc->sc_hwspecs.rxDescWrite));
	nf = -96;			/* XXX fixed noise floor */
	bf = sc->sc_rxnext;
	for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
		if (bf == NULL)
			bf = STAILQ_FIRST(&sc->sc_rxbuf);
		ds = bf->bf_desc;
		data = bf->bf_data;
		if (data == NULL) {
			/*
			 * If data allocation failed previously there
			 * will be no buffer; try again to re-populate it.
			 * Note the firmware will not advance to the next
			 * descriptor with a dma buffer so we must mimic
			 * this or we'll get out of sync.
			 */
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: rx buf w/o dma memory\n", __func__);
			(void) mwl_rxbuf_init(sc, bf);
			sc->sc_stats.mst_rx_dmabufmissing++;
			break;
		}
		MWL_RXDESC_SYNC(sc, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* stop at the first descriptor still owned by the dma engine */
		if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
			break;
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
			mwl_printrxbuf(bf, 0);
#endif
		status = ds->Status;
		if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
			counter_u64_add(ic->ic_ierrors, 1);
			sc->sc_stats.mst_rx_crypto++;
			/*
			 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
			 *     for backwards compatibility.
			 */
			if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
			    (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
				/*
				 * MIC error, notify upper layers.
				 */
				bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
				    BUS_DMASYNC_POSTREAD);
				mwl_handlemicerror(ic, data);
				sc->sc_stats.mst_rx_tkipmic++;
			}
			/* XXX too painful to tap packets */
			goto rx_next;
		}
		/*
		 * Sync the data buffer.
		 */
		len = le16toh(ds->PktLen);
		bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
		/*
		 * The 802.11 header is provided all or in part at the front;
		 * use it to calculate the true size of the header that we'll
		 * construct below.  We use this to figure out where to copy
		 * payload prior to constructing the header.
		 */
		hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
		off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);

		/* calculate rssi early so we can re-use for each aggregate */
		rssi = cvtrssi(ds->RSSI);

		pktlen = hdrlen + (len - off);
		/*
		 * NB: we know our frame is at least as large as
		 * IEEE80211_MIN_LEN because there is a 4-address
		 * frame at the front.  Hence there's no need to
		 * vet the packet length.  If the frame in fact
		 * is too small it should be discarded at the
		 * net80211 layer.
		 */

		/*
		 * Attach dma buffer to an mbuf.  We tried
		 * doing this based on the packet size (i.e.
		 * copying small packets) but it turns out to
		 * be a net loss.  The tradeoff might be system
		 * dependent (cache architecture is important).
		 */
		MGETHDR(m, M_NOWAIT, MT_DATA);
		if (m == NULL) {
			DPRINTF(sc, MWL_DEBUG_ANY,
			    "%s: no rx mbuf\n", __func__);
			sc->sc_stats.mst_rx_nombuf++;
			goto rx_next;
		}
		/*
		 * Acquire the replacement dma buffer before
		 * processing the frame.  If we're out of dma
		 * buffers we disable rx interrupts and wait
		 * for the free pool to reach mlw_rxdmalow buffers
		 * before starting to do work again.  If the firmware
		 * runs out of descriptors then it will toss frames
		 * which is better than our doing it as that can
		 * starve our processing.  It is also important that
		 * we always process rx'd frames in case they are
		 * A-MPDU as otherwise the host's view of the BA
		 * window may get out of sync with the firmware.
		 */
		newdata = mwl_getrxdma(sc);
		if (newdata == NULL) {
			/* NB: stat+msg in mwl_getrxdma */
			m_free(m);
			/* disable RX interrupt and mark state */
			mwl_hal_intrset(sc->sc_mh,
			    sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
			sc->sc_rxblocked = 1;
			ieee80211_drain(ic);
			/* XXX check rxblocked and immediately start again? */
			goto rx_stop;
		}
		bf->bf_data = newdata;
		/*
		 * Attach the dma buffer to the mbuf;
		 * mwl_rxbuf_init will re-setup the rx
		 * descriptor using the replacement dma
		 * buffer we just installed above.
		 */
		m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0,
		    EXT_NET_DRV);
		m->m_data += off - hdrlen;
		m->m_pkthdr.len = m->m_len = pktlen;
		/* NB: dma buffer assumed read-only */

		/*
		 * Piece 802.11 header together.
		 */
		wh = mtod(m, struct ieee80211_qosframe *);
		/* NB: don't need to do this sometimes but ... */
		/* XXX special case so we can memcpy after m_devget? */
		ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
		if (IEEE80211_QOS_HAS_SEQ(wh))
			*(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl;
		/*
		 * The f/w strips WEP header but doesn't clear
		 * the WEP bit; mark the packet with M_WEP so
		 * net80211 will treat the data as decrypted.
		 * While here also clear the PWR_MGT bit since
		 * power save is handled by the firmware and
		 * passing this up will potentially cause the
		 * upper layer to put a station in power save
		 * (except when configured with MWL_HOST_PS_SUPPORT).
		 */
		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
			m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
		wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
		    IEEE80211_FC1_PWR_MGT);
#endif

		if (ieee80211_radiotap_active(ic)) {
			struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;

			tap->wr_flags = 0;
			tap->wr_rate = ds->Rate;
			tap->wr_antsignal = rssi + nf;
			tap->wr_antnoise = nf;
		}
		if (IFF_DUMPPKTS_RECV(sc, wh)) {
			ieee80211_dump_pkt(ic, mtod(m, caddr_t),
			    len, ds->Rate, rssi);
		}
		/* dispatch */
		ni = ieee80211_find_rxnode(ic,
		    (const struct ieee80211_frame_min *) wh);
		if (ni != NULL) {
			mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
			mn->mn_ai.rssi_a = ds->ai.rssi_a;
			mn->mn_ai.rssi_b = ds->ai.rssi_b;
			mn->mn_ai.rssi_c = ds->ai.rssi_c;
			mn->mn_ai.rsvd1 = rssi;
#endif
			/* tag AMPDU aggregates for reorder processing */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;
			(void) ieee80211_input(ni, m, rssi, nf);
			ieee80211_free_node(ni);
		} else
			(void) ieee80211_input_all(ic, m, rssi, nf);
	rx_next:
		/* NB: ignore ENOMEM so we process more descriptors */
		(void) mwl_rxbuf_init(sc, bf);
		bf = STAILQ_NEXT(bf, bf_list);
	}
rx_stop:
	/* remember where to resume on the next pass */
	sc->sc_rxnext = bf;

	if (mbufq_first(&sc->sc_snd) != NULL) {
		/* NB: kick fw; the tx thread may have been preempted */
		mwl_hal_txstart(sc->sc_mh, 0);
		mwl_start(sc);
	}
}
2831
/*
 * One-time init of a tx queue: set its number, and link every
 * descriptor's pPhysNext to the next buffer's descriptor (wrapping
 * the last back to the first) so the hardware sees a circular ring.
 */
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
	struct mwl_txbuf *bf, *bn;
	struct mwl_txdesc *ds;

	MWL_TXQ_LOCK_INIT(sc, txq);
	txq->qnum = qnum;
	txq->txpri = 0;	/* XXX */
#if 0
	/* NB: q setup by mwl_txdma_setup XXX */
	STAILQ_INIT(&txq->free);
#endif
	STAILQ_FOREACH(bf, &txq->free, bf_list) {
		bf->bf_txq = txq;

		ds = bf->bf_desc;
		bn = STAILQ_NEXT(bf, bf_list);
		/* wrap the last descriptor back to the head of the ring */
		if (bn == NULL)
			bn = STAILQ_FIRST(&txq->free);
		ds->pPhysNext = htole32(bn->bf_daddr);
	}
	STAILQ_INIT(&txq->active);
}
2856
2857 /*
2858 * Setup a hardware data transmit queue for the specified
2859 * access control. We record the mapping from ac's
2860 * to h/w queues for use by mwl_tx_start.
2861 */
2862 static int
mwl_tx_setup(struct mwl_softc * sc,int ac,int mvtype)2863 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2864 {
2865 struct mwl_txq *txq;
2866
2867 if (ac >= nitems(sc->sc_ac2q)) {
2868 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2869 ac, nitems(sc->sc_ac2q));
2870 return 0;
2871 }
2872 if (mvtype >= MWL_NUM_TX_QUEUES) {
2873 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2874 mvtype, MWL_NUM_TX_QUEUES);
2875 return 0;
2876 }
2877 txq = &sc->sc_txq[mvtype];
2878 mwl_txq_init(sc, txq, mvtype);
2879 sc->sc_ac2q[ac] = txq;
2880 return 1;
2881 }
2882
2883 /*
2884 * Update WME parameters for a transmit queue.
2885 */
2886 static int
mwl_txq_update(struct mwl_softc * sc,int ac)2887 mwl_txq_update(struct mwl_softc *sc, int ac)
2888 {
2889 #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
2890 struct ieee80211com *ic = &sc->sc_ic;
2891 struct chanAccParams chp;
2892 struct mwl_txq *txq = sc->sc_ac2q[ac];
2893 struct wmeParams *wmep;
2894 struct mwl_hal *mh = sc->sc_mh;
2895 int aifs, cwmin, cwmax, txoplim;
2896
2897 ieee80211_wme_ic_getparams(ic, &chp);
2898 wmep = &chp.cap_wmeParams[ac];
2899
2900 aifs = wmep->wmep_aifsn;
2901 /* XXX in sta mode need to pass log values for cwmin/max */
2902 cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2903 cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2904 txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
2905
2906 if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
2907 device_printf(sc->sc_dev, "unable to update hardware queue "
2908 "parameters for %s traffic!\n",
2909 ieee80211_wme_acnames[ac]);
2910 return 0;
2911 }
2912 return 1;
2913 #undef MWL_EXPONENT_TO_VALUE
2914 }
2915
2916 /*
2917 * Callback from the 802.11 layer to update WME parameters.
2918 */
2919 static int
mwl_wme_update(struct ieee80211com * ic)2920 mwl_wme_update(struct ieee80211com *ic)
2921 {
2922 struct mwl_softc *sc = ic->ic_softc;
2923
2924 return !mwl_txq_update(sc, WME_AC_BE) ||
2925 !mwl_txq_update(sc, WME_AC_BK) ||
2926 !mwl_txq_update(sc, WME_AC_VI) ||
2927 !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
2928 }
2929
/*
 * Reclaim resources for a setup queue.  Only the queue lock is
 * torn down here; descriptor/buffer state is released separately
 * by mwl_txdma_cleanup.
 */
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	/* XXX hal work? */
	MWL_TXQ_LOCK_DESTROY(txq);
}
2939
2940 /*
2941 * Reclaim all tx queue resources.
2942 */
2943 static void
mwl_tx_cleanup(struct mwl_softc * sc)2944 mwl_tx_cleanup(struct mwl_softc *sc)
2945 {
2946 int i;
2947
2948 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2949 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
2950 }
2951
/*
 * Load an outbound mbuf chain into the tx buffer's DMA map.
 * If the chain needs more than MWL_TXDESC segments it is
 * coalesced and reloaded.  On success bf->bf_segs/bf->bf_nseg
 * are valid, the map is synced for the device, and bf->bf_m
 * holds the (possibly replaced) chain.  On any error the mbuf
 * is freed and an errno is returned; the caller must not touch
 * m0 afterwards.
 */
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* NB: sentinel > MWL_TXDESC; triggers the linearize path below */
		bf->bf_nseg = MWL_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.mst_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (error == EFBIG) {		/* too many desc's, linearize */
		sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
		m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
		m = m_defrag(m0, M_NOWAIT);
#endif
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.mst_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* Retry the load with the compacted chain. */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.mst_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= MWL_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.mst_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
3013
/*
 * Map a legacy 802.11 rate (in 0.5 Mb/s units, e.g. 2 == 1 Mb/s)
 * to the firmware rate index; unknown rates map to index 0.
 * NB: table order must match mwl_cvtlegacyrix's table.
 */
static __inline int
mwl_cvtlegacyrate(int rate)
{
	static const int dot11rates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
	int ridx;

	for (ridx = 0; ridx < (int)(sizeof(dot11rates) / sizeof(dot11rates[0])); ridx++)
		if (dot11rates[ridx] == rate)
			return ridx;
	return 0;
}
3034
3035 /*
3036 * Calculate fixed tx rate information per client state;
3037 * this value is suitable for writing to the Format field
3038 * of a tx descriptor.
3039 */
3040 static uint16_t
mwl_calcformat(uint8_t rate,const struct ieee80211_node * ni)3041 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3042 {
3043 uint16_t fmt;
3044
3045 fmt = _IEEE80211_SHIFTMASK(3, EAGLE_TXD_ANTENNA)
3046 | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3047 EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3048 if (rate & IEEE80211_RATE_MCS) { /* HT MCS */
3049 fmt |= EAGLE_TXD_FORMAT_HT
3050 /* NB: 0x80 implicitly stripped from ucastrate */
3051 | _IEEE80211_SHIFTMASK(rate, EAGLE_TXD_RATE);
3052 /* XXX short/long GI may be wrong; re-check */
3053 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3054 fmt |= EAGLE_TXD_CHW_40
3055 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3056 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3057 } else {
3058 fmt |= EAGLE_TXD_CHW_20
3059 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3060 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3061 }
3062 } else { /* legacy rate */
3063 fmt |= EAGLE_TXD_FORMAT_LEGACY
3064 | _IEEE80211_SHIFTMASK(mwl_cvtlegacyrate(rate),
3065 EAGLE_TXD_RATE)
3066 | EAGLE_TXD_CHW_20
3067 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3068 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3069 EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
3070 }
3071 return fmt;
3072 }
3073
/*
 * Prepare and queue one frame for transmission: encrypt if
 * needed, prepend the firmware tx record, load the DMA map,
 * fill in the tx descriptor, and hand the frame to the f/w.
 * The caller's node reference is stashed in bf->bf_node and
 * reclaimed at tx completion.  Returns 0 on success; on error
 * the mbuf has been freed and an errno is returned.
 */
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
    struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, iswep, ismcast;
	int hdrlen, pktlen;
	struct mwl_txdesc *ds;
	struct mwl_txq *txq;
	struct ieee80211_frame *wh;
	struct mwltxrec *tr;
	struct mwl_node *mn;
	uint16_t qos;
#if MWL_TXDESC > 1
	int i;
#endif

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	pktlen = m0->m_pkthdr.len;
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = *(uint16_t *)ieee80211_getqos(wh);
	} else
		qos = 0;

	ieee80211_output_seqno_assign(ni, -1, m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 *
		 * NB: we do this even though the firmware will ignore
		 *     what we've done for WEP and TKIP as we need the
		 *     ExtIV filled in for CCMP and this also adjusts
		 *     the headers which simplifies our work below.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet length for the crypto additions
		 * done during encap and any other bits that the f/w
		 * will add later on.
		 */
		cip = k->wk_cipher;
		pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		sc->sc_tx_th.wt_flags = 0;	/* XXX */
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
		sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}
	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 * We are assured the mbuf has sufficient headroom to
	 * prepend in-place by the setup of ic_headroom in
	 * mwl_attach.
	 */
	if (hdrlen < sizeof(struct mwltxrec)) {
		const int space = sizeof(struct mwltxrec) - hdrlen;
		if (M_LEADINGSPACE(m0) < space) {
			/* NB: should never happen */
			device_printf(sc->sc_dev,
			    "not enough headroom, need %d found %zd, "
			    "m_flags 0x%x m_len %d\n",
			    space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
			ieee80211_dump_pkt(ic,
			    mtod(m0, const uint8_t *), m0->m_len, 0, -1);
			m_freem(m0);
			sc->sc_stats.mst_tx_noheadroom++;
			return EIO;
		}
		M_PREPEND(m0, space, M_NOWAIT);
	}
	tr = mtod(m0, struct mwltxrec *);
	if (wh != (struct ieee80211_frame *) &tr->wh)
		ovbcopy(wh, &tr->wh, hdrlen);
	/*
	 * Note: the "firmware length" is actually the length
	 * of the fully formed "802.11 payload".  That is, it's
	 * everything except for the 802.11 header.  In particular
	 * this includes all crypto material including the MIC!
	 */
	tr->fwlen = htole16(pktlen - hdrlen);

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = mwl_tx_dmasetup(sc, bf, m0);
	if (error != 0) {
		/* NB: stat collected in mwl_tx_dmasetup */
		DPRINTF(sc, MWL_DEBUG_XMIT,
		    "%s: unable to setup dma\n", __func__);
		return error;
	}
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	tr = mtod(m0, struct mwltxrec *);
	wh = (struct ieee80211_frame *)&tr->wh;

	/*
	 * Formulate tx descriptor.
	 */
	ds = bf->bf_desc;
	txq = bf->bf_txq;

	ds->QosCtrl = qos;			/* NB: already little-endian */
#if MWL_TXDESC == 1
	/*
	 * NB: multiframes should be zero because the descriptors
	 *     are initialized to zero.  This should handle the case
	 *     where the driver is built with MWL_TXDESC=1 but we are
	 *     using firmware with multi-segment support.
	 */
	ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
	ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
	ds->multiframes = htole32(bf->bf_nseg);
	ds->PktLen = htole16(m0->m_pkthdr.len);
	for (i = 0; i < bf->bf_nseg; i++) {
		ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
		ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
	}
#endif
	/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
	ds->Format = 0;
	ds->pad = 0;
	ds->ack_wcb_addr = 0;

	mn = MWL_NODE(ni);
	/*
	 * Select transmit rate.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		sc->sc_stats.mst_tx_mgmt++;
		/* fall thru... */
	case IEEE80211_FC0_TYPE_CTL:
		/* NB: assign to BE q to avoid bursting */
		ds->TxPriority = MWL_WME_AC_BE;
		break;
	case IEEE80211_FC0_TYPE_DATA:
		if (!ismcast) {
			const struct ieee80211_txparam *tp = ni->ni_txparms;
			/*
			 * EAPOL frames get forced to a fixed rate and w/o
			 * aggregation; otherwise check for any fixed rate
			 * for the client (may depend on association state).
			 */
			if (m0->m_flags & M_EAPOL) {
				const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
				ds->Format = mvp->mv_eapolformat;
				ds->pad = htole16(
				    EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
			} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
				/* XXX pre-calculate per node */
				ds->Format = htole16(
				    mwl_calcformat(tp->ucastrate, ni));
				ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
			}
			/*
			 * Route QoS frames to the txq of any matching BA
			 * stream so the f/w can aggregate them.
			 * NB: EAPOL frames will never have qos set
			 */
			if (qos == 0)
				ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
			else if (mwl_bastream_match(&mn->mn_ba[3], qos))
				ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
			else if (mwl_bastream_match(&mn->mn_ba[2], qos))
				ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
			else if (mwl_bastream_match(&mn->mn_ba[1], qos))
				ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
			else if (mwl_bastream_match(&mn->mn_ba[0], qos))
				ds->TxPriority = mn->mn_ba[0].txq;
#endif
			else
				ds->TxPriority = txq->qnum;
		} else
			ds->TxPriority = txq->qnum;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		sc->sc_stats.mst_tx_badframetype++;
		m_freem(m0);
		return EIO;
	}

	if (IFF_DUMPPKTS_XMIT(sc))
		ieee80211_dump_pkt(ic,
		    mtod(m0, const uint8_t *)+sizeof(uint16_t),
		    m0->m_len - sizeof(uint16_t), ds->DataRate, -1);

	MWL_TXQ_LOCK(txq);
	/* Flip ownership to the firmware and make it visible to the device. */
	ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
	STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_tx_timer = 5;	/* arm tx watchdog (seconds) */
	MWL_TXQ_UNLOCK(txq);

	return 0;
}
3312
/*
 * Map a firmware rate index back to the legacy 802.11 rate
 * (0.5 Mb/s units); out-of-range indices map to 0.
 * NB: inverse of mwl_cvtlegacyrate's table.
 */
static __inline int
mwl_cvtlegacyrix(int rix)
{
	static const int dot11rates[] =
	    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

	if (rix < 0 || rix >= (int)(sizeof(dot11rates) / sizeof(dot11rates[0])))
		return 0;
	return dot11rates[rix];
}
3320
3321 /*
3322 * Process completed xmit descriptors from the specified queue.
3323 */
3324 static int
mwl_tx_processq(struct mwl_softc * sc,struct mwl_txq * txq)3325 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3326 {
3327 #define EAGLE_TXD_STATUS_MCAST \
3328 (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3329 struct ieee80211com *ic = &sc->sc_ic;
3330 struct mwl_txbuf *bf;
3331 struct mwl_txdesc *ds;
3332 struct ieee80211_node *ni;
3333 int nreaped;
3334 uint32_t status;
3335
3336 DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3337 for (nreaped = 0;; nreaped++) {
3338 MWL_TXQ_LOCK(txq);
3339 bf = STAILQ_FIRST(&txq->active);
3340 if (bf == NULL) {
3341 MWL_TXQ_UNLOCK(txq);
3342 break;
3343 }
3344 ds = bf->bf_desc;
3345 MWL_TXDESC_SYNC(txq, ds,
3346 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3347 if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3348 MWL_TXQ_UNLOCK(txq);
3349 break;
3350 }
3351 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3352 MWL_TXQ_UNLOCK(txq);
3353
3354 #ifdef MWL_DEBUG
3355 if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3356 mwl_printtxbuf(bf, txq->qnum, nreaped);
3357 #endif
3358 ni = bf->bf_node;
3359 if (ni != NULL) {
3360 status = le32toh(ds->Status);
3361 int rate;
3362 if (status & EAGLE_TXD_STATUS_OK) {
3363 uint16_t Format = le16toh(ds->Format);
3364 uint8_t txant = _IEEE80211_MASKSHIFT(Format,
3365 EAGLE_TXD_ANTENNA);
3366
3367 sc->sc_stats.mst_ant_tx[txant]++;
3368 if (status & EAGLE_TXD_STATUS_OK_RETRY)
3369 sc->sc_stats.mst_tx_retries++;
3370 if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3371 sc->sc_stats.mst_tx_mretries++;
3372 if (txq->qnum >= MWL_WME_AC_VO)
3373 ic->ic_wme.wme_hipri_traffic++;
3374 rate = _IEEE80211_MASKSHIFT(Format,
3375 EAGLE_TXD_RATE);
3376 if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3377 rate = mwl_cvtlegacyrix(rate);
3378 } else
3379 rate |= IEEE80211_RATE_MCS;
3380 sc->sc_stats.mst_tx_rate = rate;
3381 ieee80211_node_set_txrate_dot11rate(ni, rate);
3382 } else {
3383 if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3384 sc->sc_stats.mst_tx_linkerror++;
3385 if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3386 sc->sc_stats.mst_tx_xretries++;
3387 if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3388 sc->sc_stats.mst_tx_aging++;
3389 if (bf->bf_m->m_flags & M_FF)
3390 sc->sc_stats.mst_ff_txerr++;
3391 }
3392 if (bf->bf_m->m_flags & M_TXCB)
3393 /* XXX strip fw len in case header inspected */
3394 m_adj(bf->bf_m, sizeof(uint16_t));
3395 ieee80211_tx_complete(ni, bf->bf_m,
3396 (status & EAGLE_TXD_STATUS_OK) == 0);
3397 } else
3398 m_freem(bf->bf_m);
3399 ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3400
3401 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3402 BUS_DMASYNC_POSTWRITE);
3403 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3404
3405 mwl_puttxbuf_tail(txq, bf);
3406 }
3407 return nreaped;
3408 #undef EAGLE_TXD_STATUS_MCAST
3409 }
3410
3411 /*
3412 * Deferred processing of transmit interrupt; special-cased
3413 * for four hardware queues, 0-3.
3414 */
3415 static void
mwl_tx_proc(void * arg,int npending)3416 mwl_tx_proc(void *arg, int npending)
3417 {
3418 struct mwl_softc *sc = arg;
3419 int nreaped;
3420
3421 /*
3422 * Process each active queue.
3423 */
3424 nreaped = 0;
3425 if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3426 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3427 if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3428 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3429 if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3430 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3431 if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3432 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
3433
3434 if (nreaped != 0) {
3435 sc->sc_tx_timer = 0;
3436 if (mbufq_first(&sc->sc_snd) != NULL) {
3437 /* NB: kick fw; the tx thread may have been preempted */
3438 mwl_hal_txstart(sc->sc_mh, 0);
3439 mwl_start(sc);
3440 }
3441 }
3442 }
3443
/*
 * Drain all frames queued on a single tx queue: unload DMA
 * state, reclaim any node references, free the mbufs, and
 * return the buffers to the free list.
 */
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
	struct ieee80211_node *ni;
	struct mwl_txbuf *bf;
	u_int ix __unused;	/* NB: only used for debug output */

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block mwl_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		MWL_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->active);
		if (bf == NULL) {
			MWL_TXQ_UNLOCK(txq);
			break;
		}
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
		if (sc->sc_debug & MWL_DEBUG_RESET) {
			struct ieee80211com *ic = &sc->sc_ic;
			const struct mwltxrec *tr =
			    mtod(bf->bf_m, const struct mwltxrec *);
			mwl_printtxbuf(bf, txq->qnum, ix);
			ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
			    bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
		}
#endif /* MWL_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);

		mwl_puttxbuf_tail(txq, bf);
	}
}
3487
3488 /*
3489 * Drain the transmit queues and reclaim resources.
3490 */
3491 static void
mwl_draintxq(struct mwl_softc * sc)3492 mwl_draintxq(struct mwl_softc *sc)
3493 {
3494 int i;
3495
3496 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3497 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
3498 sc->sc_tx_timer = 0;
3499 }
3500
3501 #ifdef MWL_DIAGAPI
3502 /*
3503 * Reset the transmit queues to a pristine state after a fw download.
3504 */
3505 static void
mwl_resettxq(struct mwl_softc * sc)3506 mwl_resettxq(struct mwl_softc *sc)
3507 {
3508 int i;
3509
3510 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3511 mwl_txq_reset(sc, &sc->sc_txq[i]);
3512 }
3513 #endif /* MWL_DIAGAPI */
3514
3515 /*
3516 * Clear the transmit queues of any frames submitted for the
3517 * specified vap. This is done when the vap is deleted so we
3518 * don't potentially reference the vap after it is gone.
3519 * Note we cannot remove the frames; we only reclaim the node
3520 * reference.
3521 */
3522 static void
mwl_cleartxq(struct mwl_softc * sc,struct ieee80211vap * vap)3523 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3524 {
3525 struct mwl_txq *txq;
3526 struct mwl_txbuf *bf;
3527 int i;
3528
3529 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3530 txq = &sc->sc_txq[i];
3531 MWL_TXQ_LOCK(txq);
3532 STAILQ_FOREACH(bf, &txq->active, bf_list) {
3533 struct ieee80211_node *ni = bf->bf_node;
3534 if (ni != NULL && ni->ni_vap == vap) {
3535 bf->bf_node = NULL;
3536 ieee80211_free_node(ni);
3537 }
3538 }
3539 MWL_TXQ_UNLOCK(txq);
3540 }
3541 }
3542
/*
 * Intercept received action frames: MIMO power save changes are
 * pushed to the firmware; everything else is handed to the
 * net80211 handler we replaced (sc_recv_action).
 */
static int
mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
	const uint8_t *frm, const uint8_t *efrm)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	const struct ieee80211_action *ia;

	ia = (const struct ieee80211_action *) frm;
	if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
	    ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
		const struct ieee80211_action_ht_mimopowersave *mps =
		    (const struct ieee80211_action_ht_mimopowersave *) ia;

		mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
		    mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
		    _IEEE80211_MASKSHIFT(mps->am_control,
			IEEE80211_A_HT_MIMOPWRSAVE_MODE));
		return 0;
	} else
		return sc->sc_recv_action(ni, wh, frm, efrm);
}
3564
/*
 * net80211 ADDBA request hook: reserve a firmware BA stream slot
 * for the node/TID before letting the 802.11 layer send the
 * ADDBA request.  Returns 0 (suppressing aggregation) when no
 * slot or firmware stream is available; otherwise chains to the
 * saved sc_addba_request handler.
 */
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int dialogtoken, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_node *mn = MWL_NODE(ni);
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		const MWL_HAL_BASTREAM *sp;
		/*
		 * Check for a free BA stream slot.
		 * NB: highest-index slot is tried first; the chain
		 * falls into the brace block when all are in use.
		 */
#if MWL_MAXBA > 3
		if (mn->mn_ba[3].bastream == NULL)
			bas = &mn->mn_ba[3];
		else
#endif
#if MWL_MAXBA > 2
		if (mn->mn_ba[2].bastream == NULL)
			bas = &mn->mn_ba[2];
		else
#endif
#if MWL_MAXBA > 1
		if (mn->mn_ba[1].bastream == NULL)
			bas = &mn->mn_ba[1];
		else
#endif
#if MWL_MAXBA > 0
		if (mn->mn_ba[0].bastream == NULL)
			bas = &mn->mn_ba[0];
		else
#endif
		{
			/* sta already has max BA streams */
			/* XXX assign BA stream to highest priority tid */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: already has max bastreams\n", __func__);
			sc->sc_stats.mst_ampdu_reject++;
			return 0;
		}
		/* NB: no held reference to ni */
		sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
		    (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
		    ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
		    ni, tap);
		if (sp == NULL) {
			/*
			 * No available stream, return 0 so no
			 * a-mpdu aggregation will be done.
			 */
			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: no bastream available\n", __func__);
			sc->sc_stats.mst_ampdu_nostream++;
			return 0;
		}
		DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
		    __func__, sp);
		/* NB: qos is left zero so we won't match in mwl_tx_start */
		bas->bastream = sp;
		tap->txa_private = bas;
	}
	/* fetch current seq# from the firmware; if available */
	if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
	    vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
	    &tap->txa_start) != 0)
		tap->txa_start = 0;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
3636
/*
 * net80211 ADDBA response hook: on success tell the firmware to
 * set up the pre-allocated BA stream; on failure (or NAK from
 * the peer) release the reserved stream.  Always chains to the
 * saved sc_addba_response handler when a stream exists.
 */
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
	int code, int baparamset, int batimeout)
{
	struct mwl_softc *sc = ni->ni_ic->ic_softc;
	struct mwl_bastate *bas;

	bas = tap->txa_private;
	if (bas == NULL) {
		/* XXX should not happen */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: no BA stream allocated, TID %d\n",
		    __func__, tap->txa_tid);
		sc->sc_stats.mst_addba_nostream++;
		return 0;
	}
	if (code == IEEE80211_STATUS_SUCCESS) {
		struct ieee80211vap *vap = ni->ni_vap;
		int bufsiz, error;

		/*
		 * Tell the firmware to setup the BA stream;
		 * we know resources are available because we
		 * pre-allocated one before forming the request.
		 */
		bufsiz = _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_BUFSIZ);
		if (bufsiz == 0)
			bufsiz = IEEE80211_AGGR_BAWMAX;
		error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
		    bas->bastream, bufsiz, bufsiz, tap->txa_start);
		if (error != 0) {
			/*
			 * Setup failed, return immediately so no a-mpdu
			 * aggregation will be done.
			 */
			mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
			mwl_bastream_free(bas);
			tap->txa_private = NULL;

			DPRINTF(sc, MWL_DEBUG_AMPDU,
			    "%s: create failed, error %d, bufsiz %d TID %d "
			    "htparam 0x%x\n", __func__, error, bufsiz,
			    tap->txa_tid, ni->ni_htparam);
			sc->sc_stats.mst_bacreate_failed++;
			return 0;
		}
		/* NB: cache txq to avoid ptr indirect */
		mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
		    "htparam 0x%x\n", __func__, bas->bastream,
		    bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
	} else {
		/*
		 * Other side NAK'd us; return the resources.
		 */
		DPRINTF(sc, MWL_DEBUG_AMPDU,
		    "%s: request failed with code %d, destroy bastream %p\n",
		    __func__, code, bas->bastream);
		mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
		mwl_bastream_free(bas);
		tap->txa_private = NULL;
	}
	/* NB: firmware sends BAR so we don't need to */
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
3703
3704 static void
mwl_addba_stop(struct ieee80211_node * ni,struct ieee80211_tx_ampdu * tap)3705 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3706 {
3707 struct mwl_softc *sc = ni->ni_ic->ic_softc;
3708 struct mwl_bastate *bas;
3709
3710 bas = tap->txa_private;
3711 if (bas != NULL) {
3712 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3713 __func__, bas->bastream);
3714 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3715 mwl_bastream_free(bas);
3716 tap->txa_private = NULL;
3717 }
3718 sc->sc_addba_stop(ni, tap);
3719 }
3720
3721 /*
3722 * Setup the rx data structures. This should only be
3723 * done once or we may get out of sync with the firmware.
3724 */
3725 static int
mwl_startrecv(struct mwl_softc * sc)3726 mwl_startrecv(struct mwl_softc *sc)
3727 {
3728 if (!sc->sc_recvsetup) {
3729 struct mwl_rxbuf *bf, *prev;
3730 struct mwl_rxdesc *ds;
3731
3732 prev = NULL;
3733 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3734 int error = mwl_rxbuf_init(sc, bf);
3735 if (error != 0) {
3736 DPRINTF(sc, MWL_DEBUG_RECV,
3737 "%s: mwl_rxbuf_init failed %d\n",
3738 __func__, error);
3739 return error;
3740 }
3741 if (prev != NULL) {
3742 ds = prev->bf_desc;
3743 ds->pPhysNext = htole32(bf->bf_daddr);
3744 }
3745 prev = bf;
3746 }
3747 if (prev != NULL) {
3748 ds = prev->bf_desc;
3749 ds->pPhysNext =
3750 htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3751 }
3752 sc->sc_recvsetup = 1;
3753 }
3754 mwl_mode_init(sc); /* set filters, etc. */
3755 return 0;
3756 }
3757
3758 static MWL_HAL_APMODE
mwl_getapmode(const struct ieee80211vap * vap,struct ieee80211_channel * chan)3759 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3760 {
3761 MWL_HAL_APMODE mode;
3762
3763 if (IEEE80211_IS_CHAN_HT(chan)) {
3764 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3765 mode = AP_MODE_N_ONLY;
3766 else if (IEEE80211_IS_CHAN_5GHZ(chan))
3767 mode = AP_MODE_AandN;
3768 else if (vap->iv_flags & IEEE80211_F_PUREG)
3769 mode = AP_MODE_GandN;
3770 else
3771 mode = AP_MODE_BandGandN;
3772 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3773 if (vap->iv_flags & IEEE80211_F_PUREG)
3774 mode = AP_MODE_G_ONLY;
3775 else
3776 mode = AP_MODE_MIXED;
3777 } else if (IEEE80211_IS_CHAN_B(chan))
3778 mode = AP_MODE_B_ONLY;
3779 else if (IEEE80211_IS_CHAN_A(chan))
3780 mode = AP_MODE_A_ONLY;
3781 else
3782 mode = AP_MODE_MIXED; /* XXX should not happen? */
3783 return mode;
3784 }
3785
/*
 * Push the AP operating mode for the vap's channel to the hal.
 */
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
3792
3793 /*
3794 * Set/change channels.
3795 */
3796 static int
mwl_chan_set(struct mwl_softc * sc,struct ieee80211_channel * chan)3797 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3798 {
3799 struct mwl_hal *mh = sc->sc_mh;
3800 struct ieee80211com *ic = &sc->sc_ic;
3801 MWL_HAL_CHANNEL hchan;
3802 int maxtxpow;
3803
3804 DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3805 __func__, chan->ic_freq, chan->ic_flags);
3806
3807 /*
3808 * Convert to a HAL channel description with
3809 * the flags constrained to reflect the current
3810 * operating mode.
3811 */
3812 mwl_mapchan(&hchan, chan);
3813 mwl_hal_intrset(mh, 0); /* disable interrupts */
3814 #if 0
3815 mwl_draintxq(sc); /* clear pending tx frames */
3816 #endif
3817 mwl_hal_setchannel(mh, &hchan);
3818 /*
3819 * Tx power is cap'd by the regulatory setting and
3820 * possibly a user-set limit. We pass the min of
3821 * these to the hal to apply them to the cal data
3822 * for this channel.
3823 * XXX min bound?
3824 */
3825 maxtxpow = 2*chan->ic_maxregpower;
3826 if (maxtxpow > ic->ic_txpowlimit)
3827 maxtxpow = ic->ic_txpowlimit;
3828 mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3829 /* NB: potentially change mcast/mgt rates */
3830 mwl_setcurchanrates(sc);
3831
3832 /*
3833 * Update internal state.
3834 */
3835 sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3836 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3837 if (IEEE80211_IS_CHAN_A(chan)) {
3838 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3839 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3840 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3841 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3842 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3843 } else {
3844 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3845 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3846 }
3847 sc->sc_curchan = hchan;
3848 mwl_hal_intrset(mh, sc->sc_imask);
3849
3850 return 0;
3851 }
3852
/*
 * net80211 scan-start callback; nothing to do for this hardware
 * beyond debug logging (channel changes arrive via ic_set_channel).
 */
static void
mwl_scan_start(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3860
/*
 * net80211 scan-end callback; nothing to do for this hardware
 * beyond debug logging.
 */
static void
mwl_scan_end(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
3868
/*
 * net80211 channel-change callback; delegate to mwl_chan_set
 * (errors are intentionally ignored here).
 */
static void
mwl_set_channel(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;

	(void) mwl_chan_set(sc, ic->ic_curchan);
}
3876
3877 /*
3878 * Handle a channel switch request. We inform the firmware
3879 * and mark the global state to suppress various actions.
3880 * NB: we issue only one request to the fw; we may be called
3881 * multiple times if there are multiple vap's.
3882 */
3883 static void
mwl_startcsa(struct ieee80211vap * vap)3884 mwl_startcsa(struct ieee80211vap *vap)
3885 {
3886 struct ieee80211com *ic = vap->iv_ic;
3887 struct mwl_softc *sc = ic->ic_softc;
3888 MWL_HAL_CHANNEL hchan;
3889
3890 if (sc->sc_csapending)
3891 return;
3892
3893 mwl_mapchan(&hchan, ic->ic_csa_newchan);
3894 /* 1 =>'s quiet channel */
3895 mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
3896 sc->sc_csapending = 1;
3897 }
3898
3899 /*
3900 * Plumb any static WEP key for the station. This is
3901 * necessary as we must propagate the key from the
3902 * global key table of the vap to each sta db entry.
3903 */
3904 static void
mwl_setanywepkey(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])3905 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3906 {
3907 if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
3908 IEEE80211_F_PRIVACY &&
3909 vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
3910 vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
3911 (void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey],
3912 mac);
3913 }
3914
/*
 * Create a firmware station db entry for a peer and plumb any
 * static WEP key.  For WDS vaps the entry is installed via the
 * companion AP vap's f/w handle.  Returns the hal error code.
 */
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct ieee80211vap *vap = ni->ni_vap;
	struct mwl_hal_vap *hvap;
	int error;

	if (vap->iv_opmode == IEEE80211_M_WDS) {
		/*
		 * WDS vap's do not have a f/w vap; instead they piggyback
		 * on an AP vap and we must install the sta db entry and
		 * crypto state using that AP's handle (the WDS vap has none).
		 */
		hvap = MWL_VAP(vap)->mv_ap_hvap;
	} else
		hvap = MWL_VAP(vap)->mv_hvap;
	error = mwl_hal_newstation(hvap, ni->ni_macaddr,
	    aid, staid, pi,
	    ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
	    ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
	if (error == 0) {
		/*
		 * Setup security for this station.  For sta mode this is
		 * needed even though do the same thing on transition to
		 * AUTH state because the call to mwl_hal_newstation
		 * clobbers the crypto state we setup.
		 */
		mwl_setanywepkey(vap, ni->ni_macaddr);
	}
	return error;
#undef WME
}
3948
3949 static void
mwl_setglobalkeys(struct ieee80211vap * vap)3950 mwl_setglobalkeys(struct ieee80211vap *vap)
3951 {
3952 struct ieee80211_key *wk;
3953
3954 wk = &vap->iv_nw_keys[0];
3955 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
3956 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
3957 (void) _mwl_key_set(vap, wk, vap->iv_myaddr);
3958 }
3959
3960 /*
3961 * Convert a legacy rate set to a firmware bitmask.
3962 */
3963 static uint32_t
get_rate_bitmap(const struct ieee80211_rateset * rs)3964 get_rate_bitmap(const struct ieee80211_rateset *rs)
3965 {
3966 uint32_t rates;
3967 int i;
3968
3969 rates = 0;
3970 for (i = 0; i < rs->rs_nrates; i++)
3971 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
3972 case 2: rates |= 0x001; break;
3973 case 4: rates |= 0x002; break;
3974 case 11: rates |= 0x004; break;
3975 case 22: rates |= 0x008; break;
3976 case 44: rates |= 0x010; break;
3977 case 12: rates |= 0x020; break;
3978 case 18: rates |= 0x040; break;
3979 case 24: rates |= 0x080; break;
3980 case 36: rates |= 0x100; break;
3981 case 48: rates |= 0x200; break;
3982 case 72: rates |= 0x400; break;
3983 case 96: rates |= 0x800; break;
3984 case 108: rates |= 0x1000; break;
3985 }
3986 return rates;
3987 }
3988
3989 /*
3990 * Construct an HT firmware bitmask from an HT rate set.
3991 */
3992 static uint32_t
get_htrate_bitmap(const struct ieee80211_htrateset * rs)3993 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
3994 {
3995 uint32_t rates;
3996 int i;
3997
3998 rates = 0;
3999 for (i = 0; i < rs->rs_nrates; i++) {
4000 if (rs->rs_rates[i] < 16)
4001 rates |= 1<<rs->rs_rates[i];
4002 }
4003 return rates;
4004 }
4005
4006 /*
4007 * Craft station database entry for station.
4008 * NB: use host byte order here, the hal handles byte swapping.
4009 */
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
	const struct ieee80211vap *vap = ni->ni_vap;

	memset(pi, 0, sizeof(*pi));
	/* legacy (non-HT) rate set converted to a firmware bitmask */
	pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
	pi->CapInfo = ni->ni_capinfo;
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		/* HT capabilities, etc */
		pi->HTCapabilitiesInfo = ni->ni_htcap;
		/* XXX pi.HTCapabilitiesInfo */
		pi->MacHTParamInfo = ni->ni_htparam;
		pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
		pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
		pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
		pi->AddHtInfo.OpMode = ni->ni_htopmode;
		pi->AddHtInfo.stbc = ni->ni_htstbc;

		/* constrain according to local configuration */
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
		if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
		/* drop 40MHz cap unless the peer is using 40MHz rx bw */
		if (ni->ni_chw != NET80211_STA_RX_BW_40)
			pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
	}
	return pi;
}
4039
4040 /*
4041 * Re-create the local sta db entry for a vap to ensure
4042 * up to date WME state is pushed to the firmware. Because
4043 * this resets crypto state this must be followed by a
4044 * reload of any keys in the global key table.
4045 */
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define	WME(ie) ((const struct ieee80211_wme_info *) ie)
	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
	struct ieee80211_node *bss;
	MWL_HAL_PEERINFO pi;
	int error;

	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		bss = vap->iv_bss;
		/*
		 * NB: peer info is only supplied once associated (RUN
		 * state); before that a NULL pi is handed to the hal.
		 */
		error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
		    vap->iv_state == IEEE80211_S_RUN ?
			mkpeerinfo(&pi, bss) : NULL,
		    (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
		    bss->ni_ies.wme_ie != NULL ?
			WME(bss->ni_ies.wme_ie)->wme_info : 0);
		if (error == 0)
			/* NB: newstation clobbers crypto state; reload keys */
			mwl_setglobalkeys(vap);
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		error = mwl_hal_newstation(hvap, vap->iv_myaddr,
		    0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
		if (error == 0)
			mwl_setglobalkeys(vap);
		break;
	default:
		/* other opmodes need no local sta db entry */
		error = 0;
		break;
	}
	return error;
#undef WME
}
4081
/*
 * 802.11 state machine hook.  Do driver-specific work both before
 * and after the net80211 state transition; see inline comments for
 * the per-state actions.
 */
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct mwl_vap *mvp = MWL_VAP(vap);
	struct mwl_hal_vap *hvap = mvp->mv_hvap;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = NULL;
	struct mwl_softc *sc = ic->ic_softc;
	struct mwl_hal *mh = sc->sc_mh;
	enum ieee80211_state ostate = vap->iv_state;
	int error;

	DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
	    if_name(vap->iv_ifp), __func__,
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);

	/* stop the firmware-prod timer; restarted below if we enter RUN */
	callout_stop(&sc->sc_timer);
	/*
	 * Clear current radar detection state.
	 */
	if (ostate == IEEE80211_S_CAC) {
		/* stop quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
	} else if (sc->sc_radarena) {
		/* stop in-service radar detection */
		mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
		sc->sc_radarena = 0;
	}
	/*
	 * Carry out per-state actions before doing net80211 work.
	 */
	if (nstate == IEEE80211_S_INIT) {
		/* NB: only ap+sta vap's have a fw entity */
		if (hvap != NULL)
			mwl_hal_stop(hvap);
	} else if (nstate == IEEE80211_S_SCAN) {
		mwl_hal_start(hvap);
		/* NB: this disables beacon frames */
		mwl_hal_setinframode(hvap);
	} else if (nstate == IEEE80211_S_AUTH) {
		/*
		 * Must create a sta db entry in case a WEP key needs to
		 * be plumbed.  This entry will be overwritten if we
		 * associate; otherwise it will be reclaimed on node free.
		 */
		ni = vap->iv_bss;
		MWL_NODE(ni)->mn_hvap = hvap;
		(void) mwl_peerstadb(ni, 0, 0, NULL);
	} else if (nstate == IEEE80211_S_CSA) {
		/* XXX move to below? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_MBSS)
			mwl_startcsa(vap);
	} else if (nstate == IEEE80211_S_CAC) {
		/* XXX move to below? */
		/* stop ap xmit and enable quiet mode radar detection */
		mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = mvp->mv_newstate(vap, nstate, arg);

	/*
	 * Carry out work that must be done after net80211 runs;
	 * this work requires up to date state (e.g. iv_bss).
	 */
	if (error == 0 && nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, MWL_DEBUG_STATE,
		    "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n",
		    if_name(vap->iv_ifp), __func__, vap->iv_flags,
		    ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
		    ieee80211_chan2ieee(ic, ic->ic_curchan));

		/*
		 * Recreate local sta db entry to update WME/HT state.
		 */
		mwl_localstadb(vap);
		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			if (ostate == IEEE80211_S_CAC) {
				/* enable in-service radar detection */
				mwl_hal_setradardetection(mh,
				    DR_IN_SERVICE_MONITOR_START);
				sc->sc_radarena = 1;
			}
			/*
			 * Allocate and setup the beacon frame
			 * (and related state).
			 */
			error = mwl_reset_vap(vap, IEEE80211_S_RUN);
			if (error != 0) {
				DPRINTF(sc, MWL_DEBUG_STATE,
				    "%s: beacon setup failed, error %d\n",
				    __func__, error);
				goto bad;
			}
			/* NB: must be after setting up beacon */
			mwl_hal_start(hvap);
			break;
		case IEEE80211_M_STA:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
			    if_name(vap->iv_ifp), __func__, ni->ni_associd);
			/*
			 * Set state now that we're associated.
			 */
			mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
			mwl_setrates(vap);
			mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
			/* first DWDS sta vap entering RUN enables DWDS */
			if ((vap->iv_flags & IEEE80211_F_DWDS) &&
			    sc->sc_ndwdsvaps++ == 0)
				mwl_hal_setdwds(mh, 1);
			break;
		case IEEE80211_M_WDS:
			DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
			    if_name(vap->iv_ifp), __func__,
			    ether_sprintf(ni->ni_bssid));
			mwl_seteapolformat(vap);
			break;
		default:
			break;
		}
		/*
		 * Set CS mode according to operating channel;
		 * this mostly an optimization for 5GHz.
		 *
		 * NB: must follow mwl_hal_start which resets csmode
		 */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
		else
			mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
		/*
		 * Start timer to prod firmware.
		 */
		if (sc->sc_ageinterval != 0)
			callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
			    mwl_agestations, sc);
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* XXX set chip in power save */
	} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
	    --sc->sc_ndwdsvaps == 0)
		/*
		 * NOTE(review): this decrements for any DWDS vap leaving
		 * RUN (and on a RUN transition that errored) — presumably
		 * it is balanced with the increment above only for sta
		 * vaps; verify the refcount cannot underflow.
		 */
		mwl_hal_setdwds(mh, 0);
bad:
	return error;
}
4234
4235 /*
4236 * Manage station id's; these are separate from AID's
4237 * as AID's may have values out of the range of possible
4238 * station id's acceptable to the firmware.
4239 */
static int
allocstaid(struct mwl_softc *sc, int aid)
{
	int staid;

	/*
	 * Prefer the AID as the station id when it is in range and
	 * not already in use; otherwise pick the first free slot.
	 */
	if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
		/* NB: don't use 0 */
		for (staid = 1; staid < MWL_MAXSTAID; staid++)
			if (isclr(sc->sc_staid, staid))
				break;
		/*
		 * NOTE(review): if every id is in use the loop falls
		 * through with staid == MWL_MAXSTAID and the setbit
		 * below writes past the map — presumably the station
		 * count is bounded elsewhere; verify.
		 */
	} else
		staid = aid;
	setbit(sc->sc_staid, staid);
	return staid;
}
4255
static void
delstaid(struct mwl_softc *sc, int staid)
{
	/* release a station id previously handed out by allocstaid */
	clrbit(sc->sc_staid, staid);
}
4261
4262 /*
4263 * Setup driver-specific state for a newly associated node.
4264 * Note that we're called also on a re-associate, the isnew
4265 * param tells us if this is the first time or not.
4266 */
4267 static void
mwl_newassoc(struct ieee80211_node * ni,int isnew)4268 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4269 {
4270 struct ieee80211vap *vap = ni->ni_vap;
4271 struct mwl_softc *sc = vap->iv_ic->ic_softc;
4272 struct mwl_node *mn = MWL_NODE(ni);
4273 MWL_HAL_PEERINFO pi;
4274 uint16_t aid;
4275 int error;
4276
4277 aid = IEEE80211_AID(ni->ni_associd);
4278 if (isnew) {
4279 mn->mn_staid = allocstaid(sc, aid);
4280 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4281 } else {
4282 mn = MWL_NODE(ni);
4283 /* XXX reset BA stream? */
4284 }
4285 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4286 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4287 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4288 if (error != 0) {
4289 DPRINTF(sc, MWL_DEBUG_NODE,
4290 "%s: error %d creating sta db entry\n",
4291 __func__, error);
4292 /* XXX how to deal with error? */
4293 }
4294 }
4295
4296 /*
4297 * Periodically poke the firmware to age out station state
4298 * (power save queues, pending tx aggregates).
4299 */
static void
mwl_agestations(void *arg)
{
	struct mwl_softc *sc = arg;

	/* poke the f/w to age out power save queues / tx aggregates */
	mwl_hal_setkeepalive(sc->sc_mh);
	if (sc->sc_ageinterval != 0)		/* NB: catch dynamic changes */
		callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
4309
4310 static const struct mwl_hal_channel *
findhalchannel(const MWL_HAL_CHANNELINFO * ci,int ieee)4311 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4312 {
4313 int i;
4314
4315 for (i = 0; i < ci->nchannels; i++) {
4316 const struct mwl_hal_channel *hc = &ci->channels[i];
4317 if (hc->ieee == ieee)
4318 return hc;
4319 }
4320 return NULL;
4321 }
4322
4323 static int
mwl_setregdomain(struct ieee80211com * ic,struct ieee80211_regdomain * rd,int nchan,struct ieee80211_channel chans[])4324 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4325 int nchan, struct ieee80211_channel chans[])
4326 {
4327 struct mwl_softc *sc = ic->ic_softc;
4328 struct mwl_hal *mh = sc->sc_mh;
4329 const MWL_HAL_CHANNELINFO *ci;
4330 int i;
4331
4332 for (i = 0; i < nchan; i++) {
4333 struct ieee80211_channel *c = &chans[i];
4334 const struct mwl_hal_channel *hc;
4335
4336 if (IEEE80211_IS_CHAN_2GHZ(c)) {
4337 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4338 IEEE80211_IS_CHAN_HT40(c) ?
4339 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4340 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4341 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4342 IEEE80211_IS_CHAN_HT40(c) ?
4343 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4344 } else {
4345 device_printf(sc->sc_dev,
4346 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4347 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4348 return EINVAL;
4349 }
4350 /*
4351 * Verify channel has cal data and cap tx power.
4352 */
4353 hc = findhalchannel(ci, c->ic_ieee);
4354 if (hc != NULL) {
4355 if (c->ic_maxpower > 2*hc->maxTxPow)
4356 c->ic_maxpower = 2*hc->maxTxPow;
4357 goto next;
4358 }
4359 if (IEEE80211_IS_CHAN_HT40(c)) {
4360 /*
4361 * Look for the extension channel since the
4362 * hal table only has the primary channel.
4363 */
4364 hc = findhalchannel(ci, c->ic_extieee);
4365 if (hc != NULL) {
4366 if (c->ic_maxpower > 2*hc->maxTxPow)
4367 c->ic_maxpower = 2*hc->maxTxPow;
4368 goto next;
4369 }
4370 }
4371 device_printf(sc->sc_dev,
4372 "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4373 __func__, c->ic_ieee, c->ic_extieee,
4374 c->ic_freq, c->ic_flags);
4375 return EINVAL;
4376 next:
4377 ;
4378 }
4379 return 0;
4380 }
4381
4382 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4383 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
4384
4385 static void
addht40channels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,int flags)4386 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4387 const MWL_HAL_CHANNELINFO *ci, int flags)
4388 {
4389 int i, error;
4390
4391 for (i = 0; i < ci->nchannels; i++) {
4392 const struct mwl_hal_channel *hc = &ci->channels[i];
4393
4394 error = ieee80211_add_channel_ht40(chans, maxchans, nchans,
4395 hc->ieee, hc->maxTxPow, flags);
4396 if (error != 0 && error != ENOENT)
4397 break;
4398 }
4399 }
4400
4401 static void
addchannels(struct ieee80211_channel chans[],int maxchans,int * nchans,const MWL_HAL_CHANNELINFO * ci,const uint8_t bands[])4402 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4403 const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[])
4404 {
4405 int i, error;
4406
4407 error = 0;
4408 for (i = 0; i < ci->nchannels && error == 0; i++) {
4409 const struct mwl_hal_channel *hc = &ci->channels[i];
4410
4411 error = ieee80211_add_channel(chans, maxchans, nchans,
4412 hc->ieee, hc->freq, hc->maxTxPow, 0, bands);
4413 }
4414 }
4415
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
	struct ieee80211_channel chans[])
{
	const MWL_HAL_CHANNELINFO *ci;
	uint8_t bands[IEEE80211_MODE_BYTES];

	/*
	 * Use the channel info from the hal to craft the
	 * channel list.  Note that we pass back an unsorted
	 * list; the caller is required to sort it for us
	 * (if desired).
	 */
	*nchans = 0;
	/* 2.4GHz 20MHz channels: 11b/11g/11ng */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11B);
		setbit(bands, IEEE80211_MODE_11G);
		setbit(bands, IEEE80211_MODE_11NG);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* 5GHz 20MHz channels: 11a/11na */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) {
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		setbit(bands, IEEE80211_MODE_11NA);
		addchannels(chans, maxchans, nchans, ci, bands);
	}
	/* HT40 channels in each band */
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
	if (mwl_hal_getchannelinfo(sc->sc_mh,
	    MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
		addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
4452
4453 static void
mwl_getradiocaps(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])4454 mwl_getradiocaps(struct ieee80211com *ic,
4455 int maxchans, int *nchans, struct ieee80211_channel chans[])
4456 {
4457 struct mwl_softc *sc = ic->ic_softc;
4458
4459 getchannels(sc, maxchans, nchans, chans);
4460 }
4461
static int
mwl_getchannels(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * Use the channel info from the hal to craft the
	 * channel list for net80211.  Note that we pass up
	 * an unsorted list; net80211 will sort it for us.
	 */
	memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
	ic->ic_nchans = 0;
	getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);

	/* default regulatory state until userland sets it */
	ic->ic_regdomain.regdomain = SKU_DEBUG;
	ic->ic_regdomain.country = CTRY_DEFAULT;
	ic->ic_regdomain.location = 'I';
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX? */
	ic->ic_regdomain.isocc[1] = ' ';
	/* fail if the hal yielded no usable channels */
	return (ic->ic_nchans == 0 ? EIO : 0);
}
4483 #undef IEEE80211_CHAN_HTA
4484 #undef IEEE80211_CHAN_HTG
4485
4486 #ifdef MWL_DEBUG
/* Debug helper: dump one rx descriptor/buffer to the console. */
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
	const struct mwl_rxdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	/* "*" marks a completed-ok descriptor, "!" a completed-bad one */
	printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
	    "      STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
	    ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
	    le32toh(ds->pPhysBuffData), ds->RxControl,
	    ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
		"" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
	    ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
	    ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
4502
/* Debug helper: dump one tx descriptor/buffer to the console. */
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
	const struct mwl_txdesc *ds = bf->bf_desc;
	uint32_t status = le32toh(ds->Status);

	printf("Q%u[%3u]", qnum, ix);
	printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
	printf("    NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
	    le32toh(ds->pPhysNext),
	    le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
	    status & EAGLE_TXD_STATUS_USED ?
		"" : (status & 3) != 0 ? " *" : " !");
	printf("    RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
	    ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
	    le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
	/* multi-segment descriptors carry per-segment length/ptr arrays */
	printf("    MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
	    , le32toh(ds->multiframes)
	    , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
	    , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
	    , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
	);
	printf("    DATA:%08x %08x %08x %08x %08x %08x\n"
	    , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
	    , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
	    , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
	);
#endif
#if 0
	/* raw hex dump of the descriptor (disabled) */
	{ const uint8_t *cp = (const uint8_t *) ds;
	  int i;
	  for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
		printf("%02x ", cp[i]);
		if (((i+1) % 16) == 0)
			printf("\n");
	  }
	  printf("\n");
	}
#endif
}
4544 #endif /* MWL_DEBUG */
4545
#if 0
/*
 * Debug helper (compiled out): dump every active tx descriptor
 * on a queue; only referenced from the disabled watchdog path.
 */
static void
mwl_txq_dump(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;
	int i = 0;

	MWL_TXQ_LOCK(txq);
	STAILQ_FOREACH(bf, &txq->active, bf_list) {
		struct mwl_txdesc *ds = bf->bf_desc;
		MWL_TXDESC_SYNC(txq, ds,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
		mwl_printtxbuf(bf, txq->qnum, i);
#endif
		i++;
	}
	MWL_TXQ_UNLOCK(txq);
}
#endif
4566
/*
 * Per-second watchdog: detect a stalled transmit path and
 * report a timeout; self-rearms via callout_reset.
 */
static void
mwl_watchdog(void *arg)
{
	struct mwl_softc *sc = arg;

	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
	/* NB: timer 0 => no tx pending; otherwise count down to 0 */
	if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
		return;

	if (sc->sc_running && !sc->sc_invalid) {
		/* probe the f/w with a keepalive cmd to classify the hang */
		if (mwl_hal_setkeepalive(sc->sc_mh))
			device_printf(sc->sc_dev,
			    "transmit timeout (firmware hung?)\n");
		else
			device_printf(sc->sc_dev,
			    "transmit timeout\n");
#if 0
		mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
		counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		sc->sc_stats.mst_watchdog++;
	}
}
4591
4592 #ifdef MWL_DIAGAPI
4593 /*
4594 * Diagnostic interface to the HAL. This is used by various
4595 * tools to do things like retrieve register contents for
4596 * debugging. The mechanism is intentionally opaque so that
4597 * it can change frequently w/o concern for compatibility.
4598 */
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	u_int id = md->md_id & MWL_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = md->md_in_size;
	u_int32_t outsize = md->md_out_size;
	int error = 0;

	if (md->md_id & MWL_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(md->md_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (md->md_id & MWL_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
		/* never copy out more than the caller asked for */
		if (outsize < md->md_out_size)
			md->md_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, md->md_out_data,
			    md->md_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* NB: only free buffers we allocated; DYN flag gates outdata */
	if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
4653
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0), then
 * re-fetch h/w specs, re-setup DMA, and reset tx/rx bookkeeping.
 */
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
	struct mwl_hal *mh = sc->sc_mh;
	int error;

	MWL_LOCK_ASSERT(sc);

	if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
		device_printf(sc->sc_dev, "unable to load firmware\n");
		return EIO;
	}
	if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
		device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
		return EIO;
	}
	error = mwl_setupdma(sc);
	if (error != 0) {
		/* NB: mwl_setupdma prints a msg */
		return error;
	}
	/*
	 * Reset tx/rx data structures; after reload we must
	 * re-start the driver's notion of the next xmit/recv.
	 */
	mwl_draintxq(sc);		/* clear pending frames */
	mwl_resettxq(sc);		/* rebuild tx q lists */
	sc->sc_rxnext = NULL;		/* force rx to start at the list head */
	return 0;
}
4684 #endif /* MWL_DIAGAPI */
4685
/*
 * net80211 parent callback: bring the device up/down to match
 * the number of running vaps.
 */
static void
mwl_parent(struct ieee80211com *ic)
{
	struct mwl_softc *sc = ic->ic_softc;
	int startall = 0;

	MWL_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (sc->sc_running) {
			/*
			 * To avoid rescanning another access point,
			 * do not call mwl_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			mwl_mode_init(sc);
		} else {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid) {
				mwl_init(sc);	/* XXX lose error */
				startall = 1;
			}
		}
	} else
		mwl_stop(sc);
	MWL_UNLOCK(sc);
	/* NB: start vaps outside the driver lock */
	if (startall)
		ieee80211_start_all(ic);
}
4722
/*
 * Driver-specific ioctls: statistics export plus (optionally)
 * the diagnostic API.
 */
static int
mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct mwl_softc *sc = ic->ic_softc;
	struct ifreq *ifr = data;
	int error = 0;

	switch (cmd) {
	case SIOCGMVSTATS:
		/* refresh the h/w counters before exporting */
		mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
#if 0
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.mst_tx_packets =
		    if_get_counter(ifp, IFCOUNTER_OPACKETS);
		sc->sc_stats.mst_rx_packets =
		    if_get_counter(ifp, IFCOUNTER_IPACKETS);
#endif
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsisentcy in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr),
		    sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
	case SIOCGMVDIAG:
		/* XXX check privs */
		return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
	case SIOCGMVRESET:
		/* XXX check privs */
		MWL_LOCK(sc);
		error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
		MWL_UNLOCK(sc);
		break;
#endif /* MWL_DIAGAPI */
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
4765
4766 #ifdef MWL_DEBUG
4767 static int
mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)4768 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4769 {
4770 struct mwl_softc *sc = arg1;
4771 int debug, error;
4772
4773 debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4774 error = sysctl_handle_int(oidp, &debug, 0, req);
4775 if (error || !req->newptr)
4776 return error;
4777 mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4778 sc->sc_debug = debug & 0x00ffffff;
4779 return 0;
4780 }
4781 #endif /* MWL_DEBUG */
4782
/* Attach driver sysctl nodes (debug knob only, under MWL_DEBUG). */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef MWL_DEBUG
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);

	/* seed from the module-level default */
	sc->sc_debug = mwl_debug;
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
4796
4797 /*
4798 * Announce various information on device/driver attach.
4799 */
static void
mwl_announce(struct mwl_softc *sc)
{

	/* h/w revision, firmware version and region code */
	device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
	    sc->sc_hwspecs.hwVersion,
	    (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
	    (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
	    sc->sc_hwspecs.regionCode);
	sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;

	if (bootverbose) {
		int i;
		/* report the WME AC -> hw queue mapping */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct mwl_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
			    txq->qnum, ieee80211_wme_acnames[i]);
		}
	}
	/* NB: also report when tunables diverge from the compiled defaults */
	if (bootverbose || mwl_rxdesc != MWL_RXDESC)
		device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
	if (bootverbose || mwl_rxbuf != MWL_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
	if (bootverbose || mwl_txbuf != MWL_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
	if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
		device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
	if (bootverbose)
		device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
4834