1 /*-
2 * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2013-2014 Kevin Lo
4 * Copyright (c) 2021 James Hastings
5 * Ported to FreeBSD by Jesper Schmitz Mouridsen jsm@FreeBSD.org
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * MediaTek MT7601U 802.11b/g/n WLAN.
22 */
23
24 #include "opt_wlan.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/bus.h>
29 #include <sys/endian.h>
30 #include <sys/eventhandler.h>
31 #include <sys/firmware.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43
44 #include <net/bpf.h>
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 #include <net/if_types.h>
51 #include <net/if_var.h>
52 #include <net80211/ieee80211_var.h>
53 #include <net80211/ieee80211_radiotap.h>
54 #include <net80211/ieee80211_ratectl.h>
55 #include <net80211/ieee80211_regdomain.h>
56 #ifdef IEEE80211_SUPPORT_SUPERG
57 #include <net80211/ieee80211_superg.h>
58 #endif
59 #include <netinet/if_ether.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in_var.h>
63 #include <netinet/ip.h>
64
65 #include <dev/usb/usb.h>
66 #include <dev/usb/usbdi.h>
67 #include <dev/usb/usb_request.h>
68
69 #include "usbdevs.h"
70
71 #define USB_DEBUG_VAR mtw_debug
72 #include <dev/usb/usb_debug.h>
73 #include <dev/usb/usb_msctest.h>
74
75 #include "if_mtwreg.h"
76 #include "if_mtwvar.h"
77
78 #define MTW_DEBUG
79
80 #ifdef MTW_DEBUG
81 int mtw_debug;
82 static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
83 "USB mtw");
84 SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
85 "mtw debug level");
86
87 enum {
88 MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
89 MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
90 MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */
91 MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
92 MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */
93 MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */
94 MTW_DEBUG_USB = 0x00000040, /* usb requests */
95 MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */
96 MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */
97 MTW_DEBUG_INTR = 0x00000200, /* ISR */
98 MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */
99 MTW_DEBUG_ROM = 0x00000800, /* various ROM info */
100 MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */
101 MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */
102 MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */
103 MTW_DEBUG_RESET = 0x00008000, /* initialization progress */
104 MTW_DEBUG_CALIB = 0x00010000, /* calibration progress */
105 MTW_DEBUG_CMD = 0x00020000, /* command queue */
106 MTW_DEBUG_ANY = 0xffffffff
107 };
108
109 #define MTW_DPRINTF(_sc, _m, ...) \
110 do { \
111 if (mtw_debug & (_m)) \
112 device_printf((_sc)->sc_dev, __VA_ARGS__); \
113 } while (0)
114
115 #else
116 #define MTW_DPRINTF(_sc, _m, ...) \
117 do { \
118 (void)_sc; \
119 } while (0)
120 #endif
121
122 #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
123
124 /* NB: "11" is the maximum number of padding bytes needed for Tx */
125 #define MTW_MAX_TXSZ \
126 (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)
127
128 /*
129 * Because of LOR in mtw_key_delete(), use atomic instead.
130 * '& MTW_CMDQ_MASQ' is to loop cmdq[].
131 */
132 #define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)
133
134 static const STRUCT_USB_HOST_ID mtw_devs[] = {
135 #define MTW_DEV(v, p) \
136 { \
137 USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \
138 }
139 MTW_DEV(EDIMAX, MT7601U),
140 MTW_DEV(RALINK, MT7601U),
141 MTW_DEV(XIAOMI, MT7601U)
142 };
143 #undef MTW_DEV
144
145 static device_probe_t mtw_match;
146 static device_attach_t mtw_attach;
147 static device_detach_t mtw_detach;
148
149 static usb_callback_t mtw_bulk_rx_callback;
150 static usb_callback_t mtw_bulk_tx_callback0;
151 static usb_callback_t mtw_bulk_tx_callback1;
152 static usb_callback_t mtw_bulk_tx_callback2;
153 static usb_callback_t mtw_bulk_tx_callback3;
154 static usb_callback_t mtw_bulk_tx_callback4;
155 static usb_callback_t mtw_bulk_tx_callback5;
156 static usb_callback_t mtw_fw_callback;
157
158 static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
159 static int mtw_driver_loaded(struct module *, int, void *);
160 static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
161 u_int index);
162 static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
163 const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
164 const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
165 static void mtw_vap_delete(struct ieee80211vap *);
166 static void mtw_cmdq_cb(void *, int);
167 static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
168 static void mtw_unsetup_tx_list(struct mtw_softc *,
169 struct mtw_endpoint_queue *);
170 static void mtw_load_microcode(void *arg);
171
172 static usb_error_t mtw_do_request(struct mtw_softc *,
173 struct usb_device_request *, void *);
174 static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
175 static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
176 static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
177 static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
178 static int mtw_write_region_1(struct mtw_softc *, uint16_t, const uint8_t *, int);
179 static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
180 static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
181 static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
182 static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
183 static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
184 static void mtw_get_txpower(struct mtw_softc *);
185 static int mtw_read_eeprom(struct mtw_softc *);
186 static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
187 const uint8_t mac[IEEE80211_ADDR_LEN]);
188 static int mtw_media_change(if_t);
189 static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
190 static int mtw_wme_update(struct ieee80211com *);
191 static void mtw_key_set_cb(void *);
192 static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
193 static void mtw_key_delete_cb(void *);
194 static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
195 static void mtw_ratectl_to(void *);
196 static void mtw_ratectl_cb(void *, int);
197 static void mtw_drain_fifo(void *);
198 static void mtw_iter_func(void *, struct ieee80211_node *);
199 static void mtw_newassoc_cb(void *);
200 static void mtw_newassoc(struct ieee80211_node *, int);
201 static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
202 static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
203 const struct ieee80211_rx_stats *, int, int);
204 static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
205 static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
206 int);
207 static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
208 static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
209 static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
210 struct ieee80211_node *);
211 static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
212 struct ieee80211_node *, int, int);
213 static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
214 struct ieee80211_node *, const struct ieee80211_bpf_params *);
215 static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
216 const struct ieee80211_bpf_params *);
217 static int mtw_transmit(struct ieee80211com *, struct mbuf *);
218 static void mtw_start(struct mtw_softc *);
219 static void mtw_parent(struct ieee80211com *);
220 static void mtw_select_chan_group(struct mtw_softc *, int);
221
222 static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
223 static void mtw_set_channel(struct ieee80211com *);
224 static void mtw_getradiocaps(struct ieee80211com *, int, int *,
225 struct ieee80211_channel[]);
226 static void mtw_scan_start(struct ieee80211com *);
227 static void mtw_scan_end(struct ieee80211com *);
228 static void mtw_update_beacon(struct ieee80211vap *, int);
229 static void mtw_update_beacon_cb(void *);
230 static void mtw_updateprot(struct ieee80211com *);
231 static void mtw_updateprot_cb(void *);
232 static void mtw_usb_timeout_cb(void *);
233 static int mtw_reset(struct mtw_softc *sc);
234 static void mtw_enable_tsf_sync(struct mtw_softc *);
235
236
237 static void mtw_enable_mrr(struct mtw_softc *);
238 static void mtw_set_txpreamble(struct mtw_softc *);
239 static void mtw_set_basicrates(struct mtw_softc *);
240 static void mtw_set_leds(struct mtw_softc *, uint16_t);
241 static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
242 static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
243 static void mtw_updateslot(struct ieee80211com *);
244 static void mtw_updateslot_cb(void *);
245 static void mtw_update_mcast(struct ieee80211com *);
246 static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
247 static void mtw_update_promisc_locked(struct mtw_softc *);
248 static void mtw_update_promisc(struct ieee80211com *);
249 static int mtw_txrx_enable(struct mtw_softc *);
250 static void mtw_init_locked(struct mtw_softc *);
251 static void mtw_stop(void *);
252 static void mtw_delay(struct mtw_softc *, u_int);
253 static void mtw_update_chw(struct ieee80211com *ic);
254 static int mtw_ampdu_enable(struct ieee80211_node *ni,
255 struct ieee80211_tx_ampdu *tap);
256
257 static eventhandler_tag mtw_etag;
258
/* Per-bank RF register/value initialization tables (from if_mtwreg.h). */
static const struct {
	uint8_t reg;
	uint8_t val;
} mt7601_rf_bank0[] = { MT7601_BANK0_RF },
	mt7601_rf_bank4[] = { MT7601_BANK4_RF },
	mt7601_rf_bank5[] = { MT7601_BANK5_RF };
/* Default MAC register/value pairs written during chip init. */
static const struct {
	uint32_t reg;
	uint32_t val;
} mt7601_def_mac[] = { MT7601_DEF_MAC };
/* Default baseband (BBP) register/value pairs. */
static const struct {
	uint8_t reg;
	uint8_t val;
} mt7601_def_bbp[] = { MT7601_DEF_BBP };


/* Per-channel RF programming values (RF registers r17..r20). */
static const struct {
	u_int chan;
	uint8_t r17, r18, r19, r20;
} mt7601_rf_chan[] = { MT7601_RF_CHAN };
279
280
/*
 * USB transfer configuration: one bulk-in pipe for received frames,
 * one bulk-out pipe per WME access category plus HCCA and PRIO
 * queues, a bulk-out pipe for firmware/MCU command upload, and a
 * raw Tx pipe.
 */
static const struct usb_config mtw_config[MTW_N_XFER] = {
	[MTW_BULK_RX] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = MTW_MAX_RXSZ,
		.flags = {.pipe_bof = 1,
			.short_xfer_ok = 1,},
		.callback = mtw_bulk_rx_callback,
	},
	[MTW_BULK_TX_BE] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		/*
		 * NOTE(review): every other Tx queue sets
		 * force_short_xfer = 1 — confirm 0 is intended for BE.
		 */
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 0,},
		.callback = mtw_bulk_tx_callback0,
		.timeout = 5000, /* ms */
	},
	[MTW_BULK_TX_BK] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1,},
		.callback = mtw_bulk_tx_callback1,
		.timeout = 5000, /* ms */
	},
	[MTW_BULK_TX_VI] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1,},
		.callback = mtw_bulk_tx_callback2,
		.timeout = 5000, /* ms */
	},
	[MTW_BULK_TX_VO] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1,},
		.callback = mtw_bulk_tx_callback3,
		.timeout = 5000, /* ms */
	},
	/* HCCA/PRIO queues may be absent; no_pipe_ok tolerates that. */
	[MTW_BULK_TX_HCCA] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1, .no_pipe_ok = 1,},
		.callback = mtw_bulk_tx_callback4,
		.timeout = 5000, /* ms */
	},
	[MTW_BULK_TX_PRIO] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1, .no_pipe_ok = 1,},
		.callback = mtw_bulk_tx_callback5,
		.timeout = 5000, /* ms */
	},

	/* Firmware/MCU command pipe; bufsize matches the 0x2c44-byte
	 * upload chunks used by mtw_ucode_write(). No timeout set. */
	[MTW_BULK_FW_CMD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = 0x2c44,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1, .no_pipe_ok = 1,},
		.callback = mtw_fw_callback,

	},

	[MTW_BULK_RAW_TX] = {
		.type = UE_BULK,
		.ep_index = 0,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = MTW_MAX_TXSZ,
		.flags = {.pipe_bof = 1,
			.force_short_xfer = 1, .no_pipe_ok = 1,},
		.callback = mtw_bulk_tx_callback0,
		.timeout = 5000, /* ms */
	},

};
/* Map net80211 WME access categories to bulk-out transfer indices. */
static uint8_t mtw_wme_ac_xfer_map[4] = {
	[WME_AC_BE] = MTW_BULK_TX_BE,
	[WME_AC_BK] = MTW_BULK_TX_BK,
	[WME_AC_VI] = MTW_BULK_TX_VI,
	[WME_AC_VO] = MTW_BULK_TX_VO,
};
382 static void
mtw_autoinst(void * arg,struct usb_device * udev,struct usb_attach_arg * uaa)383 mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
384 {
385 struct usb_interface *iface;
386 struct usb_interface_descriptor *id;
387
388 if (uaa->dev_state != UAA_DEV_READY)
389 return;
390
391 iface = usbd_get_iface(udev, 0);
392 if (iface == NULL)
393 return;
394 id = iface->idesc;
395 if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
396 return;
397 if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
398 return;
399
400 if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
401 uaa->dev_state = UAA_DEV_EJECTING;
402 }
403
404 static int
mtw_driver_loaded(struct module * mod,int what,void * arg)405 mtw_driver_loaded(struct module *mod, int what, void *arg)
406 {
407 switch (what) {
408 case MOD_LOAD:
409 mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
410 mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
411 break;
412 case MOD_UNLOAD:
413 EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
414 break;
415 default:
416 return (EOPNOTSUPP);
417 }
418 return (0);
419 }
420
421 static const char *
mtw_get_rf(int rev)422 mtw_get_rf(int rev)
423 {
424 switch (rev) {
425 case MT7601_RF_7601:
426 return ("MT7601");
427 case MT7610_RF_7610:
428 return ("MT7610");
429 case MT7612_RF_7612:
430 return ("MT7612");
431 }
432 return ("unknown");
433 }
/*
 * Power the WLAN core up or down via MTW_WLAN_CTRL and gate the
 * reference oscillator to match.  The write/delay ordering follows
 * the vendor power-up sequence and must not be rearranged.
 *
 * NOTE(review): mtw_read() return values are ignored, so a failed USB
 * read would leave 'tmp' stale; 'error' is always returned as 0.
 * Confirm whether read failures should be propagated.
 */
static int
mtw_wlan_enable(struct mtw_softc *sc, int enable)
{
	uint32_t tmp;
	int error = 0;

	if (enable) {
		mtw_read(sc, MTW_WLAN_CTRL, &tmp);
		if (sc->asic_ver == 0x7612)
			tmp &= ~0xfffff000;

		/* assert WLAN enable with the clock still gated off */
		tmp &= ~MTW_WLAN_CLK_EN;
		tmp |= MTW_WLAN_EN;
		mtw_write(sc, MTW_WLAN_CTRL, tmp);
		mtw_delay(sc, 2);

		/* then ungate the WLAN clock (MT7612 also resets RF) */
		tmp |= MTW_WLAN_CLK_EN;
		if (sc->asic_ver == 0x7612) {
			tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
		}
		mtw_write(sc, MTW_WLAN_CTRL, tmp);
		mtw_delay(sc, 2);

		/* enable the oscillator, then request calibration */
		mtw_read(sc, MTW_OSC_CTRL, &tmp);
		tmp |= MTW_OSC_EN;
		mtw_write(sc, MTW_OSC_CTRL, tmp);
		tmp |= MTW_OSC_CAL_REQ;
		mtw_write(sc, MTW_OSC_CTRL, tmp);
	} else {
		/* power down: disable WLAN core and clock ... */
		mtw_read(sc, MTW_WLAN_CTRL, &tmp);
		tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
		mtw_write(sc, MTW_WLAN_CTRL, tmp);

		/* ... and switch the oscillator off */
		mtw_read(sc, MTW_OSC_CTRL, &tmp);
		tmp &= ~MTW_OSC_EN;
		mtw_write(sc, MTW_OSC_CTRL, tmp);
	}
	return (error);
}
473
474 static int
mtw_read_cfg(struct mtw_softc * sc,uint16_t reg,uint32_t * val)475 mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
476 {
477 usb_device_request_t req;
478 uint32_t tmp;
479 uint16_t actlen;
480 int error;
481
482 req.bmRequestType = UT_READ_VENDOR_DEVICE;
483 req.bRequest = MTW_READ_CFG;
484 USETW(req.wValue, 0);
485 USETW(req.wIndex, reg);
486 USETW(req.wLength, 4);
487 error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
488 &actlen, 1000);
489
490 if (error == 0)
491 *val = le32toh(tmp);
492 else
493 *val = 0xffffffff;
494 return (error);
495 }
496
497 static int
mtw_match(device_t self)498 mtw_match(device_t self)
499 {
500 struct usb_attach_arg *uaa = device_get_ivars(self);
501
502 if (uaa->usb_mode != USB_MODE_HOST)
503 return (ENXIO);
504 if (uaa->info.bConfigIndex != 0)
505 return (ENXIO);
506 if (uaa->info.bIfaceIndex != 0)
507 return (ENXIO);
508
509 return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
510 }
511
512 static int
mtw_attach(device_t self)513 mtw_attach(device_t self)
514 {
515 struct mtw_softc *sc = device_get_softc(self);
516 struct usb_attach_arg *uaa = device_get_ivars(self);
517 struct ieee80211com *ic = &sc->sc_ic;
518 uint32_t ver;
519 int i, ret;
520 uint32_t tmp;
521 uint8_t iface_index;
522 int ntries, error;
523
524 device_set_usb_desc(self);
525 sc->sc_udev = uaa->device;
526 sc->sc_dev = self;
527 sc->sc_sent = 0;
528
529 /*
530 * Reset the device to clear any stale state left over from
531 * a previous warm reboot. Some MT7601U devices fail otherwise.
532 */
533 error = usbd_req_re_enumerate(uaa->device, NULL);
534 if (error != 0)
535 device_printf(self, "USB re-enumerate failed, continuing\n");
536 DELAY(100000); /* 100ms settle time */
537
538 mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
539 MTX_NETWORK_LOCK, MTX_DEF);
540
541 iface_index = 0;
542
543 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
544 mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
545 if (error) {
546 device_printf(sc->sc_dev,
547 "could not allocate USB transfers, "
548 "err=%s\n",
549 usbd_errstr(error));
550 goto detach;
551 }
552 for (i = 0; i < 4; i++) {
553 sc->txd_fw[i] = (struct mtw_txd_fw *)
554 malloc(sizeof(struct mtw_txd_fw),
555 M_USBDEV, M_NOWAIT | M_ZERO);
556 }
557 MTW_LOCK(sc);
558 sc->sc_idx = 0;
559 mbufq_init(&sc->sc_snd, ifqmaxlen);
560
561 /*enable WLAN core */
562 if ((error = mtw_wlan_enable(sc, 1)) != 0) {
563 device_printf(sc->sc_dev, "could not enable WLAN core\n");
564 return (ENXIO);
565 }
566
567 /* wait for the chip to settle */
568 DELAY(100);
569 for (ntries = 0; ntries < 100; ntries++) {
570 if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
571 goto detach;
572 }
573 if (ver != 0 && ver != 0xffffffff)
574 break;
575 DELAY(10);
576 }
577 if (ntries == 100) {
578 device_printf(sc->sc_dev,
579 "timeout waiting for NIC to initialize\n");
580 goto detach;
581 }
582 sc->asic_ver = ver >> 16;
583 sc->asic_rev = ver & 0xffff;
584 DELAY(100);
585 if (sc->asic_ver != 0x7601) {
586 device_printf(sc->sc_dev,
587 "Your revision 0x04%x is not supported yet\n",
588 sc->asic_rev);
589 goto detach;
590 }
591
592
593 if (mtw_read(sc, MTW_MAC_VER_ID, &tmp) != 0)
594 goto detach;
595 sc->mac_rev = tmp & 0xffff;
596
597 mtw_load_microcode(sc);
598 ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 10 * hz);
599 if (ret == EWOULDBLOCK || sc->fwloading != 1) {
600 device_printf(sc->sc_dev,
601 "timeout waiting for MCU to initialize\n");
602 goto detach;
603 }
604
605 sc->sc_srom_read = mtw_efuse_read_2;
606 /* retrieve RF rev. no and various other things from EEPROM */
607 mtw_read_eeprom(sc);
608
609 device_printf(sc->sc_dev,
610 "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
611 sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
612 sc->nrxchains, ether_sprintf(ic->ic_macaddr));
613 DELAY(100);
614
615 //mtw_set_leds(sc,5);
616 // mtw_mcu_radio(sc,0x31,0);
617 MTW_UNLOCK(sc);
618
619
620 ic->ic_softc = sc;
621 ic->ic_name = device_get_nameunit(self);
622 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
623 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
624
625 ic->ic_caps = IEEE80211_C_STA | /* station mode supported */
626 IEEE80211_C_MONITOR | /* monitor mode supported */
627 IEEE80211_C_IBSS |
628 IEEE80211_C_HOSTAP |
629 IEEE80211_C_WDS | /* 4-address traffic works */
630 IEEE80211_C_MBSS |
631 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
632 IEEE80211_C_SHSLOT | /* short slot time supported */
633 IEEE80211_C_WME | /* WME */
634 IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
635 device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
636 ic->ic_htcaps = IEEE80211_HTC_HT
637 | IEEE80211_HTC_AMPDU
638 | IEEE80211_HTC_AMSDU
639 | IEEE80211_HTCAP_MAXAMSDU_3839
640 | IEEE80211_HTCAP_SMPS_OFF;
641
642 ic->ic_rxstream = sc->nrxchains;
643 ic->ic_txstream = sc->ntxchains;
644
645 ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
646 IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
647 IEEE80211_CRYPTO_TKIPMIC;
648
649 ic->ic_flags |= IEEE80211_F_DATAPAD;
650 ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
651 ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
652
653 mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
654 ic->ic_channels);
655
656 ieee80211_ifattach(ic);
657
658 ic->ic_scan_start = mtw_scan_start;
659 ic->ic_scan_end = mtw_scan_end;
660 ic->ic_set_channel = mtw_set_channel;
661 ic->ic_getradiocaps = mtw_getradiocaps;
662 ic->ic_node_alloc = mtw_node_alloc;
663 ic->ic_newassoc = mtw_newassoc;
664 ic->ic_update_mcast = mtw_update_mcast;
665 ic->ic_updateslot = mtw_updateslot;
666 ic->ic_wme.wme_update = mtw_wme_update;
667 ic->ic_raw_xmit = mtw_raw_xmit;
668 ic->ic_update_promisc = mtw_update_promisc;
669 ic->ic_vap_create = mtw_vap_create;
670 ic->ic_vap_delete = mtw_vap_delete;
671 ic->ic_transmit = mtw_transmit;
672 ic->ic_parent = mtw_parent;
673
674 ic->ic_update_chw = mtw_update_chw;
675 ic->ic_ampdu_enable = mtw_ampdu_enable;
676
677 ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
678 sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
679 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
680 MTW_RX_RADIOTAP_PRESENT);
681 TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
682 TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
683 usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
684
685 if (bootverbose)
686 ieee80211_announce(ic);
687
688 return (0);
689
690 detach:
691 MTW_UNLOCK(sc);
692 mtw_detach(self);
693 return (ENXIO);
694 }
695
696 static void
mtw_drain_mbufq(struct mtw_softc * sc)697 mtw_drain_mbufq(struct mtw_softc *sc)
698 {
699 struct mbuf *m;
700 struct ieee80211_node *ni;
701
702 MTW_LOCK_ASSERT(sc, MA_OWNED);
703 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
704 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
705 m->m_pkthdr.rcvif = NULL;
706 ieee80211_free_node(ni);
707 m_freem(m);
708 }
709 }
710
/*
 * Device detach: reset the chip, drain all USB transfers, release Tx
 * resources and net80211 state, then destroy the softc lock.  Also
 * used as the error-unwind path from mtw_attach().
 */
static int
mtw_detach(device_t self)
{
	struct mtw_softc *sc = device_get_softc(self);
	struct ieee80211com *ic = &sc->sc_ic;
	int i;
	/* mark detached under the lock so callbacks become no-ops */
	MTW_LOCK(sc);
	mtw_reset(sc);
	DELAY(10000);
	sc->sc_detached = 1;
	MTW_UNLOCK(sc);


	/* stop all USB transfers (must be done unlocked) */
	for (i = 0; i < MTW_N_XFER; i++)
		usbd_transfer_drain(sc->sc_xfer[i]);

	MTW_LOCK(sc);
	/* abort rate control and command-queue processing */
	sc->ratectl_run = MTW_RATECTL_OFF;
	sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;

	/* free TX list, if any */
	if (ic->ic_nrunning > 0)
		for (i = 0; i < MTW_EP_QUEUES; i++)
			mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);

	/* Free TX queue */
	mtw_drain_mbufq(sc);
	MTW_UNLOCK(sc);
	/* only tear down net80211 if attach got far enough to set it up */
	if (sc->sc_ic.ic_softc == sc) {
		/* drain tasks */
		usb_callout_drain(&sc->ratectl_ch);
		ieee80211_draintask(ic, &sc->cmdq_task);
		ieee80211_draintask(ic, &sc->ratectl_task);
		ieee80211_ifdetach(ic);
	}
	/* free(NULL) is a no-op, so partially-allocated state is fine */
	for (i = 0; i < 4; i++) {
		free(sc->txd_fw[i], M_USBDEV);
	}
	firmware_unregister("/mediatek/mt7601u");
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
755
756 static struct ieee80211vap *
mtw_vap_create(struct ieee80211com * ic,const char name[IFNAMSIZ],int unit,enum ieee80211_opmode opmode,int flags,const uint8_t bssid[IEEE80211_ADDR_LEN],const uint8_t mac[IEEE80211_ADDR_LEN])757 mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
758 enum ieee80211_opmode opmode, int flags,
759 const uint8_t bssid[IEEE80211_ADDR_LEN],
760 const uint8_t mac[IEEE80211_ADDR_LEN])
761 {
762 struct mtw_softc *sc = ic->ic_softc;
763 struct mtw_vap *rvp;
764 struct ieee80211vap *vap;
765 int i;
766
767 if (sc->rvp_cnt >= MTW_VAP_MAX) {
768 device_printf(sc->sc_dev, "number of VAPs maxed out\n");
769 return (NULL);
770 }
771
772 switch (opmode) {
773 case IEEE80211_M_STA:
774 /* enable s/w bmiss handling for sta mode */
775 flags |= IEEE80211_CLONE_NOBEACONS;
776 /* fall though */
777 case IEEE80211_M_IBSS:
778 case IEEE80211_M_MONITOR:
779 case IEEE80211_M_HOSTAP:
780 case IEEE80211_M_MBSS:
781 /* other than WDS vaps, only one at a time */
782 if (!TAILQ_EMPTY(&ic->ic_vaps))
783 return (NULL);
784 break;
785 case IEEE80211_M_WDS:
786 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
787 if (vap->iv_opmode != IEEE80211_M_HOSTAP)
788 continue;
789 /* WDS vap's always share the local mac address. */
790 flags &= ~IEEE80211_CLONE_BSSID;
791 break;
792 }
793 if (vap == NULL) {
794 device_printf(sc->sc_dev,
795 "wds only supported in ap mode\n");
796 return (NULL);
797 }
798 break;
799 default:
800 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
801 return (NULL);
802 }
803
804 rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
805 vap = &rvp->vap;
806
807 if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
808 0) {
809 /* out of memory */
810 free(rvp, M_80211_VAP);
811 return (NULL);
812 }
813
814 vap->iv_update_beacon = mtw_update_beacon;
815 vap->iv_max_aid = MTW_WCID_MAX;
816
817 /*
818 * The linux rt2800 driver limits 1 stream devices to a 32KB
819 * RX AMPDU.
820 */
821 if (ic->ic_rxstream > 1)
822 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
823 else
824 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
825 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */
826
827 /*
828 * To delete the right key from h/w, we need wcid.
829 * Luckily, there is unused space in ieee80211_key{}, wk_pad,
830 * and matching wcid will be written into there. So, cast
831 * some spells to remove 'const' from ieee80211_key{}
832 */
833 vap->iv_key_delete = (void *)mtw_key_delete;
834 vap->iv_key_set = (void *)mtw_key_set;
835
836 // override state transition machine
837 rvp->newstate = vap->iv_newstate;
838 vap->iv_newstate = mtw_newstate;
839 if (opmode == IEEE80211_M_IBSS) {
840 rvp->recv_mgmt = vap->iv_recv_mgmt;
841 vap->iv_recv_mgmt = mtw_recv_mgmt;
842 }
843
844 ieee80211_ratectl_init(vap);
845 ieee80211_ratectl_setinterval(vap, 1000); // 1 second
846
847 /* complete setup */
848 ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
849 mac);
850
851 /* make sure id is always unique */
852 for (i = 0; i < MTW_VAP_MAX; i++) {
853 if ((sc->rvp_bmap & 1 << i) == 0) {
854 sc->rvp_bmap |= 1 << i;
855 rvp->rvp_id = i;
856 break;
857 }
858 }
859 if (sc->rvp_cnt++ == 0)
860 ic->ic_opmode = opmode;
861
862 if (opmode == IEEE80211_M_HOSTAP)
863 sc->cmdq_run = MTW_CMDQ_GO;
864
865 MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
866 rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
867
868 return (vap);
869 }
870
871 static void
mtw_vap_delete(struct ieee80211vap * vap)872 mtw_vap_delete(struct ieee80211vap *vap)
873 {
874 struct mtw_vap *rvp = MTW_VAP(vap);
875 struct ieee80211com *ic;
876 struct mtw_softc *sc;
877 uint8_t rvp_id;
878
879 if (vap == NULL)
880 return;
881
882 ic = vap->iv_ic;
883 sc = ic->ic_softc;
884
885 MTW_LOCK(sc);
886 m_freem(rvp->beacon_mbuf);
887 rvp->beacon_mbuf = NULL;
888
889 rvp_id = rvp->rvp_id;
890 sc->ratectl_run &= ~(1 << rvp_id);
891 sc->rvp_bmap &= ~(1 << rvp_id);
892 mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
893 mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
894 --sc->rvp_cnt;
895
896 MTW_DPRINTF(sc, MTW_DEBUG_STATE,
897 "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
898 sc->rvp_cnt);
899
900 MTW_UNLOCK(sc);
901
902 ieee80211_ratectl_deinit(vap);
903 ieee80211_vap_detach(vap);
904 free(rvp, M_80211_VAP);
905 }
906
907 /*
908 * There are numbers of functions need to be called in context thread.
909 * Rather than creating taskqueue event for each of those functions,
910 * here is all-for-one taskqueue callback function. This function
911 * guarantees deferred functions are executed in the same order they
912 * were enqueued.
913 * '& MTW_CMDQ_MASQ' is to loop cmdq[].
914 */
static void
mtw_cmdq_cb(void *arg, int pending)
{
	struct mtw_softc *sc = arg;
	uint8_t i;

	/* call cmdq[].func locked */
	MTW_LOCK(sc);
	/*
	 * Execute up to 'pending' queued callbacks in FIFO order
	 * starting at cmdq_exec, stopping early at the first empty
	 * slot.  cmdq_exec is re-read each iteration because it is
	 * advanced at the bottom of the loop body.
	 */
	for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
	    i = sc->cmdq_exec, pending--) {
		MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
		    pending);
		/* entries are only run while the queue is in GO state */
		if (sc->cmdq_run == MTW_CMDQ_GO) {
			/*
			 * If arg0 is NULL, callback func needs more
			 * than one arg. So, pass ptr to cmdq struct.
			 */
			if (sc->cmdq[i].arg0)
				sc->cmdq[i].func(sc->cmdq[i].arg0);
			else
				sc->cmdq[i].func(&sc->cmdq[i]);
		}
		/* clear the slot and advance the ring index (mask wraps) */
		sc->cmdq[i].arg0 = NULL;
		sc->cmdq[i].func = NULL;
		sc->cmdq_exec++;
		sc->cmdq_exec &= MTW_CMDQ_MASQ;
	}
	MTW_UNLOCK(sc);
}
943
944 static void
mtw_setup_tx_list(struct mtw_softc * sc,struct mtw_endpoint_queue * pq)945 mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
946 {
947 struct mtw_tx_data *data;
948
949 memset(pq, 0, sizeof(*pq));
950
951 STAILQ_INIT(&pq->tx_qh);
952 STAILQ_INIT(&pq->tx_fh);
953
954 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
955 data++) {
956 data->sc = sc;
957 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
958 }
959 pq->tx_nfree = MTW_TX_RING_COUNT;
960 }
961
962 static void
mtw_unsetup_tx_list(struct mtw_softc * sc,struct mtw_endpoint_queue * pq)963 mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
964 {
965 struct mtw_tx_data *data;
966 /* make sure any subsequent use of the queues will fail */
967 pq->tx_nfree = 0;
968
969 STAILQ_INIT(&pq->tx_fh);
970 STAILQ_INIT(&pq->tx_qh);
971
972 /* free up all node references and mbufs */
973 for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
974 data++) {
975 if (data->m != NULL) {
976 m_freem(data->m);
977 data->m = NULL;
978 }
979 if (data->ni != NULL) {
980 ieee80211_free_node(data->ni);
981 data->ni = NULL;
982 }
983 }
984 }
985
986 static int
mtw_write_ivb(struct mtw_softc * sc,void * buf,uint16_t len)987 mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
988 {
989 usb_device_request_t req;
990 uint16_t actlen;
991 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
992 req.bRequest = MTW_RESET;
993 USETW(req.wValue, 0x12);
994 USETW(req.wIndex, 0);
995 USETW(req.wLength, len);
996
997 int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
998 0, &actlen, 1000);
999
1000 return (error);
1001 }
1002
1003 static int
mtw_write_cfg(struct mtw_softc * sc,uint16_t reg,uint32_t val)1004 mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
1005 {
1006 usb_device_request_t req;
1007 int error;
1008
1009 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1010 req.bRequest = MTW_WRITE_CFG;
1011 USETW(req.wValue, 0);
1012 USETW(req.wIndex, reg);
1013 USETW(req.wLength, 4);
1014 val = htole32(val);
1015 error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
1016 return (error);
1017 }
1018
1019 static int
mtw_usb_dma_write(struct mtw_softc * sc,uint32_t val)1020 mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
1021 {
1022 // if (sc->asic_ver == 0x7612)
1023 // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
1024 // else
1025 return (mtw_write(sc, MTW_USB_DMA_CFG, val));
1026 }
1027
1028 static void
mtw_ucode_setup(struct mtw_softc * sc)1029 mtw_ucode_setup(struct mtw_softc *sc)
1030 {
1031
1032 mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
1033 mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
1034 mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
1035 mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
1036 mtw_write(sc, MTW_MCU_FW_IDX, 1);
1037 mtw_write(sc, MTW_FCE_PDMA, 0x44);
1038 mtw_write(sc, MTW_FCE_SKIP_FS, 3);
1039 }
/*
 * Stage a firmware image for upload: split it into 0x2c44-byte
 * chunks, copy each chunk into a pre-allocated firmware Tx
 * descriptor, stash the initial vector block in sc_ivb_1, and start
 * the upload transfer.  The actual USB traffic completes
 * asynchronously on sc_xfer[7]; this function always returns 0.
 *
 * NOTE(review): `offset' is unused outside the disabled #if 0 block,
 * and `idx' is never bounds-checked against the txd_fw[] array size
 * -- an oversized image would overrun it.  Confirm callers only pass
 * images that fit.
 */
static int
mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
    int32_t len, uint32_t offset)
{

	// struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
#if 0 // firmware not tested

	if (sc->asic_ver == 0x7612 && offset >= 0x90000)
		blksz = 0x800; /* MT7612 ROM Patch */

	xfer = usbd_alloc_xfer(sc->sc_udev);
	if (xfer == NULL) {
		error = ENOMEM;
		goto fail;
	}
	buf = usbd_alloc_buffer(xfer, blksz + 12);
	if (buf == NULL) {
		error = ENOMEM;
		goto fail;
	}
#endif

	int mlen;
	int idx = 0;

	/* Full chunk size; all chunks but the last use this length. */
	mlen = 0x2c44;

	while (len > 0) {

		/* Final chunk carries whatever remains. */
		if (len < 0x2c44 && len > 0) {
			mlen = len;
		}

		sc->txd_fw[idx]->len = htole16(mlen);
		sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);

		memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
		// memcpy(&txd[1], fw, mlen);
		// memset(&txd[1] + mlen, 0, MTW_DMA_PAD);
		// mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, offset
		//+sent); 1mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (mlen << 16));

		// sc->sc_fw_data[idx]->len=htole16(mlen);

		// memcpy(tmpbuf,fw,mlen);
		// memset(tmpbuf+mlen,0,MTW_DMA_PAD);
		// memcpy(sc->sc_fw_data[idx].buf, fw, mlen);

		fw += mlen;
		len -= mlen;
		// sent+=mlen;
		idx++;
	}
	sc->sc_sent = 0;
	/* Stage the IVB; the transfer path consumes sc_ivb_1 (not visible here). */
	memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);

	usbd_transfer_start(sc->sc_xfer[7]);

	return (0);
}
1103
/*
 * Load the MCU firmware.  MT7612 parts first receive a ROM patch
 * image; every part then receives its main firmware image, whose
 * header supplies the ILM/DLM lengths and version strings.
 *
 * NOTE(review): the firmware(9) references obtained here are never
 * released with firmware_put(); both images leak.  The payload is
 * copied into driver buffers by mtw_ucode_write(), so releasing them
 * after the upload looks safe -- confirm against the transfer
 * callback before changing.
 */
static void
mtw_load_microcode(void *arg)
{

	struct mtw_softc *sc = (struct mtw_softc *)arg;
	const struct mtw_ucode_hdr *hdr;
	// onst struct mtw_ucode *fw = NULL;
	const char *fwname;
	size_t size;
	int error = 0;
	uint32_t tmp, iofs = 0x40;
	// int ntries;
	int dlen, ilen;
	device_printf(sc->sc_dev, "version:0x%hx\n", sc->asic_ver);
	/*
	 * Firmware may still be running from a previous warm reboot.
	 * Force a reset of the MCU to ensure a clean state.
	 */
	mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
	if (tmp == MTW_MCU_READY) {
		device_printf(sc->sc_dev, "MCU already running, resetting\n");
		mtw_write(sc, MTW_MCU_RESET_CTL, MTW_RESET);
		DELAY(10000);
		mtw_write(sc, MTW_MCU_RESET_CTL, 0);
		DELAY(10000);
		/* Clear ready flag */
		mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0);
		DELAY(1000);
	}

	if (sc->asic_ver == 0x7612) {
		fwname = "mtw-mt7662u_rom_patch";

		const struct firmware *firmware = firmware_get_flags(fwname,FIRMWARE_GET_NOWARN);
		if (firmware == NULL) {
			/* NOTE(review): `error' is always 0 here. */
			device_printf(sc->sc_dev,
			    "failed loadfirmware of file %s (error %d)\n",
			    fwname, error);
			return;
		}
		size = firmware->datasize;

		const struct mtw_ucode *fw = (const struct mtw_ucode *)
		    firmware->data;
		hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
		// memcpy(fw,(const unsigned char*)firmware->data +
		// 0x1e,size-0x1e);
		/* Skip the 0x1e-byte ROM patch header. */
		ilen = size - 0x1e;

		mtw_ucode_setup(sc);

		if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
		    0x90000)) != 0) {
			goto fail;
		}
		mtw_usb_dma_write(sc, 0x00e41814);
	}

	fwname = "/mediatek/mt7601u.bin";
	iofs = 0x40;
	// dofs = 0;
	if (sc->asic_ver == 0x7612) {
		fwname = "mtw-mt7662u";
		iofs = 0x80040;
		// dofs = 0x110800;
	} else if (sc->asic_ver == 0x7610) {
		fwname = "mt7610u";
		// dofs = 0x80000;
	}
	/* Drop the driver lock around firmware_get(); it may sleep. */
	MTW_UNLOCK(sc);
	const struct firmware *firmware = firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);

	if (firmware == NULL) {
		/* NOTE(review): `error' is always 0 here. */
		device_printf(sc->sc_dev,
		    "failed loadfirmware of file %s (error %d)\n", fwname,
		    error);
		MTW_LOCK(sc);
		return;
	}
	MTW_LOCK(sc);
	size = firmware->datasize;
	MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
	const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;

	/* Validate the image before trusting the header fields. */
	if (size < sizeof(struct mtw_ucode_hdr)) {
		device_printf(sc->sc_dev, "firmware header too short\n");
		goto fail;
	}

	hdr = (const struct mtw_ucode_hdr *)&fw->hdr;

	if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
	    le32toh(hdr->dlm_len)) {
		device_printf(sc->sc_dev, "firmware payload too short\n");
		goto fail;
	}

	/* The IVB occupies the head of the ILM region. */
	ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
	dlen = le32toh(hdr->dlm_len);

	if (ilen > size || dlen > size) {
		device_printf(sc->sc_dev, "firmware payload too large\n");
		goto fail;
	}

	mtw_write(sc, MTW_FCE_PDMA, 0);
	mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
	mtw_ucode_setup(sc);

	if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
		device_printf(sc->sc_dev, "Could not write ucode errro=%d\n",
		    error);

	device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
	    le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);

	return;
fail:
	return;
}
1224 static usb_error_t
mtw_do_request(struct mtw_softc * sc,struct usb_device_request * req,void * data)1225 mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
1226 {
1227 usb_error_t err;
1228 int ntries = 5;
1229
1230 MTW_LOCK_ASSERT(sc, MA_OWNED);
1231
1232 while (ntries--) {
1233 err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
1234 0, NULL, 2000); // ms seconds
1235 if (err == 0)
1236 break;
1237 MTW_DPRINTF(sc, MTW_DEBUG_USB,
1238 "Control request failed, %s (retrying)\n",
1239 usbd_errstr(err));
1240 mtw_delay(sc, 10);
1241 }
1242 return (err);
1243 }
1244
1245 static int
mtw_read(struct mtw_softc * sc,uint16_t reg,uint32_t * val)1246 mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
1247 {
1248 uint32_t tmp;
1249 int error;
1250
1251 error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
1252 if (error == 0)
1253 *val = le32toh(tmp);
1254 else
1255 *val = 0xffffffff;
1256 return (error);
1257 }
1258
1259 static int
mtw_read_region_1(struct mtw_softc * sc,uint16_t reg,uint8_t * buf,int len)1260 mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
1261 {
1262 usb_device_request_t req;
1263
1264 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1265 req.bRequest = MTW_READ_REGION_1;
1266 USETW(req.wValue, 0);
1267 USETW(req.wIndex, reg);
1268 USETW(req.wLength, len);
1269
1270 return (mtw_do_request(sc, &req, buf));
1271 }
1272
1273 static int
mtw_write_2(struct mtw_softc * sc,uint16_t reg,uint16_t val)1274 mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
1275 {
1276
1277 usb_device_request_t req;
1278 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1279 req.bRequest = MTW_WRITE_2;
1280 USETW(req.wValue, val);
1281 USETW(req.wIndex, reg);
1282 USETW(req.wLength, 0);
1283 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
1284 }
1285
1286 static int
mtw_write(struct mtw_softc * sc,uint16_t reg,uint32_t val)1287 mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
1288 {
1289
1290 int error;
1291
1292 if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0) {
1293
1294 error = mtw_write_2(sc, reg + 2, val >> 16);
1295 }
1296
1297 return (error);
1298 }
1299
1300 static int
mtw_write_region_1(struct mtw_softc * sc,uint16_t reg,const uint8_t * buf,int len)1301 mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, const uint8_t *buf,
1302 int len)
1303 {
1304
1305 usb_device_request_t req;
1306 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1307 req.bRequest = MTW_WRITE_REGION_1;
1308 USETW(req.wValue, 0);
1309 USETW(req.wIndex, reg);
1310 USETW(req.wLength, len);
1311 return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req,
1312 __DECONST(uint8_t *, buf)));
1313 }
1314
1315 static int
mtw_set_region_4(struct mtw_softc * sc,uint16_t reg,uint32_t val,int count)1316 mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
1317 {
1318 int i, error = 0;
1319
1320 KASSERT((count & 3) == 0, ("mte_set_region_4: Invalid data length.\n"));
1321 for (i = 0; i < count && error == 0; i += 4)
1322 error = mtw_write(sc, reg + i, val);
1323 return (error);
1324 }
1325
1326 static int
mtw_efuse_read_2(struct mtw_softc * sc,uint16_t addr,uint16_t * val)1327 mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1328 {
1329
1330 uint32_t tmp;
1331 uint16_t reg;
1332 int error, ntries;
1333
1334 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1335 return (error);
1336
1337 addr *= 2;
1338 /*
1339 * Read one 16-byte block into registers EFUSE_DATA[0-3]:
1340 * DATA0: 3 2 1 0
1341 * DATA1: 7 6 5 4
1342 * DATA2: B A 9 8
1343 * DATA3: F E D C
1344 */
1345 tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
1346 tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
1347 mtw_write(sc, MTW_EFUSE_CTRL, tmp);
1348 for (ntries = 0; ntries < 100; ntries++) {
1349 if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
1350 return (error);
1351 if (!(tmp & MTW_EFSROM_KICK))
1352 break;
1353 DELAY(2);
1354 }
1355 if (ntries == 100)
1356 return (ETIMEDOUT);
1357
1358 if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
1359 *val = 0xffff; // address not found
1360 return (0);
1361 }
1362 // determine to which 32-bit register our 16-bit word belongs
1363 reg = MTW_EFUSE_DATA0 + (addr & 0xc);
1364 if ((error = mtw_read(sc, reg, &tmp)) != 0)
1365 return (error);
1366
1367 *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
1368 return (0);
1369 }
1370
1371 static __inline int
mtw_srom_read(struct mtw_softc * sc,uint16_t addr,uint16_t * val)1372 mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
1373 {
1374 /* either eFUSE ROM or EEPROM */
1375 return (sc->sc_srom_read(sc, addr, val));
1376 }
1377
1378 static int
mtw_bbp_read(struct mtw_softc * sc,uint8_t reg,uint8_t * val)1379 mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
1380 {
1381 uint32_t tmp;
1382 int ntries, error;
1383
1384 for (ntries = 0; ntries < 10; ntries++) {
1385 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1386 return (error);
1387 if (!(tmp & MTW_BBP_CSR_KICK))
1388 break;
1389 }
1390 if (ntries == 10)
1391 return (ETIMEDOUT);
1392
1393 tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
1394 if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
1395 return (error);
1396
1397 for (ntries = 0; ntries < 10; ntries++) {
1398 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1399 return (error);
1400 if (!(tmp & MTW_BBP_CSR_KICK))
1401 break;
1402 }
1403 if (ntries == 10)
1404 return (ETIMEDOUT);
1405
1406 *val = tmp & 0xff;
1407 return (0);
1408 }
1409
1410 static int
mtw_bbp_write(struct mtw_softc * sc,uint8_t reg,uint8_t val)1411 mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
1412 {
1413 uint32_t tmp;
1414 int ntries, error;
1415
1416 for (ntries = 0; ntries < 10; ntries++) {
1417 if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
1418 return (error);
1419 if (!(tmp & MTW_BBP_CSR_KICK))
1420 break;
1421 }
1422 if (ntries == 10)
1423 return (ETIMEDOUT);
1424
1425 tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
1426 return (mtw_write(sc, MTW_BBP_CSR, tmp));
1427 }
1428
1429 static int
mtw_mcu_cmd(struct mtw_softc * sc,u_int8_t cmd,void * buf,int len)1430 mtw_mcu_cmd(struct mtw_softc *sc, u_int8_t cmd, void *buf, int len)
1431 {
1432 sc->sc_idx = 0;
1433 sc->txd_fw[sc->sc_idx]->len = htole16(
1434 len + 8);
1435 sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
1436 (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));
1437
1438 memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
1439 memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
1440 usbd_transfer_start(sc->sc_xfer[7]);
1441 return (0);
1442 }
1443
1444 /*
1445 * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
1446 * Used to adjust per-rate Tx power registers.
1447 */
1448 static __inline uint32_t
b4inc(uint32_t b32,int8_t delta)1449 b4inc(uint32_t b32, int8_t delta)
1450 {
1451 int8_t i, b4;
1452
1453 for (i = 0; i < 8; i++) {
1454 b4 = b32 & 0xf;
1455 b4 += delta;
1456 if (b4 < 0)
1457 b4 = 0;
1458 else if (b4 > 0xf)
1459 b4 = 0xf;
1460 b32 = b32 >> 4 | b4 << 28;
1461 }
1462 return (b32);
1463 }
1464 static void
mtw_get_txpower(struct mtw_softc * sc)1465 mtw_get_txpower(struct mtw_softc *sc)
1466 {
1467 uint16_t val;
1468 int i;
1469
1470 /* Read power settings for 2GHz channels. */
1471 for (i = 0; i < 14; i += 2) {
1472 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
1473 sc->txpow1[i + 0] = (int8_t)(val & 0xff);
1474 sc->txpow1[i + 1] = (int8_t)(val >> 8);
1475 mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
1476 sc->txpow2[i + 0] = (int8_t)(val & 0xff);
1477 sc->txpow2[i + 1] = (int8_t)(val >> 8);
1478 }
1479 /* Fix broken Tx power entries. */
1480 for (i = 0; i < 14; i++) {
1481 if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
1482 sc->txpow1[i] = 5;
1483 if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
1484 sc->txpow2[i] = 5;
1485 MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
1486 "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
1487 sc->txpow1[i], sc->txpow2[i]);
1488 }
1489 }
1490
1491 struct ieee80211_node *
mtw_node_alloc(struct ieee80211vap * vap,const uint8_t mac[IEEE80211_ADDR_LEN])1492 mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1493 {
1494 return (malloc(sizeof(struct mtw_node), M_80211_NODE,
1495 M_NOWAIT | M_ZERO));
1496 }
/*
 * Read the chip's configuration ROM (via the eFUSE backend) and
 * fill in the softc: RF revision, chain counts, MAC address, LNA
 * gains, RSSI offsets and per-rate Tx power tables.  Out-of-range
 * ROM entries are replaced with defaults.  Always returns 0.
 */
static int
mtw_read_eeprom(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int8_t delta_2ghz, delta_5ghz;
	uint16_t val;
	int ridx, ant;

	/* All mtw_srom_read() calls below go through the eFUSE backend. */
	sc->sc_srom_read = mtw_efuse_read_2;

	/* read RF information */
	mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
	sc->rf_rev = val;
	mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
	sc->ntxchains = (val >> 4) & 0xf;
	sc->nrxchains = val & 0xf;
	MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
	    sc->rf_rev, sc->ntxchains, sc->nrxchains);

	/* read ROM version */
	mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
	MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
	    val >> 8);

	/* read MAC address (two bytes packed per ROM word, low byte first) */
	mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
	ic->ic_macaddr[0] = val & 0xff;
	ic->ic_macaddr[1] = val >> 8;
	mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
	ic->ic_macaddr[2] = val & 0xff;
	ic->ic_macaddr[3] = val >> 8;
	mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
	ic->ic_macaddr[4] = val & 0xff;
	ic->ic_macaddr[5] = val >> 8;
#if 0
	printf("eFUSE ROM\n00: ");
	for (int i = 0; i < 256; i++) {
		if (((i % 8) == 0) && i > 0)
			printf("\n%02x: ", i);
		mtw_srom_read(sc, i, &val);
		printf(" %04x", val);
	}
	printf("\n");
#endif
	/* check if RF supports automatic Tx access gain control */
	mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
	device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
	/* A low byte of 0xff means the field is unprogrammed. */
	if ((val & 0xff) != 0xff) {
		sc->ext_5ghz_lna = (val >> 3) & 1;
		sc->ext_2ghz_lna = (val >> 2) & 1;
		/* check if RF supports automatic Tx access gain control */
		sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
		/* check if we have a hardware radio switch */
		sc->rfswitch = val & 1;
	}

	/* read RF frequency offset from EEPROM */
	mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
	if ((val & 0xff) != 0xff)
		sc->rf_freq_offset = val;
	else
		sc->rf_freq_offset = 0;
	MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
	    sc->rf_freq_offset);

	/* Read Tx power settings. */
	mtw_get_txpower(sc);

	/* read Tx power compensation for each Tx rate */
	mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
	delta_2ghz = delta_5ghz = 0;
	/* Bit 7 enables the compensation, bit 6 gives the sign. */
	if ((val & 0xff) != 0xff && (val & 0x80)) {
		delta_2ghz = val & 0xf;
		if (!(val & 0x40)) /* negative number */
			delta_2ghz = -delta_2ghz;
	}
	val >>= 8;
	if ((val & 0xff) != 0xff && (val & 0x80)) {
		delta_5ghz = val & 0xf;
		if (!(val & 0x40)) /* negative number */
			delta_5ghz = -delta_5ghz;
	}
	MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
	    "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
	    delta_5ghz);

	/* Build per-rate power words, applying the 40MHz band deltas. */
	for (ridx = 0; ridx < 5; ridx++) {
		uint32_t reg;

		mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
		reg = val;
		mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
		reg |= (uint32_t)val << 16;

		sc->txpow20mhz[ridx] = reg;
		sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
		sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);

		MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
		    "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
		    "40MHz/5GHz=0x%08x\n",
		    ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
		    sc->txpow40mhz_5ghz[ridx]);
	}

	/* read RSSI offsets and LNA gains from EEPROM */
	val = 0;
	mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
	sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
	sc->rssi_2ghz[1] = val >> 8; /* Ant B */
	mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
	/*
	 * On RT3070 chips (limited to 2 Rx chains), this ROM
	 * field contains the Tx mixer gain for the 2GHz band.
	 */
	if ((val & 0xff) != 0xff)
		sc->txmixgain_2ghz = val & 0x7;
	MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
	    sc->txmixgain_2ghz);
	sc->lna[2] = val >> 8; /* channel group 2 */
	mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
	sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
	sc->rssi_5ghz[1] = val >> 8; /* Ant B */
	mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
	sc->rssi_5ghz[2] = val & 0xff; /* Ant C */

	sc->lna[3] = val >> 8; /* channel group 3 */

	mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
	sc->lna[0] = val & 0xff; /* channel group 0 */
	sc->lna[1] = val >> 8; /* channel group 1 */
	MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);

	/* fix broken 5GHz LNA entries */
	if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
		MTW_DPRINTF(sc, MTW_DEBUG_ROM,
		    "invalid LNA for channel group %d\n", 2);
		sc->lna[2] = sc->lna[1];
	}
	if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
		MTW_DPRINTF(sc, MTW_DEBUG_ROM,
		    "invalid LNA for channel group %d\n", 3);
		sc->lna[3] = sc->lna[1];
	}

	/* fix broken RSSI offset entries */
	for (ant = 0; ant < 3; ant++) {
		if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
			MTW_DPRINTF(sc, MTW_DEBUG_ROM,
			    "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
			    sc->rssi_2ghz[ant]);
			sc->rssi_2ghz[ant] = 0;
		}
		if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
			MTW_DPRINTF(sc, MTW_DEBUG_ROM,
			    "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
			    sc->rssi_5ghz[ant]);
			sc->rssi_5ghz[ant] = 0;
		}
	}
	return (0);
}
1659 static int
mtw_media_change(if_t ifp)1660 mtw_media_change(if_t ifp)
1661 {
1662 struct ieee80211vap *vap = if_getsoftc(ifp);
1663 struct ieee80211com *ic = vap->iv_ic;
1664 const struct ieee80211_txparam *tp;
1665 struct mtw_softc *sc = ic->ic_softc;
1666 uint8_t rate, ridx;
1667
1668 MTW_LOCK(sc);
1669 ieee80211_media_change(ifp);
1670 //tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
1671 tp = &vap->iv_txparms[ic->ic_curmode];
1672 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
1673 struct ieee80211_node *ni;
1674 struct mtw_node *rn;
1675 /* XXX TODO: methodize with MCS rates */
1676 rate =
1677 ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
1678 IEEE80211_RATE_VAL;
1679 for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
1680 if (rt2860_rates[ridx].rate == rate)
1681 break;
1682 }
1683 ni = ieee80211_ref_node(vap->iv_bss);
1684 rn = MTW_NODE(ni);
1685 rn->fix_ridx = ridx;
1686
1687 MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
1688 rn->fix_ridx);
1689 ieee80211_free_node(ni);
1690 }
1691 MTW_UNLOCK(sc);
1692
1693 return (0);
1694 }
1695
1696 void
mtw_set_leds(struct mtw_softc * sc,uint16_t which)1697 mtw_set_leds(struct mtw_softc *sc, uint16_t which)
1698 {
1699 struct mtw_mcu_cmd_8 cmd;
1700 cmd.func = htole32(0x1);
1701 cmd.val = htole32(which);
1702 mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
1703 }
1704 static void
mtw_abort_tsf_sync(struct mtw_softc * sc)1705 mtw_abort_tsf_sync(struct mtw_softc *sc)
1706 {
1707 uint32_t tmp;
1708
1709 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
1710 tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
1711 mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
1712 }
/*
 * net80211 state transition handler.  Called with the com lock held;
 * temporarily trades it for the driver lock while reprogramming the
 * hardware, then chains to the saved vap newstate method.  Rate
 * control is paused across the transition and restarted for every
 * vap still marked running.
 */
static int
mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	const struct ieee80211_txparam *tp;
	struct ieee80211com *ic = vap->iv_ic;
	struct mtw_softc *sc = ic->ic_softc;
	struct mtw_vap *rvp = MTW_VAP(vap);
	enum ieee80211_state ostate;
	uint32_t sta[3];
	uint8_t ratectl = 0;
	uint8_t restart_ratectl = 0;
	uint8_t bid = 1 << rvp->rvp_id;	/* this vap's bit in the run bitmaps */


	ostate = vap->iv_state;
	MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	MTW_LOCK(sc);
	/* Pause rate control while the hardware state changes. */
	ratectl = sc->ratectl_run; /* remember current state */
	usb_callout_stop(&sc->ratectl_ch);
	sc->ratectl_run = MTW_RATECTL_OFF;
	if (ostate == IEEE80211_S_RUN) {
		/* turn link LED off */
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		restart_ratectl = 1;
		if (ostate != IEEE80211_S_RUN)
			break;

		/* This vap is leaving RUN: clear its bits. */
		ratectl &= ~bid;
		sc->runbmap &= ~bid;

		/* abort TSF synchronization if there is no vap running */
		if (--sc->running == 0)
			mtw_abort_tsf_sync(sc);
		break;

	case IEEE80211_S_RUN:
		if (!(sc->runbmap & bid)) {
			if (sc->running++)
				restart_ratectl = 1;
			sc->runbmap |= bid;
		}

		/* Drop any stale beacon template. */
		m_freem(rvp->beacon_mbuf);
		rvp->beacon_mbuf = NULL;

		switch (vap->iv_opmode) {
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_MBSS:
			sc->ap_running |= bid;
			ic->ic_opmode = vap->iv_opmode;
			mtw_update_beacon_cb(vap);
			break;
		case IEEE80211_M_IBSS:
			sc->adhoc_running |= bid;
			if (!sc->ap_running)
				ic->ic_opmode = vap->iv_opmode;
			mtw_update_beacon_cb(vap);
			break;
		case IEEE80211_M_STA:
			sc->sta_running |= bid;
			if (!sc->ap_running && !sc->adhoc_running)
				ic->ic_opmode = vap->iv_opmode;

			/* read statistic counters (clear on read) */
			mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
			    sizeof sta);

			break;
		default:
			ic->ic_opmode = vap->iv_opmode;
			break;
		}

		if (vap->iv_opmode != IEEE80211_M_MONITOR) {
			struct ieee80211_node *ni;

			/* Without a BSS channel the transition must fail. */
			if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
				MTW_UNLOCK(sc);
				IEEE80211_LOCK(ic);
				return (-1);
			}
			mtw_updateslot(ic);
			mtw_enable_mrr(sc);
			mtw_set_txpreamble(sc);
			mtw_set_basicrates(sc);
			ni = ieee80211_ref_node(vap->iv_bss);
			IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
			mtw_set_bssid(sc, sc->sc_bssid);
			ieee80211_free_node(ni);
			mtw_enable_tsf_sync(sc);

			/* enable automatic rate adaptation */
			tp = &vap->iv_txparms[ieee80211_chan2mode(
			    ic->ic_curchan)];
			if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
				ratectl |= bid;
		} else {
			mtw_enable_tsf_sync(sc);
		}

		break;
	default:
		MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
		break;
	}

	/* restart amrr for running VAPs */
	if ((sc->ratectl_run = ratectl) && restart_ratectl) {
		usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
	}
	MTW_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	return (rvp->newstate(vap, nstate, arg));
}
1832
1833 static int
mtw_wme_update(struct ieee80211com * ic)1834 mtw_wme_update(struct ieee80211com *ic)
1835 {
1836 struct chanAccParams chp;
1837 struct mtw_softc *sc = ic->ic_softc;
1838 const struct wmeParams *ac;
1839 int aci, error = 0;
1840 ieee80211_wme_ic_getparams(ic, &chp);
1841 ac = chp.cap_wmeParams;
1842
1843 MTW_LOCK(sc);
1844 /* update MAC TX configuration registers */
1845 for (aci = 0; aci < WME_NUM_AC; aci++) {
1846 error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
1847 ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
1848 ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
1849 if (error)
1850 goto err;
1851 }
1852
1853 /* update SCH/DMA registers too */
1854 error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
1855 ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
1856 ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
1857 if (error)
1858 goto err;
1859 error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
1860 ac[WME_AC_VO].wmep_logcwmin << 12 |
1861 ac[WME_AC_VI].wmep_logcwmin << 8 |
1862 ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
1863 if (error)
1864 goto err;
1865 error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
1866 ac[WME_AC_VO].wmep_logcwmax << 12 |
1867 ac[WME_AC_VI].wmep_logcwmax << 8 |
1868 ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
1869 if (error)
1870 goto err;
1871 error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
1872 ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
1873 if (error)
1874 goto err;
1875 error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
1876 ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);
1877
1878 err:
1879 MTW_UNLOCK(sc);
1880 if (error)
1881 MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");
1882
1883 return (error);
1884 }
1885
1886 static int
mtw_key_set(struct ieee80211vap * vap,struct ieee80211_key * k)1887 mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
1888 {
1889 struct ieee80211com *ic = vap->iv_ic;
1890 struct mtw_softc *sc = ic->ic_softc;
1891 uint32_t i;
1892
1893 i = MTW_CMDQ_GET(&sc->cmdq_store);
1894 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
1895 sc->cmdq[i].func = mtw_key_set_cb;
1896 sc->cmdq[i].arg0 = NULL;
1897 sc->cmdq[i].arg1 = vap;
1898 sc->cmdq[i].k = k;
1899 IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
1900 ieee80211_runtask(ic, &sc->cmdq_task);
1901
1902 /*
1903 * To make sure key will be set when hostapd
1904 * calls iv_key_set() before if_init().
1905 */
1906 if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
1907 MTW_LOCK(sc);
1908 sc->cmdq_key_set = MTW_CMDQ_GO;
1909 MTW_UNLOCK(sc);
1910 }
1911
1912 return (1);
1913 }
/*
 * Deferred worker for mtw_key_set(): writes a pairwise or shared key
 * into the hardware key tables, seeds the IV/EIV block for the
 * cipher, and updates the WCID attribute registers.  Runs from the
 * command queue with the driver lock held.
 */
static void
mtw_key_set_cb(void *arg)
{
	struct mtw_cmdq *cmdq = arg;
	struct ieee80211vap *vap = cmdq->arg1;
	struct ieee80211_key *k = cmdq->k;
	struct ieee80211com *ic = vap->iv_ic;
	struct mtw_softc *sc = ic->ic_softc;
	struct ieee80211_node *ni;
	u_int cipher = k->wk_cipher->ic_cipher;
	uint32_t attr;
	uint16_t base;
	uint8_t mode, wcid, iv[8];
	MTW_LOCK_ASSERT(sc, MA_OWNED);

	/* Locate the node owning the key; hostap keys are per-station. */
	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
		ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
	else
		ni = vap->iv_bss;

	/* map net80211 cipher to RT2860 security mode */
	switch (cipher) {
	case IEEE80211_CIPHER_WEP:
		if (ieee80211_crypto_get_key_len(k) < 8)
			mode = MTW_MODE_WEP40;
		else
			mode = MTW_MODE_WEP104;
		break;
	case IEEE80211_CIPHER_TKIP:
		mode = MTW_MODE_TKIP;
		break;
	case IEEE80211_CIPHER_AES_CCM:
		mode = MTW_MODE_AES_CCMP;
		break;
	default:
		MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
		return;
	}

	/* Select the key-table slot: shared table or per-WCID table. */
	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		wcid = 0; /* NB: update WCID0 for group keys */
		base = MTW_SKEY(0, k->wk_keyix);
	} else {
		wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
		base = MTW_PKEY(wcid);
	}

	if (cipher == IEEE80211_CIPHER_TKIP) {
		/* TODO: note the direct use of tx/rx mic offsets! ew! */
		mtw_write_region_1(sc, base,
		    ieee80211_crypto_get_key_data(k), 16);
		/* rxmic */
		mtw_write_region_1(sc, base + 16,
		    ieee80211_crypto_get_key_rxmic_data(k), 8);
		/* txmic */
		mtw_write_region_1(sc, base + 24,
		    ieee80211_crypto_get_key_txmic_data(k), 8);
	} else {
		/* roundup len to 16-bit: XXX fix write_region_1() instead */
		mtw_write_region_1(sc, base, k->wk_key,
		    (ieee80211_crypto_get_key_len(k) + 1) & ~1);
	}

	if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
	    (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
		/* set initial packet number in IV+EIV */
		if (cipher == IEEE80211_CIPHER_WEP) {
			memset(iv, 0, sizeof iv);
			iv[3] = vap->iv_def_txkey << 6;
		} else {
			if (cipher == IEEE80211_CIPHER_TKIP) {
				iv[0] = k->wk_keytsc >> 8;
				iv[1] = (iv[0] | 0x20) & 0x7f;
				iv[2] = k->wk_keytsc;
			} else { //CCMP
				iv[0] = k->wk_keytsc;
				iv[1] = k->wk_keytsc >> 8;
				iv[2] = 0;
			}
			iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
			iv[4] = k->wk_keytsc >> 16;
			iv[5] = k->wk_keytsc >> 24;
			iv[6] = k->wk_keytsc >> 32;
			iv[7] = k->wk_keytsc >> 40;
		}
		mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
	}

	if (k->wk_flags & IEEE80211_KEY_GROUP) {
		/* install group key */
		mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
		attr &= ~(0xf << (k->wk_keyix * 4));
		attr |= mode << (k->wk_keyix * 4);
		mtw_write(sc, MTW_SKEY_MODE_0_7, attr);

		/*
		 * NOTE(review): bitwise AND against a cipher enum value
		 * looks suspicious -- probably meant an equality test for
		 * WEP; confirm against the RT2860 reference driver.
		 */
		if (cipher & (IEEE80211_CIPHER_WEP)) {
			mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
			attr = (attr & ~0xf) | (mode << 1);
			mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);

			mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);

			mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
			attr = (attr & ~0xf) | (mode << 1);
			mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
		}
	} else {
		/* install pairwise key */
		mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
		attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
		mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
	}
	/* Remember the wcid so mtw_key_delete_cb() can find the slot. */
	k->wk_pad = wcid;
}
2028
2029 /*
2030 * If wlan is destroyed without being brought down i.e. without
2031 * wlan down or wpa_cli terminate, this function is called after
2032 * vap is gone. Don't refer it.
2033 */
2034 static void
mtw_key_delete_cb(void * arg)2035 mtw_key_delete_cb(void *arg)
2036 {
2037 struct mtw_cmdq *cmdq = arg;
2038 struct mtw_softc *sc = cmdq->arg1;
2039 struct ieee80211_key *k = &cmdq->key;
2040 uint32_t attr;
2041 uint8_t wcid;
2042
2043 MTW_LOCK_ASSERT(sc, MA_OWNED);
2044
2045 if (k->wk_flags & IEEE80211_KEY_GROUP) {
2046 /* remove group key */
2047 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n");
2048 mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
2049 attr &= ~(0xf << (k->wk_keyix * 4));
2050 mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
2051 } else {
2052 /* remove pairwise key */
2053 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n",
2054 k->wk_pad);
2055 /* matching wcid was written to wk_pad in mtw_key_set() */
2056 wcid = k->wk_pad;
2057 mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
2058 attr &= ~0xf;
2059 mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
2060 }
2061
2062 k->wk_pad = 0;
2063 }
2064
2065 /*
2066 * return 0 on error
2067 */
2068 static int
mtw_key_delete(struct ieee80211vap * vap,struct ieee80211_key * k)2069 mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
2070 {
2071 struct ieee80211com *ic = vap->iv_ic;
2072 struct mtw_softc *sc = ic->ic_softc;
2073 struct ieee80211_key *k0;
2074 uint32_t i;
2075 if (sc->sc_flags & MTW_RUNNING)
2076 return (1);
2077
2078 /*
2079 * When called back, key might be gone. So, make a copy
2080 * of some values need to delete keys before deferring.
2081 * But, because of LOR with node lock, cannot use lock here.
2082 * So, use atomic instead.
2083 */
2084 i = MTW_CMDQ_GET(&sc->cmdq_store);
2085 MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
2086 sc->cmdq[i].func = mtw_key_delete_cb;
2087 sc->cmdq[i].arg0 = NULL;
2088 sc->cmdq[i].arg1 = sc;
2089 k0 = &sc->cmdq[i].key;
2090 k0->wk_flags = k->wk_flags;
2091 k0->wk_keyix = k->wk_keyix;
2092 /* matching wcid was written to wk_pad in mtw_key_set() */
2093 k0->wk_pad = k->wk_pad;
2094 ieee80211_runtask(ic, &sc->cmdq_task);
2095 return (1); /* return fake success */
2096 }
2097
2098 static void
mtw_ratectl_to(void * arg)2099 mtw_ratectl_to(void *arg)
2100 {
2101 struct mtw_softc *sc = arg;
2102 /* do it in a process context, so it can go sleep */
2103 ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
2104 /* next timeout will be rescheduled in the callback task */
2105 }
2106
2107 /* ARGSUSED */
2108 static void
mtw_ratectl_cb(void * arg,int pending)2109 mtw_ratectl_cb(void *arg, int pending)
2110 {
2111
2112 struct mtw_softc *sc = arg;
2113 struct ieee80211com *ic = &sc->sc_ic;
2114 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2115
2116 if (vap == NULL)
2117 return;
2118
2119 ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc);
2120
2121 usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
2122
2123
2124 }
2125
/*
 * Drain the hardware Tx status FIFO (USB devices deliver no interrupt,
 * so this is polled) and accumulate per-WCID Tx counters in
 * sc->wcid_stats.  The counters are consumed later by mtw_iter_func().
 * Called with the driver lock held.
 */
static void
mtw_drain_fifo(void *arg)
{
	struct mtw_softc *sc = arg;
	uint32_t stat;
	uint16_t(*wstat)[3];	/* indexed by MTW_TXCNT/MTW_SUCCESS/MTW_RETRY */
	uint8_t wcid, mcs, pid;
	int8_t retry;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	for (;;) {
		/* drain Tx status FIFO (maxsize = 16) */
		mtw_read(sc, MTW_TX_STAT_FIFO, &stat);
		MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat);
		/* stop once the FIFO has no more valid entries */
		if (!(stat & MTW_TXQ_VLD))
			break;

		wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff;

		/* if no ACK was requested, no feedback is available */
		if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX ||
		    wcid == 0)
			continue;

		/*
		 * Each entry is in Tx-complete-status format, but the node
		 * it refers to may already be freed by the time we read it,
		 * so calling ieee80211_ratectl_tx_update() directly here
		 * could dereference a dead node.  Instead, accumulate the
		 * stats in the softc and let mtw_iter_func() feed them to
		 * ieee80211_ratectl_tx_update() later.
		 */
		wstat = &(sc->wcid_stats[wcid]);
		(*wstat)[MTW_TXCNT]++;
		if (stat & MTW_TXQ_OK)
			(*wstat)[MTW_SUCCESS]++;
		else
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
		/*
		 * Check if there were retries, ie if the Tx success rate is
		 * different from the requested rate. Note that it works only
		 * because we do not allow rate fallback from OFDM to CCK.
		 * (PID was programmed to MCS+1 at Tx time, so the difference
		 * is the number of rate-fallback retries.)
		 */
		mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f;
		pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf;
		if ((retry = pid - 1 - mcs) > 0) {
			(*wstat)[MTW_TXCNT] += retry;
			(*wstat)[MTW_RETRY] += retry;
		}
	}
	MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt);

	sc->fifo_cnt = 0;
}
2184
/*
 * Per-node rate-control iterator, invoked from the ratectl task for
 * every station.  Collects Tx statistics — the chip-wide TX_STA_CNT0
 * registers in single-vap STA/IBSS mode, otherwise the per-WCID
 * counters polled by mtw_drain_fifo() — feeds them to net80211's rate
 * control, and caches the chosen rate index in rn->amrr_ridx.
 */
static void
mtw_iter_func(void *arg, struct ieee80211_node *ni)
{
	struct mtw_softc *sc = arg;
	MTW_LOCK(sc);
	struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs;
	struct ieee80211vap *vap = ni->ni_vap;
	struct mtw_node *rn = MTW_NODE(ni);
	uint32_t sta[3];
	uint16_t(*wstat)[3];
	int error, ridx;
	uint8_t txrate = 0;

	/* special case: in single-vap STA mode only the BSS node is rated */
	if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
	    ni != vap->iv_bss)
		goto fail;

	txs->flags = IEEE80211_RATECTL_TX_STATS_NODE |
	    IEEE80211_RATECTL_TX_STATS_RETRIES;
	txs->ni = ni;
	if (sc->rvp_cnt <= 1 &&
	    (vap->iv_opmode == IEEE80211_M_IBSS ||
	    vap->iv_opmode == IEEE80211_M_STA)) {
		/*
		 * read statistic counters (clear on read) and update AMRR state
		 */
		error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
		    sizeof sta);
		MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error);
		if (error != 0)
			goto fail;

		/* count failed TX as errors */
		if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
		    le32toh(sta[0]) & 0xffff);

		txs->nretries = (le32toh(sta[1]) >> 16);
		txs->nsuccess = (le32toh(sta[1]) & 0xffff);
		/* nframes = successes + failures (retry count kept separate) */
		txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);

		MTW_DPRINTF(sc, MTW_DEBUG_RATE,
		    "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
		    txs->nsuccess, le32toh(sta[0]) & 0xffff);
	} else {
		wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);

		/* skip WCID 0 (reserved) and out-of-range entries */
		if (wstat == &(sc->wcid_stats[0]) ||
		    wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
			goto fail;

		txs->nretries = (*wstat)[MTW_RETRY];
		txs->nsuccess = (*wstat)[MTW_SUCCESS];
		txs->nframes = (*wstat)[MTW_TXCNT];
		MTW_DPRINTF(sc, MTW_DEBUG_RATE,
		    "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
		    txs->nframes, txs->nsuccess);

		/* counters accumulate between visits; restart the interval */
		memset(wstat, 0, sizeof(*wstat));
	}

	ieee80211_ratectl_tx_update(vap, txs);
	ieee80211_ratectl_rate(ni, NULL, 0);
	txrate = ieee80211_node_get_txrate_dot11rate(ni);

	/* XXX TODO: methodize with MCS rates */
	for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
		MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
		    txrate);
		if (rt2860_rates[ridx].rate == txrate) {
			break;
		}
	}
	rn->amrr_ridx = ridx;
fail:
	MTW_UNLOCK(sc);

	MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
	    txrate, rn->amrr_ridx);
}
2266
2267 static void
mtw_newassoc_cb(void * arg)2268 mtw_newassoc_cb(void *arg)
2269 {
2270 struct mtw_cmdq *cmdq = arg;
2271 struct ieee80211_node *ni = cmdq->arg1;
2272 struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
2273
2274 uint8_t wcid = cmdq->wcid;
2275
2276 MTW_LOCK_ASSERT(sc, MA_OWNED);
2277
2278 mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
2279 IEEE80211_ADDR_LEN);
2280
2281 memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
2282 }
2283
/*
 * net80211 node-association hook.  Validates the WCID derived from the
 * association ID, defers programming of the WCID hardware entry to the
 * command queue, caches the management-frame rate index in the driver
 * node, and kicks the rate-control callout if rate control is active.
 */
static void
mtw_newassoc(struct ieee80211_node *ni, int isnew)
{

	struct mtw_node *mn = MTW_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = vap->iv_ic;
	struct mtw_softc *sc = ic->ic_softc;

	uint8_t rate;
	uint8_t ridx;
	uint8_t wcid;

	wcid = MTW_AID2WCID(ni->ni_associd);

	if (wcid > MTW_WCID_MAX) {
		device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
		return;
	}

	/* only interested in true associations */
	if (isnew && ni->ni_associd != 0) {
		/*
		 * This function can be called from timeout (callout)
		 * context, so defer the hardware update to the
		 * command-queue task.
		 */

		uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
		MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
		sc->cmdq[cnt].func = mtw_newassoc_cb;
		sc->cmdq[cnt].arg0 = NULL;
		sc->cmdq[cnt].arg1 = ni;
		sc->cmdq[cnt].wcid = wcid;
		ieee80211_runtask(ic, &sc->cmdq_task);
	}

	MTW_DPRINTF(sc, MTW_DEBUG_STATE,
	    "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
	    ether_sprintf(ni->ni_macaddr));
	/* look up the rate index for the current mode's management rate */
	rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
	/* XXX TODO: methodize with MCS rates */
	for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
		if (rt2860_rates[ridx].rate == rate)
			break;
	mn->mgt_ridx = ridx;
	MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
	    "rate=%d, ctl_ridx=%d\n", rate, ridx);
	MTW_LOCK(sc);
	if (sc->ratectl_run != MTW_RATECTL_OFF) {
		usb_callout_reset(&sc->ratectl_ch, hz, &mtw_ratectl_to, sc);
	}
	MTW_UNLOCK(sc);

}
2339
2340 /*
2341 * Return the Rx chain with the highest RSSI for a given frame.
2342 */
2343 static __inline uint8_t
mtw_maxrssi_chain(struct mtw_softc * sc,const struct mtw_rxwi * rxwi)2344 mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi)
2345 {
2346 uint8_t rxchain = 0;
2347
2348 if (sc->nrxchains > 1) {
2349 if (rxwi->rssi[1] > rxwi->rssi[rxchain])
2350 rxchain = 1;
2351 if (sc->nrxchains > 2)
2352 if (rxwi->rssi[2] > rxwi->rssi[rxchain])
2353 rxchain = 2;
2354 }
2355 return (rxchain);
2356 }
/*
 * Read the 64-bit TSF timer out of the hardware into *buf.
 * NOTE(review): the value is read as raw register bytes; callers
 * convert with le64toh() — confirm byte order on big-endian hosts.
 */
static void
mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf)
{
	mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf));
}
2362
2363 static void
mtw_recv_mgmt(struct ieee80211_node * ni,struct mbuf * m,int subtype,const struct ieee80211_rx_stats * rxs,int rssi,int nf)2364 mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
2365 const struct ieee80211_rx_stats *rxs, int rssi, int nf)
2366 {
2367 struct ieee80211vap *vap = ni->ni_vap;
2368 struct mtw_softc *sc = vap->iv_ic->ic_softc;
2369 struct mtw_vap *rvp = MTW_VAP(vap);
2370 uint64_t ni_tstamp, rx_tstamp;
2371
2372 rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf);
2373
2374 if (vap->iv_state == IEEE80211_S_RUN &&
2375 (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
2376 subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
2377 ni_tstamp = le64toh(ni->ni_tstamp.tsf);
2378 MTW_LOCK(sc);
2379 mtw_get_tsf(sc, &rx_tstamp);
2380 MTW_UNLOCK(sc);
2381 rx_tstamp = le64toh(rx_tstamp);
2382
2383 if (ni_tstamp >= rx_tstamp) {
2384 MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON,
2385 "ibss merge, tsf %ju tstamp %ju\n",
2386 (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
2387 (void)ieee80211_ibss_merge(ni);
2388 }
2389 }
2390 }
/*
 * Process one received frame.  `m' points at the RXWI header followed
 * by the 802.11 frame; `dmalen' is the byte count from the USB DMA
 * descriptor.  Validates lengths and FCS/ICV/MIC status, fills the
 * radiotap header when active, then hands the frame to net80211
 * (which consumes the mbuf).  On error the mbuf is freed here and
 * ic_ierrors is incremented.
 */
static void
mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct epoch_tracker et;

	struct mtw_rxwi *rxwi;
	uint32_t flags;
	uint16_t len, rxwisize;
	uint8_t ant, rssi;
	int8_t nf;

	rxwisize = sizeof(struct mtw_rxwi);

	/* must hold at least an RXWI plus the smallest 802.11 frame (ACK) */
	if (__predict_false(
	    dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) {
		MTW_DPRINTF(sc, MTW_DEBUG_RECV,
		    "payload is too short: dma length %u < %zu\n", dmalen,
		    rxwisize + sizeof(struct ieee80211_frame_ack));
		goto fail;
	}

	rxwi = mtod(m, struct mtw_rxwi *);
	len = le16toh(rxwi->len) & 0xfff;	/* frame length is 12 bits */
	flags = le32toh(rxwi->flags);
	/* the RXWI-reported length may not exceed the DMA payload */
	if (__predict_false(len > dmalen - rxwisize)) {
		MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n",
		    len, dmalen);
		goto fail;
	}

	/* drop frames with FCS or WEP ICV errors */
	if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) {
		MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n",
		    (flags & MTW_RX_CRCERR) ? "CRC" : "ICV");
		goto fail;
	}

	if (flags & MTW_RX_L2PAD) {
		/* HW inserted 2 pad bytes after the 802.11 header */
		MTW_DPRINTF(sc, MTW_DEBUG_RECV,
		    "received RT2860_RX_L2PAD frame\n");
		len += 2;
	}

	/* strip the RXWI header */
	m->m_data += rxwisize;
	m->m_pkthdr.len = m->m_len = len;

	wh = mtod(m, struct ieee80211_frame *);
	/* HW already decrypted: clear Protected bit, mark for net80211 */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
		m->m_flags |= M_WEP;
	}

	if (len >= sizeof(struct ieee80211_frame_min)) {
		ni = ieee80211_find_rxnode(ic,
		    mtod(m, struct ieee80211_frame_min *));
	} else
		ni = NULL;

	if (ni && ni->ni_flags & IEEE80211_NODE_HT) {
		m->m_flags |= M_AMPDU;
	}

	if (__predict_false(flags & MTW_RX_MICERR)) {
		/* report MIC failures to net80211 for TKIP */
		if (ni != NULL)
			ieee80211_notify_michael_failure(ni->ni_vap, wh,
			    rxwi->keyidx);
		/*
		 * NOTE(review): `ni' holds a node reference here but the
		 * fail path does not ieee80211_free_node() it — confirm
		 * whether this leaks a reference on MIC errors.
		 */
		MTW_DPRINTF(sc, MTW_DEBUG_RECV,
		    "MIC error. Someone is lying.\n");
		goto fail;
	}

	/* use the strongest Rx chain for the reported signal level */
	ant = mtw_maxrssi_chain(sc, rxwi);
	rssi = rxwi->rssi[ant];
	nf = mtw_rssi2dbm(sc, rssi, ant);

	if (__predict_false(ieee80211_radiotap_active(ic))) {
		struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint16_t phy;

		tap->wr_flags = 0;
		if (flags & MTW_RX_L2PAD)
			tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
		tap->wr_antsignal = rssi;
		tap->wr_antenna = ant;
		tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant);
		tap->wr_rate = 2; /* in case it can't be found below */
		/* translate the RXWI PHY/MCS fields to a radiotap rate */
		phy = le16toh(rxwi->phy);
		switch (phy >> MT7601_PHY_SHIFT) {
		case MTW_PHY_CCK:
			switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) {
			case 0:
				tap->wr_rate = 2;
				break;
			case 1:
				tap->wr_rate = 4;
				break;
			case 2:
				tap->wr_rate = 11;
				break;
			case 3:
				tap->wr_rate = 22;
				break;
			}
			if (phy & MTW_PHY_SHPRE)
				tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
			break;
		case MTW_PHY_OFDM:
			switch (phy & MTW_PHY_MCS) {
			case 0:
				tap->wr_rate = 12;
				break;
			case 1:
				tap->wr_rate = 18;
				break;
			case 2:
				tap->wr_rate = 24;
				break;
			case 3:
				tap->wr_rate = 36;
				break;
			case 4:
				tap->wr_rate = 48;
				break;
			case 5:
				tap->wr_rate = 72;
				break;
			case 6:
				tap->wr_rate = 96;
				break;
			case 7:
				tap->wr_rate = 108;
				break;
			}
			break;
		}
	}

	/* pass the frame up; net80211 consumes the mbuf */
	NET_EPOCH_ENTER(et);
	if (ni != NULL) {
		(void)ieee80211_input(ni, m, rssi, nf);
		ieee80211_free_node(ni);
	} else {
		(void)ieee80211_input_all(ic, m, rssi, nf);
	}
	NET_EPOCH_EXIT(et);

	return;

fail:
	m_freem(m);
	counter_u64_add(ic->ic_ierrors, 1);
}
2549
/*
 * Bulk-in (Rx) completion handler.  A single USB transfer may carry
 * several 802.11 frames, each prefixed by a 32-bit DMA-length word
 * and followed by alignment padding; carve them out and hand each to
 * mtw_rx_frame().  Entered with the driver lock held; the lock is
 * dropped while frames are passed up the stack.
 */
static void
mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct mtw_softc *sc = usbd_xfer_softc(xfer);
	struct ieee80211com *ic = &sc->sc_ic;
	struct mbuf *m = NULL;
	struct mbuf *m0;
	uint32_t dmalen, mbuf_len;
	uint16_t rxwisize;
	int xferlen;

	rxwisize = sizeof(struct mtw_rxwi);

	usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n",
		    xferlen);
		/* need at least DMA-len word + RXWI + Rx descriptor */
		if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
		    sizeof(struct mtw_rxd))) {
			MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
			    "xfer too short %d %d\n", xferlen,
			    (int)(sizeof(uint32_t) + rxwisize +
			    sizeof(struct mtw_rxd)));
			goto tr_setup;
		}

		/* take ownership of the filled cluster; processed below */
		m = sc->rx_m;
		sc->rx_m = NULL;

		/* FALLTHROUGH */
	case USB_ST_SETUP:
	tr_setup:

		if (sc->rx_m == NULL) {
			sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
			    MTW_MAX_RXSZ);
		}
		if (sc->rx_m == NULL) {
			MTW_DPRINTF(sc,
			    MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC |
				MTW_DEBUG_USB,
			    "could not allocate mbuf - idle with stall\n");
			counter_u64_add(ic->ic_ierrors, 1);
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading a mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ);
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default: /* Error */
		MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
		    "USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			if (error == USB_ERR_TIMEOUT)
				device_printf(sc->sc_dev, "device timeout %s\n",
				    __func__);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		/* cancelled: release the pending receive cluster */
		if (sc->rx_m != NULL) {
			m_freem(sc->rx_m);
			sc->rx_m = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	/* inputting all the frames must be last */

	MTW_UNLOCK(sc);

	m->m_pkthdr.len = m->m_len = xferlen;

	/* HW can aggregate multiple 802.11 frames in a single USB xfer */
	for (;;) {
		dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;

		/* reject lengths that would overflow `dmalen + 8' below */
		if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
		    ((dmalen & 3) != 0)) {
			MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
			    "bad DMA length %u\n", dmalen);
			break;
		}
		if ((dmalen + 8) > (uint32_t)xferlen) {
			MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
			    "bad DMA length %u > %d\n", dmalen + 8, xferlen);
			break;
		}

		/* If it is the last one or a single frame, we won't copy. */
		if ((xferlen -= dmalen + 8) <= 8) {
			/* trim 32-bit DMA-len header */
			m->m_data += 4;
			m->m_pkthdr.len = m->m_len -= 4;
			mtw_rx_frame(sc, m, dmalen);
			m = NULL; /* don't free source buffer */
			break;
		}

		mbuf_len = dmalen + sizeof(struct mtw_rxd);
		if (__predict_false(mbuf_len > MCLBYTES)) {
			MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
			    "payload is too big: mbuf_len %u\n", mbuf_len);
			counter_u64_add(ic->ic_ierrors, 1);
			break;
		}

		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC,
			    "could not allocate mbuf\n");
			counter_u64_add(ic->ic_ierrors, 1);
			break;
		}
		m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len,
		    mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = mbuf_len;
		mtw_rx_frame(sc, m0, dmalen);

		/* update data ptr */
		m->m_data += mbuf_len + 4;
		m->m_pkthdr.len = m->m_len -= mbuf_len + 4;
	}

	/* make sure we free the source buffer, if any */
	m_freem(m);

#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	MTW_LOCK(sc);
}
2699
2700 static void
mtw_tx_free(struct mtw_endpoint_queue * pq,struct mtw_tx_data * data,int txerr)2701 mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr)
2702 {
2703
2704 ieee80211_tx_complete(data->ni, data->m, txerr);
2705 data->m = NULL;
2706 data->ni = NULL;
2707
2708 STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
2709 pq->tx_nfree++;
2710 }
2711 static void
mtw_bulk_tx_callbackN(struct usb_xfer * xfer,usb_error_t error,u_int index)2712 mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
2713 {
2714 struct mtw_softc *sc = usbd_xfer_softc(xfer);
2715 struct ieee80211com *ic = &sc->sc_ic;
2716 struct mtw_tx_data *data;
2717 struct ieee80211vap *vap = NULL;
2718 struct usb_page_cache *pc;
2719 struct mtw_endpoint_queue *pq = &sc->sc_epq[index];
2720 struct mbuf *m;
2721 usb_frlength_t size;
2722 int actlen;
2723 int sumlen;
2724 usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
2725
2726 switch (USB_GET_STATE(xfer)) {
2727 case USB_ST_TRANSFERRED:
2728 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2729 "transfer complete: %d bytes @ index %d\n", actlen, index);
2730
2731 data = usbd_xfer_get_priv(xfer);
2732 mtw_tx_free(pq, data, 0);
2733 usbd_xfer_set_priv(xfer, NULL);
2734
2735 /* FALLTHROUGH */
2736 case USB_ST_SETUP:
2737 tr_setup:
2738 data = STAILQ_FIRST(&pq->tx_qh);
2739 if (data == NULL)
2740 break;
2741
2742 STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
2743
2744 m = data->m;
2745
2746 size = sizeof(data->desc);
2747 if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) {
2748 MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB,
2749 "data overflow, %u bytes\n", m->m_pkthdr.len);
2750 mtw_tx_free(pq, data, 1);
2751 goto tr_setup;
2752 }
2753
2754 pc = usbd_xfer_get_frame(xfer, 0);
2755 usbd_copy_in(pc, 0, &data->desc, size);
2756 usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
2757 size += m->m_pkthdr.len;
2758 /*
2759 * Align end on a 4-byte boundary, pad 8 bytes (CRC +
2760 * 4-byte padding), and be sure to zero those trailing
2761 * bytes:
2762 */
2763 usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD);
2764 size += ((-size) & 3) + MTW_DMA_PAD;
2765
2766 vap = data->ni->ni_vap;
2767 if (ieee80211_radiotap_active_vap(vap)) {
2768 const struct ieee80211_frame *wh;
2769 struct mtw_tx_radiotap_header *tap = &sc->sc_txtap;
2770 struct mtw_txwi *txwi =
2771 (struct mtw_txwi *)(&data->desc +
2772 sizeof(struct mtw_txd));
2773 int has_l2pad;
2774
2775 wh = mtod(m, struct ieee80211_frame *);
2776 has_l2pad = IEEE80211_HAS_ADDR4(wh) !=
2777 IEEE80211_QOS_HAS_SEQ(wh);
2778
2779 tap->wt_flags = 0;
2780 tap->wt_rate = rt2860_rates[data->ridx].rate;
2781 tap->wt_hwqueue = index;
2782 if (le16toh(txwi->phy) & MTW_PHY_SHPRE)
2783 tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2784 if (has_l2pad)
2785 tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
2786
2787 ieee80211_radiotap_tx(vap, m);
2788 }
2789
2790 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2791 "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len,
2792 size, index);
2793
2794 usbd_xfer_set_frame_len(xfer, 0, size);
2795 usbd_xfer_set_priv(xfer, data);
2796 usbd_transfer_submit(xfer);
2797 mtw_start(sc);
2798
2799 break;
2800
2801 default:
2802 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2803 "USB transfer error, %s\n", usbd_errstr(error));
2804
2805 data = usbd_xfer_get_priv(xfer);
2806
2807 if (data != NULL) {
2808 if (data->ni != NULL)
2809 vap = data->ni->ni_vap;
2810 mtw_tx_free(pq, data, error);
2811 usbd_xfer_set_priv(xfer, NULL);
2812 }
2813
2814 if (vap == NULL)
2815 vap = TAILQ_FIRST(&ic->ic_vaps);
2816
2817 if (error != USB_ERR_CANCELLED) {
2818 if (error == USB_ERR_TIMEOUT) {
2819 device_printf(sc->sc_dev, "device timeout %s\n",
2820 __func__);
2821 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
2822 MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
2823 "cmdq_store=%d\n", i);
2824 sc->cmdq[i].func = mtw_usb_timeout_cb;
2825 sc->cmdq[i].arg0 = vap;
2826 ieee80211_runtask(ic, &sc->cmdq_task);
2827 }
2828
2829 /*
2830 * Try to clear stall first, also if other
2831 * errors occur, hence clearing stall
2832 * introduces a 50 ms delay:
2833 */
2834 usbd_xfer_set_stall(xfer);
2835 goto tr_setup;
2836 }
2837 break;
2838 }
2839 #ifdef IEEE80211_SUPPORT_SUPERG
2840 /* XXX TODO: make this deferred rather than unlock/relock */
2841 /* XXX TODO: should only do the QoS AC this belongs to */
2842 if (pq->tx_nfree >= MTW_TX_RING_COUNT) {
2843 MTW_UNLOCK(sc);
2844 ieee80211_ff_flush_all(ic);
2845 MTW_LOCK(sc);
2846 }
2847 #endif
2848 }
2849
/*
 * Completion handler for the firmware-download transfer.  The firmware
 * image is pushed in chunks (full chunks are 0x2c44 bytes) indexed by
 * sc->sc_idx; after the final (4th) chunk the IVB is written and the
 * MCU is polled until it reports ready.  sc->fwloading records the
 * outcome and waiters are woken via wakeup(&sc->fwloading).
 */
static void
mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct mtw_softc *sc = usbd_xfer_softc(xfer);

	int actlen;
	/*
	 * NOTE(review): `tmp' is an int passed to mtw_read_cfg() (which
	 * presumably expects a uint32_t *) and is left unchanged when
	 * that read fails, so the MTW_MCU_READY comparison below may
	 * test an uninitialized value on the first iteration — confirm
	 * and initialize.
	 */
	int ntries, tmp;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
	usbd_xfer_set_priv(xfer, NULL);
	switch (USB_GET_STATE(xfer)) {

	case USB_ST_TRANSFERRED:
		sc->sc_sent += actlen;
		memset(sc->txd_fw[sc->sc_idx], 0, actlen);

		/* short first chunk: wait for more data before advancing */
		if (actlen < 0x2c44 && sc->sc_idx == 0) {
			return;
		}
		if (sc->sc_idx == 3) {
			/* last chunk sent: write the IVB and poll the MCU */
			if ((error = mtw_write_ivb(sc, sc->sc_ivb_1,
			    MTW_MCU_IVB_LEN)) != 0) {
				device_printf(sc->sc_dev,
				    "Could not write ivb error: %d\n", error);
			}

			mtw_delay(sc, 10);
			for (ntries = 0; ntries < 300; ntries++) {
				if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
				    &tmp)) != 0) {
					device_printf(sc->sc_dev,
					    "Could not read cfg error: %d\n", error);

				}
				if (tmp == MTW_MCU_READY) {
					MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE,
					    "mcu reaady %d\n", tmp);
					sc->fwloading = 1;
					break;
				}

				mtw_delay(sc, 30);
			}
			/* MCU never reported ready: flag the failure */
			if (ntries == 300)
				sc->fwloading = 0;
			wakeup(&sc->fwloading);
			return;
		}

		if (actlen == 0x2c44) {
			/* full chunk transferred: advance to the next one */
			sc->sc_idx++;
			DELAY(1000);
		}

		/* FALLTHROUGH -- submit the next chunk below */
	case USB_ST_SETUP: {
		int dlen = 0;
		dlen = sc->txd_fw[sc->sc_idx]->len;

		/* tell the MCU where the chunk lands and how big it is */
		mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent);
		mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16));

		usbd_xfer_set_frame_len(xfer, 0, dlen);
		usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen);

		usbd_transfer_submit(xfer);
		break;

	/*
	 * NOTE(review): this default label is nested inside the
	 * USB_ST_SETUP compound block above.  That is valid C (case
	 * labels reach into nested blocks) but confusing — consider
	 * restructuring.
	 */
	default: /* Error */
		device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__,
		    usbd_errstr(error));
		sc->fwloading = 0;
		wakeup(&sc->fwloading);
		/*
		 * Print error message and clear stall
		 * for example.
		 */
		break;
	}
	/*
	 * Here it is safe to do something without the private
	 * USB mutex locked.
	 */
	}
	return;
}
2939 static void
mtw_bulk_tx_callback0(struct usb_xfer * xfer,usb_error_t error)2940 mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
2941 {
2942 mtw_bulk_tx_callbackN(xfer, error, 0);
2943 }
2944
2945 static void
mtw_bulk_tx_callback1(struct usb_xfer * xfer,usb_error_t error)2946 mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
2947 {
2948
2949
2950 mtw_bulk_tx_callbackN(xfer, error, 1);
2951 }
2952
2953 static void
mtw_bulk_tx_callback2(struct usb_xfer * xfer,usb_error_t error)2954 mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
2955 {
2956 mtw_bulk_tx_callbackN(xfer, error, 2);
2957 }
2958
2959 static void
mtw_bulk_tx_callback3(struct usb_xfer * xfer,usb_error_t error)2960 mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
2961 {
2962 mtw_bulk_tx_callbackN(xfer, error, 3);
2963 }
2964
2965 static void
mtw_bulk_tx_callback4(struct usb_xfer * xfer,usb_error_t error)2966 mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
2967 {
2968 mtw_bulk_tx_callbackN(xfer, error, 4);
2969 }
2970
2971 static void
mtw_bulk_tx_callback5(struct usb_xfer * xfer,usb_error_t error)2972 mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
2973 {
2974 mtw_bulk_tx_callbackN(xfer, error, 5);
2975 }
2976
/*
 * Build the Tx descriptor (TXD) and Tx Wireless Information structure
 * (TXWI) in data->desc for the frame in data->m, using the rate index
 * previously chosen in data->ridx.
 */
static void
mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
{
	struct mbuf *m = data->m;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = data->ni->ni_vap;
	struct ieee80211_frame *wh;
	struct mtw_txd *txd;
	struct mtw_txwi *txwi;
	uint16_t xferlen, txwisize;
	uint16_t mcs;
	uint8_t ridx = data->ridx;
	uint8_t pad;

	/* get MCS code from rate index */
	mcs = rt2860_rates[ridx].mcs;

	txwisize = sizeof(*txwi);
	xferlen = txwisize + m->m_pkthdr.len;

	/* roundup to 32-bit alignment */
	xferlen = (xferlen + 3) & ~3;

	txd = (struct mtw_txd *)&data->desc;
	txd->len = htole16(xferlen);

	wh = mtod(m, struct ieee80211_frame *);

	/*
	 * Either both are true or both are false: the 802.11 header is
	 * then nicely aligned to 32 bits.  So, no L2 padding.
	 */
	if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
		pad = 0;
	else
		pad = 2;

	/* setup TX Wireless Information (immediately after the TXD) */
	txwi = (struct mtw_txwi *)(txd + 1);
	txwi->len = htole16(m->m_pkthdr.len - pad);
	if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
		/* CCK: short preamble for every rate except 1 Mbps */
		mcs |= MTW_PHY_CCK;
		if (ridx != MTW_RIDX_CCK1 &&
		    (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
			mcs |= MTW_PHY_SHPRE;
	} else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
		mcs |= MTW_PHY_OFDM;
	} else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
		/* XXX TODO: [adrian] set short preamble for MCS? */
		mcs |= MTW_PHY_HT; /* Mixed, not greenfield */
	}
	txwi->phy = htole16(mcs);

	/* check if RTS/CTS or CTS-to-self protection is required */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) ||
	    ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt2860_rates[ridx].phy == IEEE80211_T_OFDM) ||
	    ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt2860_rates[ridx].phy == IEEE80211_T_HT)))
		txwi->txop |= MTW_TX_TXOP_HT;
	else
		txwi->txop |= MTW_TX_TXOP_BACKOFF;

}
3042
3043 /* This function must be called locked */
3044 static int
mtw_tx(struct mtw_softc * sc,struct mbuf * m,struct ieee80211_node * ni)3045 mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3046 {
3047 struct ieee80211com *ic = &sc->sc_ic;
3048 struct ieee80211vap *vap = ni->ni_vap;
3049 struct ieee80211_frame *wh;
3050
3051
3052 //const struct ieee80211_txparam *tp = ni->ni_txparms;
3053 struct mtw_node *rn = MTW_NODE(ni);
3054 struct mtw_tx_data *data;
3055 struct mtw_txd *txd;
3056 struct mtw_txwi *txwi;
3057 uint16_t qos;
3058 uint16_t dur;
3059 uint16_t qid;
3060 uint8_t type;
3061 uint8_t tid;
3062 uint16_t ridx;
3063 uint8_t ctl_ridx;
3064 uint16_t qflags;
3065 uint8_t xflags = 0;
3066
3067 int hasqos;
3068
3069 MTW_LOCK_ASSERT(sc, MA_OWNED);
3070
3071 wh = mtod(m, struct ieee80211_frame *);
3072 const struct ieee80211_txparam *tp = ni->ni_txparms;
3073 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3074
3075 qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 |
3076 MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA);
3077
3078 if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
3079 uint8_t *frm;
3080 frm = ieee80211_getqos(wh);
3081
3082
3083 //device_printf(sc->sc_dev,"JSS:frm:%d",*frm);
3084 qos = le16toh(*(const uint16_t *)frm);
3085 tid = ieee80211_gettid(wh);
3086 qid = TID_TO_WME_AC(tid);
3087 qflags |= MTW_TXD_QSEL_EDCA;
3088 } else {
3089 qos = 0;
3090 tid = 0;
3091 qid = WME_AC_BE;
3092 }
3093 if (type & IEEE80211_FC0_TYPE_MGT) {
3094 qid = 0;
3095 }
3096
3097 if (type != IEEE80211_FC0_TYPE_DATA)
3098 qflags |= htole16(MTW_TXD_WIV);
3099
3100 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3101 type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
3102 /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */
3103 ridx = (ic->ic_curmode == IEEE80211_MODE_11A
3104 || ic->ic_curmode == IEEE80211_MODE_11NA) ?
3105 MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
3106 if (type == IEEE80211_MODE_11NG) {
3107 ridx = 12;
3108 }
3109 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3110 } else {
3111 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3112 ridx = rn->fix_ridx;
3113
3114 } else {
3115 ridx = rn->amrr_ridx;
3116 ctl_ridx = rt2860_rates[ridx].ctl_ridx;
3117 }
3118 }
3119
3120 if (hasqos)
3121 xflags = 0;
3122 else
3123 xflags = MTW_TX_NSEQ;
3124
3125 if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
3126 (!hasqos ||
3127 (qos & IEEE80211_QOS_ACKPOLICY) !=
3128 IEEE80211_QOS_ACKPOLICY_NOACK)) {
3129 xflags |= MTW_TX_ACK;
3130 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3131 dur = rt2860_rates[ctl_ridx].sp_ack_dur;
3132 else
3133 dur = rt2860_rates[ctl_ridx].lp_ack_dur;
3134 USETW(wh->i_dur, dur);
3135 }
3136 /* reserve slots for mgmt packets, just in case */
3137 if (sc->sc_epq[qid].tx_nfree < 3) {
3138 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
3139 return (-1);
3140 }
3141
3142 data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
3143 STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
3144 sc->sc_epq[qid].tx_nfree--;
3145
3146 txd = (struct mtw_txd *)&data->desc;
3147 txd->flags = qflags;
3148
3149 txwi = (struct mtw_txwi *)(txd + 1);
3150 txwi->xflags = xflags;
3151 txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
3152
3153 MTW_AID2WCID(ni->ni_associd) :
3154 0xff;
3155
3156 /* clear leftover garbage bits */
3157 txwi->flags = 0;
3158 txwi->txop = 0;
3159
3160 data->m = m;
3161 data->ni = ni;
3162 data->ridx = ridx;
3163
3164 ieee80211_output_seqno_assign(ni, -1, m);
3165
3166 mtw_set_tx_desc(sc, data);
3167
3168 /*
3169 * The chip keeps track of 2 kind of Tx stats,
3170 * * TX_STAT_FIFO, for per WCID stats, and
3171 * * TX_STA_CNT0 for all-TX-in-one stats.
3172 *
3173 * To use FIFO stats, we need to store MCS into the driver-private
3174 * PacketID field. So that, we can tell whose stats when we read them.
3175 * We add 1 to the MCS because setting the PacketID field to 0 means
3176 * that we don't want feedback in TX_STAT_FIFO.
3177 * And, that's what we want for STA mode, since TX_STA_CNT0 does the
3178 * job.
3179 *
3180 * FIFO stats doesn't count Tx with WCID 0xff, so we do this in
3181 * run_tx().
3182 */
3183
3184 if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
3185 vap->iv_opmode == IEEE80211_M_MBSS) {
3186
3187 /*
3188 * Unlike PCI based devices, we don't get any interrupt from
3189 * USB devices, so we simulate FIFO-is-full interrupt here.
3190 * Ralink recommends to drain FIFO stats every 100 ms, but 16
3191 * slots quickly get fulled. To prevent overflow, increment a
3192 * counter on every FIFO stat request, so we know how many slots
3193 * are left. We do this only in HOSTAP or multiple vap mode
3194 * since FIFO stats are used only in those modes. We just drain
3195 * stats. AMRR gets updated every 1 sec by run_ratectl_cb() via
3196 * callout. Call it early. Otherwise overflow.
3197 */
3198 if (sc->fifo_cnt++ == 10) {
3199 /*
3200 * With multiple vaps or if_bridge, if_start() is called
3201 * with a non-sleepable lock, tcpinp. So, need to defer.
3202 */
3203 uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
3204 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i);
3205 sc->cmdq[i].func = mtw_drain_fifo;
3206 sc->cmdq[i].arg0 = sc;
3207 ieee80211_runtask(ic, &sc->cmdq_task);
3208 }
3209 }
3210
3211 STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
3212 usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]);
3213
3214 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3215 "sending data frame len=%d rate=%d qid=%d\n",
3216 m->m_pkthdr.len +
3217 (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
3218 rt2860_rates[ridx].rate, qid);
3219
3220 return (0);
3221 }
3222
/*
 * Transmit an 802.11 management frame on endpoint queue 0 (BE).
 * Returns 0 on success or EIO when the ring is full; on error the
 * caller remains responsible for freeing the mbuf.
 */
static int
mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mtw_node *rn = MTW_NODE(ni);
	struct mtw_tx_data *data;
	struct ieee80211_frame *wh;
	struct mtw_txd *txd;
	struct mtw_txwi *txwi;
	uint8_t type;
	uint16_t dur;
	uint8_t ridx = rn->mgt_ridx;	/* management frames use the node's mgt rate */
	uint8_t xflags = 0;
	uint8_t wflags = 0;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	wh = mtod(m, struct ieee80211_frame *);

	/* tell hardware to add timestamp for probe responses */
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
		wflags |= MTW_TX_TS;
	/* unicast frames: request an ACK and fill in the NAV duration */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		xflags |= MTW_TX_ACK;

		dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
		    ic->ic_flags & IEEE80211_F_SHPREAMBLE);
		USETW(wh->i_dur, dur);
	}
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	if (sc->sc_epq[0].tx_nfree == 0)
		/* let caller free mbuf */
		return (EIO);
	/* take a free slot from endpoint queue 0 */
	data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
	STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
	sc->sc_epq[0].tx_nfree--;

	txd = (struct mtw_txd *)&data->desc;
	txd->flags = htole16(
	    MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
	if (type != IEEE80211_FC0_TYPE_DATA)
		txd->flags |= htole16(MTW_TXD_WIV);

	/* the TXWI immediately follows the TXD in the descriptor buffer */
	txwi = (struct mtw_txwi *)(txd + 1);
	txwi->wcid = 0xff;	/* not associated with a station table entry */
	txwi->xflags = xflags;
	txwi->flags = wflags;

	txwi->txop = 0; /* clear leftover garbage bits */

	data->m = m;
	data->ni = ni;
	data->ridx = ridx;

	/*
	 * NOTE(review): unlike mtw_tx()/mtw_sendprot()/mtw_tx_param(),
	 * this path does not call mtw_set_tx_desc() before queueing --
	 * confirm the remaining descriptor fields are filled elsewhere.
	 */
	MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
	    m->m_pkthdr.len +
	    (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
	    rt2860_rates[ridx].rate);

	STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);

	usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]);

	return (0);
}
3290
/*
 * Queue an RTS or CTS-to-self protection frame ahead of frame 'm'.
 * Returns 0 on success or ENOBUFS when no slot/mbuf is available;
 * the caller keeps ownership of 'm' in all cases.
 */
static int
mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m,
    struct ieee80211_node *ni, int prot, int rate)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mtw_tx_data *data;
	struct mtw_txd *txd;
	struct mtw_txwi *txwi;
	struct mbuf *mprot;
	int ridx;
	int protrate;
	uint8_t wflags = 0;
	uint8_t xflags = 0;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	/* check that there are free slots before allocating the mbuf */
	if (sc->sc_epq[0].tx_nfree == 0)
		/* let caller free mbuf */
		return (ENOBUFS);

	/* build the RTS/CTS frame protecting 'm' at the given rate */
	mprot = ieee80211_alloc_prot(ni, m, rate, prot);
	if (mprot == NULL) {
		if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
		MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n");
		return (ENOBUFS);
	}

	protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
	wflags = MTW_TX_FRAG;
	xflags = 0;
	/* only RTS expects a reply (the CTS); CTS-to-self does not */
	if (prot == IEEE80211_PROT_RTSCTS)
		xflags |= MTW_TX_ACK;

	data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
	STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
	sc->sc_epq[0].tx_nfree--;

	txd = (struct mtw_txd *)&data->desc;
	/*
	 * NOTE(review): the other tx paths assign
	 * htole16(MTW_TXD_... | MTW_TXD_QSEL_EDCA) here; this uses the
	 * raw RT2860 constant with no byte-swap -- confirm intended.
	 */
	txd->flags = RT2860_TX_QSEL_EDCA;
	txwi = (struct mtw_txwi *)(txd + 1);
	txwi->wcid = 0xff;	/* not associated with a station table entry */
	txwi->flags = wflags;
	txwi->xflags = xflags;
	txwi->txop = 0; /* clear leftover garbage bits */

	/* the protection frame holds its own node reference */
	data->m = mprot;
	data->ni = ieee80211_ref_node(ni);

	/* XXX TODO: methodize with MCS rates */
	for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
		if (rt2860_rates[ridx].rate == protrate)
			break;
	data->ridx = ridx;

	mtw_set_tx_desc(sc, data);
	MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n",
	    m->m_pkthdr.len, rate);

	STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);

	/*
	 * NOTE(review): siblings use a named index (MTW_BULK_TX_BE etc.);
	 * verify sc_xfer[0] is the intended endpoint here.
	 */
	usbd_transfer_start(sc->sc_xfer[0]);

	return (0);
}
3356
/*
 * Transmit a raw frame using explicit bpf parameters (rate, ACK policy,
 * RTS/CTS protection).  Returns 0 on success; on error the caller frees
 * the mbuf.
 */
static int
mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct mtw_tx_data *data;
	struct mtw_txd *txd;
	struct mtw_txwi *txwi;
	uint8_t ridx;
	uint8_t rate;
	uint8_t opflags = 0;
	uint8_t xflags = 0;
	int error;

	MTW_LOCK_ASSERT(sc, MA_OWNED);

	KASSERT(params != NULL, ("no raw xmit params"));

	rate = params->ibp_rate0;
	if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
		/* let caller free mbuf */
		return (EINVAL);
	}

	/* request an ACK unless the caller asked for none */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		xflags |= MTW_TX_ACK;
	/* queue a protection frame first if requested */
	if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) {
		error = mtw_sendprot(sc, m, ni,
		    params->ibp_flags & IEEE80211_BPF_RTS ?
		    IEEE80211_PROT_RTSCTS :
		    IEEE80211_PROT_CTSONLY,
		    rate);
		if (error) {
			device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__,
			    __LINE__, error);
			return (error);
		}
		/* only SIFS between the protection frame and the data */
		opflags |= MTW_TX_TXOP_SIFS;
	}

	if (sc->sc_epq[0].tx_nfree == 0) {
		/* let caller free mbuf */
		MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
		    "sending raw frame, but tx ring is full\n");
		return (EIO);
	}
	data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
	STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
	sc->sc_epq[0].tx_nfree--;

	txd = (struct mtw_txd *)&data->desc;
	txd->flags = htole16(
	    MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
	// txd->flags = htole16(MTW_TXD_QSEL_EDCA);
	txwi = (struct mtw_txwi *)(txd + 1);
	txwi->wcid = 0xff;	/* not associated with a station table entry */
	txwi->xflags = xflags;
	txwi->txop = opflags;
	txwi->flags = 0; /* clear leftover garbage bits */

	data->m = m;
	data->ni = ni;
	/* XXX TODO: methodize with MCS rates */
	for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
		if (rt2860_rates[ridx].rate == rate)
			break;
	data->ridx = ridx;

	ieee80211_output_seqno_assign(ni, -1, m);

	mtw_set_tx_desc(sc, data);

	MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
	    m->m_pkthdr.len, rate);

	STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);

	usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]);

	return (0);
}
3438
3439 static int
mtw_raw_xmit(struct ieee80211_node * ni,struct mbuf * m,const struct ieee80211_bpf_params * params)3440 mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3441 const struct ieee80211_bpf_params *params)
3442 {
3443 struct mtw_softc *sc = ni->ni_ic->ic_softc;
3444 int error = 0;
3445 MTW_LOCK(sc);
3446 /* prevent management frames from being sent if we're not ready */
3447 if (!(sc->sc_flags & MTW_RUNNING)) {
3448 error = ENETDOWN;
3449 goto done;
3450 }
3451
3452 if (params == NULL) {
3453 /* tx mgt packet */
3454 if ((error = mtw_tx_mgt(sc, m, ni)) != 0) {
3455 MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n");
3456 goto done;
3457 }
3458 } else {
3459 /* tx raw packet with param */
3460 if ((error = mtw_tx_param(sc, m, ni, params)) != 0) {
3461 MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
3462 "tx with param failed\n");
3463 goto done;
3464 }
3465 }
3466
3467 done:
3468
3469 MTW_UNLOCK(sc);
3470
3471 if (error != 0) {
3472 if (m != NULL)
3473 m_freem(m);
3474 }
3475
3476 return (error);
3477 }
3478
3479 static int
mtw_transmit(struct ieee80211com * ic,struct mbuf * m)3480 mtw_transmit(struct ieee80211com *ic, struct mbuf *m)
3481 {
3482 struct mtw_softc *sc = ic->ic_softc;
3483 int error;
3484 MTW_LOCK(sc);
3485 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3486 MTW_UNLOCK(sc);
3487 return (ENXIO);
3488 }
3489 error = mbufq_enqueue(&sc->sc_snd, m);
3490 if (error) {
3491 MTW_UNLOCK(sc);
3492 return (error);
3493 }
3494 mtw_start(sc);
3495 MTW_UNLOCK(sc);
3496
3497 return (0);
3498 }
3499
3500 static void
mtw_start(struct mtw_softc * sc)3501 mtw_start(struct mtw_softc *sc)
3502 {
3503 struct ieee80211_node *ni;
3504 struct mbuf *m;
3505
3506 MTW_LOCK_ASSERT(sc, MA_OWNED);
3507
3508 if ((sc->sc_flags & MTW_RUNNING) == 0) {
3509
3510 return;
3511 }
3512 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3513 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3514 if (mtw_tx(sc, m, ni) != 0) {
3515 mbufq_prepend(&sc->sc_snd, m);
3516 break;
3517 }
3518 }
3519 }
3520
3521 static void
mtw_parent(struct ieee80211com * ic)3522 mtw_parent(struct ieee80211com *ic)
3523 {
3524
3525 struct mtw_softc *sc = ic->ic_softc;
3526
3527 MTW_LOCK(sc);
3528 if (sc->sc_detached) {
3529 MTW_UNLOCK(sc);
3530 return;
3531 }
3532
3533 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3534 mtw_init_locked(sc);
3535 MTW_UNLOCK(sc);
3536 ieee80211_start_all(ic);
3537 return;
3538 }
3539 if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
3540 mtw_update_promisc_locked(sc);
3541 MTW_UNLOCK(sc);
3542 return;
3543 }
3544 if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
3545 ic->ic_nrunning == 0) {
3546 mtw_stop(sc);
3547 MTW_UNLOCK(sc);
3548 return;
3549 }
3550 return;
3551 }
3552
3553 static void
mt7601_set_agc(struct mtw_softc * sc,uint8_t agc)3554 mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
3555 {
3556 uint8_t bbp;
3557
3558 mtw_bbp_write(sc, 66, agc);
3559 mtw_bbp_write(sc, 195, 0x87);
3560 bbp = (agc & 0xf0) | 0x08;
3561 mtw_bbp_write(sc, 196, bbp);
3562 }
3563
3564 static int
mtw_mcu_calibrate(struct mtw_softc * sc,int func,uint32_t val)3565 mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
3566 {
3567 struct mtw_mcu_cmd_8 cmd;
3568
3569 cmd.func = htole32(func);
3570 cmd.val = htole32(val);
3571 return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
3572 }
3573
3574 static int
mtw_rf_write(struct mtw_softc * sc,uint8_t bank,uint8_t reg,uint8_t val)3575 mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
3576 {
3577 uint32_t tmp;
3578 int error, ntries, shift;
3579
3580 for (ntries = 0; ntries < 10; ntries++) {
3581 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3582 return (error);
3583 if (!(tmp & MTW_RF_CSR_KICK))
3584 break;
3585 }
3586 if (ntries == 10)
3587 return (ETIMEDOUT);
3588
3589 if (sc->asic_ver == 0x7601)
3590 shift = MT7601_BANK_SHIFT;
3591 else
3592 shift = MT7610_BANK_SHIFT;
3593
3594 tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
3595 reg << 8 | val;
3596 return (mtw_write(sc, MTW_RF_CSR, tmp));
3597 }
3598
/*
 * Band/bandwidth setup performed when the channel group changes:
 * select the Tx band, force 20 MHz bandwidth and rerun the BBP
 * calibration sequence, then reset the AGC.
 */
void
mtw_select_chan_group(struct mtw_softc *sc, int group)
{
	uint32_t tmp;
	uint8_t bbp;

	/* Tx band 20MHz 2G */
	mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
	tmp &= ~(
	    MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
	tmp |= (group == 0) ? MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G;
	mtw_write(sc, MTW_TX_BAND_CFG, tmp);

	/* select 20 MHz bandwidth */
	mtw_bbp_read(sc, 4, &bbp);
	bbp &= ~0x18;
	bbp |= 0x40;
	mtw_bbp_write(sc, 4, bbp);

	/* calibrate BBP (opaque register/value pairs; order matters) */
	mtw_bbp_write(sc, 69, 0x12);
	mtw_bbp_write(sc, 91, 0x07);
	mtw_bbp_write(sc, 195, 0x23);
	mtw_bbp_write(sc, 196, 0x17);
	mtw_bbp_write(sc, 195, 0x24);
	mtw_bbp_write(sc, 196, 0x06);
	mtw_bbp_write(sc, 195, 0x81);
	mtw_bbp_write(sc, 196, 0x12);
	mtw_bbp_write(sc, 195, 0x83);
	mtw_bbp_write(sc, 196, 0x17);
	mtw_rf_write(sc, 5, 8, 0x00);
	// mtw_mcu_calibrate(sc, 0x6, 0x10001);

	/* set initial AGC value */
	mt7601_set_agc(sc, 0x14);
}
3635
3636 static int
mtw_rf_read(struct mtw_softc * sc,uint8_t bank,uint8_t reg,uint8_t * val)3637 mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val)
3638 {
3639 uint32_t tmp;
3640 int error, ntries, shift;
3641
3642 for (ntries = 0; ntries < 100; ntries++) {
3643 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3644 return (error);
3645 if (!(tmp & MTW_RF_CSR_KICK))
3646 break;
3647 }
3648 if (ntries == 100)
3649 return (ETIMEDOUT);
3650
3651 if (sc->asic_ver == 0x7601)
3652 shift = MT7601_BANK_SHIFT;
3653 else
3654 shift = MT7610_BANK_SHIFT;
3655
3656 tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8;
3657 if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0)
3658 return (error);
3659
3660 for (ntries = 0; ntries < 100; ntries++) {
3661 if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
3662 return (error);
3663 if (!(tmp & MTW_RF_CSR_KICK))
3664 break;
3665 }
3666 if (ntries == 100)
3667 return (ETIMEDOUT);
3668
3669 *val = tmp & 0xff;
3670 return (0);
3671 }
/*
 * Tune the MT7601 radio to IEEE channel 'chan' and apply the
 * per-channel Tx power and LNA values from the EEPROM.
 */
static void
mt7601_set_chan(struct mtw_softc *sc, u_int chan)
{
	uint32_t tmp;
	uint8_t bbp, rf, txpow1;
	int i;
	/*
	 * Find the settings for this channel.  NOTE(review): the scan is
	 * unbounded; callers must only pass channels that exist in
	 * mt7601_rf_chan[].
	 */
	for (i = 0; mt7601_rf_chan[i].chan != chan; i++)
		;

	mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17);
	mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18);
	mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19);
	mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20);

	/* use Tx power values from EEPROM */
	txpow1 = sc->txpow1[i];

	/* Tx automatic level control */
	mtw_read(sc, MTW_TX_ALC_CFG0, &tmp);
	tmp &= ~0x3f3f;
	tmp |= (txpow1 & 0x3f);
	mtw_write(sc, MTW_TX_ALC_CFG0, tmp);

	/* LNA */
	mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]);
	mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]);
	mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]);

	/* VCO calibration */
	mtw_rf_write(sc, 0, 4, 0x0a);
	mtw_rf_write(sc, 0, 5, 0x20);
	mtw_rf_read(sc, 0, 4, &rf);
	mtw_rf_write(sc, 0, 4, rf | 0x80);

	/* select 20 MHz bandwidth */
	mtw_bbp_read(sc, 4, &bbp);
	bbp &= ~0x18;
	bbp |= 0x40;
	mtw_bbp_write(sc, 4, bbp);
	mtw_bbp_write(sc, 178, 0xff);
}
3714
3715 static int
mtw_set_chan(struct mtw_softc * sc,struct ieee80211_channel * c)3716 mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c)
3717 {
3718 struct ieee80211com *ic = &sc->sc_ic;
3719 u_int chan, group;
3720
3721 chan = ieee80211_chan2ieee(ic, c);
3722 if (chan == 0 || chan == IEEE80211_CHAN_ANY)
3723 return (EINVAL);
3724
3725 /* determine channel group */
3726 if (chan <= 14)
3727 group = 0;
3728 else if (chan <= 64)
3729 group = 1;
3730 else if (chan <= 128)
3731 group = 2;
3732 else
3733 group = 3;
3734
3735 if (group != sc->sc_chan_group || !sc->sc_bw_calibrated)
3736 mtw_select_chan_group(sc, group);
3737
3738 sc->sc_chan_group = group;
3739
3740 /* chipset specific */
3741 if (sc->asic_ver == 0x7601)
3742 mt7601_set_chan(sc, chan);
3743
3744 DELAY(1000);
3745 return (0);
3746 }
3747
3748 static void
mtw_set_channel(struct ieee80211com * ic)3749 mtw_set_channel(struct ieee80211com *ic)
3750 {
3751 struct mtw_softc *sc = ic->ic_softc;
3752
3753 MTW_LOCK(sc);
3754 mtw_set_chan(sc, ic->ic_curchan);
3755 MTW_UNLOCK(sc);
3756
3757 return;
3758 }
3759
3760 static void
mtw_getradiocaps(struct ieee80211com * ic,int maxchans,int * nchans,struct ieee80211_channel chans[])3761 mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans,
3762 struct ieee80211_channel chans[])
3763 {
3764 // struct mtw_softc *sc = ic->ic_softc;
3765 uint8_t bands[IEEE80211_MODE_BYTES];
3766
3767 memset(bands, 0, sizeof(bands));
3768 setbit(bands, IEEE80211_MODE_11B);
3769 setbit(bands, IEEE80211_MODE_11G);
3770 setbit(bands, IEEE80211_MODE_11NG);
3771
3772 /* Note: for now, only support HT20 channels */
3773 ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0);
3774 }
3775
3776 static void
mtw_scan_start(struct ieee80211com * ic)3777 mtw_scan_start(struct ieee80211com *ic)
3778 {
3779 struct mtw_softc *sc = ic->ic_softc;
3780 MTW_LOCK(sc);
3781 /* abort TSF synchronization */
3782 mtw_abort_tsf_sync(sc);
3783 mtw_set_bssid(sc, ieee80211broadcastaddr);
3784
3785 MTW_UNLOCK(sc);
3786
3787 return;
3788 }
3789
3790 static void
mtw_scan_end(struct ieee80211com * ic)3791 mtw_scan_end(struct ieee80211com *ic)
3792 {
3793 struct mtw_softc *sc = ic->ic_softc;
3794
3795 MTW_LOCK(sc);
3796
3797 mtw_enable_tsf_sync(sc);
3798 mtw_set_bssid(sc, sc->sc_bssid);
3799
3800 MTW_UNLOCK(sc);
3801
3802 return;
3803 }
3804
3805 /*
3806 * Could be called from ieee80211_node_timeout()
3807 * (non-sleepable thread)
3808 */
3809 static void
mtw_update_beacon(struct ieee80211vap * vap,int item)3810 mtw_update_beacon(struct ieee80211vap *vap, int item)
3811 {
3812 struct ieee80211com *ic = vap->iv_ic;
3813 struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
3814 struct ieee80211_node *ni = vap->iv_bss;
3815 struct mtw_softc *sc = ic->ic_softc;
3816 struct mtw_vap *rvp = MTW_VAP(vap);
3817 int mcast = 0;
3818 uint32_t i;
3819
3820 switch (item) {
3821 case IEEE80211_BEACON_ERP:
3822 mtw_updateslot(ic);
3823 break;
3824 case IEEE80211_BEACON_HTINFO:
3825 mtw_updateprot(ic);
3826 break;
3827 case IEEE80211_BEACON_TIM:
3828 mcast = 1; /*TODO*/
3829 break;
3830 default:
3831 break;
3832 }
3833
3834 setbit(bo->bo_flags, item);
3835 if (rvp->beacon_mbuf == NULL) {
3836 rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
3837 if (rvp->beacon_mbuf == NULL)
3838 return;
3839 }
3840 ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast);
3841
3842 i = MTW_CMDQ_GET(&sc->cmdq_store);
3843 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
3844 sc->cmdq[i].func = mtw_update_beacon_cb;
3845 sc->cmdq[i].arg0 = vap;
3846 ieee80211_runtask(ic, &sc->cmdq_task);
3847
3848 return;
3849 }
3850
/*
 * Deferred (task-context) half of mtw_update_beacon(): write the TXWI
 * followed by the beacon frame into the on-chip beacon area.
 */
static void
mtw_update_beacon_cb(void *arg)
{

	struct ieee80211vap *vap = arg;
	struct ieee80211_node *ni = vap->iv_bss;
	struct mtw_vap *rvp = MTW_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct mtw_softc *sc = ic->ic_softc;
	struct mtw_txwi txwi;
	struct mbuf *m;
	uint16_t txwisize;
	uint8_t ridx;
	/* nothing to do until we are on a channel */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC)
		return;
	if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
		return;

	/*
	 * No need to call ieee80211_beacon_update(), mtw_update_beacon()
	 * is taking care of appropriate calls.
	 */
	if (rvp->beacon_mbuf == NULL) {
		rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
		if (rvp->beacon_mbuf == NULL)
			return;
	}
	m = rvp->beacon_mbuf;

	/* build a TXWI for the beacon; wcid 0xff = no station entry */
	memset(&txwi, 0, sizeof(txwi));
	txwi.wcid = 0xff;
	txwi.len = htole16(m->m_pkthdr.len);

	/* send beacons at the lowest available rate */
	ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
	    MTW_RIDX_CCK1;
	txwi.phy = htole16(rt2860_rates[ridx].mcs);
	if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
		txwi.phy |= htole16(MTW_PHY_OFDM);
	txwi.txop = MTW_TX_TXOP_HT;
	txwi.flags = MTW_TX_TS;
	txwi.xflags = MTW_TX_NSEQ;

	/* copy TXWI then the frame (padded to an even length) to the chip */
	txwisize = sizeof(txwi);
	mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
	mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
	    (m->m_pkthdr.len + 1) & ~1);
}
3899
3900 static void
mtw_updateprot(struct ieee80211com * ic)3901 mtw_updateprot(struct ieee80211com *ic)
3902 {
3903 struct mtw_softc *sc = ic->ic_softc;
3904 uint32_t i;
3905
3906 i = MTW_CMDQ_GET(&sc->cmdq_store);
3907 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "test cmdq_store=%d\n", i);
3908 sc->cmdq[i].func = mtw_updateprot_cb;
3909 sc->cmdq[i].arg0 = ic;
3910 ieee80211_runtask(ic, &sc->cmdq_task);
3911 }
3912
3913 static void
mtw_updateprot_cb(void * arg)3914 mtw_updateprot_cb(void *arg)
3915 {
3916
3917 struct ieee80211com *ic = arg;
3918 struct mtw_softc *sc = ic->ic_softc;
3919 uint32_t tmp;
3920
3921 tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
3922 /* setup protection frame rate (MCS code) */
3923 tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
3924 rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
3925 rt2860_rates[MTW_RIDX_CCK11].mcs;
3926
3927 /* CCK frames don't require protection */
3928 mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
3929 if (ic->ic_flags & IEEE80211_F_USEPROT) {
3930 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3931 tmp |= RT2860_PROT_CTRL_RTS_CTS;
3932 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3933 tmp |= RT2860_PROT_CTRL_CTS;
3934 }
3935 mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
3936 }
3937
3938 static void
mtw_usb_timeout_cb(void * arg)3939 mtw_usb_timeout_cb(void *arg)
3940 {
3941 struct ieee80211vap *vap = arg;
3942 struct mtw_softc *sc = vap->iv_ic->ic_softc;
3943
3944 MTW_LOCK_ASSERT(sc, MA_OWNED);
3945
3946 if (vap->iv_state == IEEE80211_S_SCAN) {
3947 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3948 "timeout caused by scan\n");
3949 /* cancel bgscan */
3950 ieee80211_cancel_scan(vap);
3951 } else {
3952 MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
3953 "timeout by unknown cause\n");
3954 }
3955 }
mtw_reset(struct mtw_softc * sc)3956 static int mtw_reset(struct mtw_softc *sc)
3957 {
3958
3959 usb_device_request_t req;
3960 uint16_t tmp;
3961 uint16_t actlen;
3962
3963 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
3964 req.bRequest = MTW_RESET;
3965 USETW(req.wValue, 1);
3966 USETW(req.wIndex, 0);
3967 USETW(req.wLength, 0);
3968 return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
3969 &req, &tmp, 0, &actlen, 1000));
3970
3971 }
3972
3973
3974 static void
mtw_update_promisc_locked(struct mtw_softc * sc)3975 mtw_update_promisc_locked(struct mtw_softc *sc)
3976 {
3977
3978 uint32_t tmp;
3979
3980 mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);
3981
3982 tmp |= MTW_DROP_UC_NOME;
3983 if (sc->sc_ic.ic_promisc > 0)
3984 tmp &= ~MTW_DROP_UC_NOME;
3985
3986 mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
3987
3988 MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
3989 (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
3990 }
3991
3992 static void
mtw_update_promisc(struct ieee80211com * ic)3993 mtw_update_promisc(struct ieee80211com *ic)
3994 {
3995 struct mtw_softc *sc = ic->ic_softc;
3996
3997 if ((sc->sc_flags & MTW_RUNNING) == 0)
3998 return;
3999
4000 MTW_LOCK(sc);
4001 mtw_update_promisc_locked(sc);
4002 MTW_UNLOCK(sc);
4003 }
4004
4005 static void
mtw_enable_tsf_sync(struct mtw_softc * sc)4006 mtw_enable_tsf_sync(struct mtw_softc *sc)
4007 {
4008 struct ieee80211com *ic = &sc->sc_ic;
4009 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4010 uint32_t tmp;
4011 int error;
4012 mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
4013 tmp &= ~0x1fffff;
4014 tmp |= vap->iv_bss->ni_intval * 16;
4015 tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;
4016
4017 /* local TSF is always updated with remote TSF on beacon reception */
4018 tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
4019 error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
4020 if (error != 0) {
4021 device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n",
4022 error);
4023 }
4024 return;
4025 }
4026
/*
 * Program the legacy (non-HT) rate fallback tables used by the
 * hardware multi-rate retry: each nibble maps a rate index to the
 * next lower rate to retry at.
 */
static void
mtw_enable_mrr(struct mtw_softc *sc)
{
#define CCK(mcs) (mcs)

#define OFDM(mcs) (1 << 3 | (mcs))
	mtw_write(sc, MTW_LG_FBK_CFG0,
	    OFDM(6) << 28 | /* 54->48 */
	    OFDM(5) << 24 | /* 48->36 */
	    OFDM(4) << 20 | /* 36->24 */
	    OFDM(3) << 16 | /* 24->18 */
	    OFDM(2) << 12 | /* 18->12 */
	    OFDM(1) << 8 | /* 12-> 9 */
	    OFDM(0) << 4 | /* 9-> 6 */
	    OFDM(0)); /* 6-> 6 */

	mtw_write(sc, MTW_LG_FBK_CFG1,
	    CCK(2) << 12 | /* 11->5.5 */
	    CCK(1) << 8 | /* 5.5-> 2 */
	    CCK(0) << 4 | /* 2-> 1 */
	    CCK(0)); /* 1-> 1 */
#undef OFDM
#undef CCK
}
4051
4052 static void
mtw_set_txpreamble(struct mtw_softc * sc)4053 mtw_set_txpreamble(struct mtw_softc *sc)
4054 {
4055 struct ieee80211com *ic = &sc->sc_ic;
4056 uint32_t tmp;
4057
4058 mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
4059 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4060 tmp |= MTW_CCK_SHORT_EN;
4061 else
4062 tmp &= ~MTW_CCK_SHORT_EN;
4063 mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
4064 }
4065
4066 static void
mtw_set_basicrates(struct mtw_softc * sc)4067 mtw_set_basicrates(struct mtw_softc *sc)
4068 {
4069 struct ieee80211com *ic = &sc->sc_ic;
4070
4071 /* set basic rates mask */
4072 if (ic->ic_curmode == IEEE80211_MODE_11B)
4073 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
4074 else if (ic->ic_curmode == IEEE80211_MODE_11A)
4075 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
4076 else /* 11g */
4077 mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
4078 }
4079
4080 static void
mtw_set_bssid(struct mtw_softc * sc,const uint8_t * bssid)4081 mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
4082 {
4083 mtw_write(sc, MTW_MAC_BSSID_DW0,
4084 bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
4085 mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
4086 }
4087
4088 static void
mtw_set_macaddr(struct mtw_softc * sc,const uint8_t * addr)4089 mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
4090 {
4091 mtw_write(sc, MTW_MAC_ADDR_DW0,
4092 addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
4093 mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
4094 }
4095
4096 static void
mtw_updateslot(struct ieee80211com * ic)4097 mtw_updateslot(struct ieee80211com *ic)
4098 {
4099
4100 struct mtw_softc *sc = ic->ic_softc;
4101 uint32_t i;
4102
4103 i = MTW_CMDQ_GET(&sc->cmdq_store);
4104 MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
4105 sc->cmdq[i].func = mtw_updateslot_cb;
4106 sc->cmdq[i].arg0 = ic;
4107 ieee80211_runtask(ic, &sc->cmdq_task);
4108
4109 return;
4110 }
4111
4112 /* ARGSUSED */
4113 static void
mtw_updateslot_cb(void * arg)4114 mtw_updateslot_cb(void *arg)
4115 {
4116 struct ieee80211com *ic = arg;
4117 struct mtw_softc *sc = ic->ic_softc;
4118 uint32_t tmp;
4119 mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
4120 tmp &= ~0xff;
4121 tmp |= IEEE80211_GET_SLOTTIME(ic);
4122 mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
4123 }
4124
/* Multicast filter updates are not handled by this driver; empty stub. */
static void
mtw_update_mcast(struct ieee80211com *ic)
{
}
4129
4130 static int8_t
mtw_rssi2dbm(struct mtw_softc * sc,uint8_t rssi,uint8_t rxchain)4131 mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
4132 {
4133 struct ieee80211com *ic = &sc->sc_ic;
4134 struct ieee80211_channel *c = ic->ic_curchan;
4135 int delta;
4136
4137 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4138 u_int chan = ieee80211_chan2ieee(ic, c);
4139 delta = sc->rssi_5ghz[rxchain];
4140
4141 /* determine channel group */
4142 if (chan <= 64)
4143 delta -= sc->lna[1];
4144 else if (chan <= 128)
4145 delta -= sc->lna[2];
4146 else
4147 delta -= sc->lna[3];
4148 } else
4149 delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
4150
4151 return (-12 - delta - rssi);
4152 }
/*
 * Bring the baseband processor up and load the MT7601 default BBP
 * register table.  Returns 0 or an errno.
 */
static int
mt7601_bbp_init(struct mtw_softc *sc)
{
	uint8_t bbp;
	int i, error, ntries;

	/* wait for BBP to wake up */
	for (ntries = 0; ntries < 20; ntries++) {
		if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
			return (error);
		if (bbp != 0 && bbp != 0xff)
			break;
	}

	if (ntries == 20)
		return (ETIMEDOUT);

	/* read-then-clear registers 3 and 105; the values read are unused */
	mtw_bbp_read(sc, 3, &bbp);
	mtw_bbp_write(sc, 3, 0);
	mtw_bbp_read(sc, 105, &bbp);
	mtw_bbp_write(sc, 105, 0);

	/* initialize BBP registers to default values */
	for (i = 0; i < nitems(mt7601_def_bbp); i++) {
		if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
		    mt7601_def_bbp[i].val)) != 0)
			return (error);
	}

	/* force a bandwidth calibration on the next channel switch */
	sc->sc_bw_calibrated = 0;

	return (0);
}
4186
4187 static int
mt7601_rf_init(struct mtw_softc * sc)4188 mt7601_rf_init(struct mtw_softc *sc)
4189 {
4190 int i, error;
4191
4192 /* RF bank 0 */
4193 for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
4194 error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
4195 mt7601_rf_bank0[i].val);
4196 if (error != 0)
4197 return (error);
4198 }
4199 /* RF bank 4 */
4200 for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
4201 error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
4202 mt7601_rf_bank4[i].val);
4203 if (error != 0)
4204 return (error);
4205 }
4206 /* RF bank 5 */
4207 for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
4208 error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
4209 mt7601_rf_bank5[i].val);
4210 if (error != 0)
4211 return (error);
4212 }
4213 return (0);
4214 }
4215
/*
 * Enable MAC Tx/Rx and USB DMA, and program the Rx filter for the
 * current operating mode.  Returns 0 or an errno.
 */
static int
mtw_txrx_enable(struct mtw_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t tmp;
	int error, ntries;
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
	/* wait for the WPDMA engine to go idle */
	for (ntries = 0; ntries < 200; ntries++) {
		if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0) {
			return (error);
		}
		if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
			break;
		mtw_delay(sc, 50);
	}
	if (ntries == 200) {
		return (ETIMEDOUT);
	}

	DELAY(50);

	/* turn the DMA engines on */
	tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
	mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

	/* enable Rx bulk aggregation (set timeout and limit) */
	tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
	    MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
	mtw_write(sc, MTW_USB_DMA_CFG, tmp);

	/* set Rx filter; monitor mode receives everything */
	tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
		    MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
		    MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
		if (ic->ic_opmode == IEEE80211_M_STA)
			tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
	}
	mtw_write(sc, MTW_RX_FILTR_CFG, tmp);

	/* finally enable both Rx and Tx in the MAC */
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
	return (0);
}
static int
mt7601_rxdc_cal(struct mtw_softc *sc)
{
	/*
	 * Run the MT7601 Rx DC offset calibration via BBP registers
	 * 158/159, polling for completion.  The MAC Rx state is saved
	 * on entry and restored on the success path.
	 */
	uint32_t tmp;
	uint8_t bbp;
	int ntries;

	mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
	/* start the calibration (BBP 158 selects, BBP 159 carries data) */
	mtw_bbp_write(sc, 158, 0x8d);
	mtw_bbp_write(sc, 159, 0xfc);
	mtw_bbp_write(sc, 158, 0x8c);
	mtw_bbp_write(sc, 159, 0x4c);

	for (ntries = 0; ntries < 20; ntries++) {
		DELAY(300);
		mtw_bbp_write(sc, 158, 0x8c);
		mtw_bbp_read(sc, 159, &bbp);
		/* 0x0c in BBP 159 signals calibration done */
		if (bbp == 0x0c)
			break;
	}

	/*
	 * NOTE(review): MAC_SYS_CTRL is not restored on this timeout
	 * path (it is on success below) — confirm intended.
	 */
	if (ntries == 20)
		return (ETIMEDOUT);

	mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
	mtw_bbp_write(sc, 158, 0x8d);
	mtw_bbp_write(sc, 159, 0xe0);
	mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
	return (0);
}
4290
static int
mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
{
	/*
	 * Read the BBP R49 measurement register after selecting which
	 * quantity to sample (e.g. MT7601_R47_TEMP) via the low nibble
	 * of BBP R47.  The reading is returned through *val.
	 */
	uint8_t bbp;

	mtw_bbp_read(sc, 47, &bbp);
	/*
	 * NOTE(review): the value read above is immediately overwritten
	 * with 0x90 (this matches the OpenBSD driver) — presumably the
	 * read only serves as a register-access flush; confirm.
	 */
	bbp = 0x90;
	mtw_bbp_write(sc, 47, bbp);
	/* select the measurement source in the low nibble */
	bbp &= ~0x0f;
	bbp |= flag;
	mtw_bbp_write(sc, 47, bbp);
	return (mtw_bbp_read(sc, 49, val));
}
4304
4305 static int
mt7601_rf_temperature(struct mtw_softc * sc,int8_t * val)4306 mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
4307 {
4308 uint32_t rfb, rfs;
4309 uint8_t bbp;
4310 int ntries;
4311
4312 mtw_read(sc, MTW_RF_BYPASS0, &rfb);
4313 mtw_read(sc, MTW_RF_SETTING0, &rfs);
4314 mtw_write(sc, MTW_RF_BYPASS0, 0);
4315 mtw_write(sc, MTW_RF_SETTING0, 0x10);
4316 mtw_write(sc, MTW_RF_BYPASS0, 0x10);
4317
4318 mtw_bbp_read(sc, 47, &bbp);
4319 bbp &= ~0x7f;
4320 bbp |= 0x10;
4321 mtw_bbp_write(sc, 47, bbp);
4322
4323 mtw_bbp_write(sc, 22, 0x40);
4324
4325 for (ntries = 0; ntries < 10; ntries++) {
4326 mtw_bbp_read(sc, 47, &bbp);
4327 if ((bbp & 0x10) == 0)
4328 break;
4329 }
4330 if (ntries == 10)
4331 return (ETIMEDOUT);
4332
4333 mt7601_r49_read(sc, MT7601_R47_TEMP, val);
4334
4335 mtw_bbp_write(sc, 22, 0);
4336
4337 mtw_bbp_read(sc, 21, &bbp);
4338 bbp |= 0x02;
4339 mtw_bbp_write(sc, 21, bbp);
4340 bbp &= ~0x02;
4341 mtw_bbp_write(sc, 21, bbp);
4342
4343 mtw_write(sc, MTW_RF_BYPASS0, 0);
4344 mtw_write(sc, MTW_RF_SETTING0, rfs);
4345 mtw_write(sc, MTW_RF_BYPASS0, rfb);
4346 return (0);
4347 }
4348
static int
mt7601_rf_setup(struct mtw_softc *sc)
{
	/*
	 * One-time RF bring-up and calibration for the MT7601: default
	 * register load, frequency offset, temperature read, VCO
	 * calibration and the MCU-driven calibration steps.  Subsequent
	 * calls are no-ops once sc_rf_calibrated is set.  Returns 0 on
	 * success or the first error from a calibration step.
	 */
	uint32_t tmp;
	uint8_t rf;
	int error;

	if (sc->sc_rf_calibrated)
		return (0);

	/* init RF registers */
	if ((error = mt7601_rf_init(sc)) != 0)
		return (error);

	/* init frequency offset */
	mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
	mtw_rf_read(sc, 0, 12, &rf);

	/* read temperature */
	mt7601_rf_temperature(sc, &rf);
	sc->bbp_temp = rf;
	device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);

	/* dummy RF7 reads bracket the MCU calibration (read values unused) */
	mtw_rf_read(sc, 0, 7, &rf);
	if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
		return (error);
	mtw_delay(sc, 100);
	mtw_rf_read(sc, 0, 7, &rf);

	/* Calibrate VCO RF 0/4 */
	mtw_rf_write(sc, 0, 4, 0x0a);
	mtw_rf_write(sc, 0, 4, 0x20);
	mtw_rf_read(sc, 0, 4, &rf);
	mtw_rf_write(sc, 0, 4, rf | 0x80);

	/* MCU calibration steps; opcodes/order follow the vendor sequence */
	if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
		return (error);
	if ((error = mt7601_rxdc_cal(sc)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
		return (error);
	if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
		return (error);

	/* adjust LDO_CFG0: clear bit 4, set bit 2 */
	mtw_read(sc, MTW_LDO_CFG0, &tmp);
	tmp &= ~(1 << 4);
	tmp |= (1 << 2);
	mtw_write(sc, MTW_LDO_CFG0, tmp);

	if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
		return (error);
	if ((error = mt7601_rxdc_cal(sc)) != 0)
		return (error);

	sc->sc_rf_calibrated = 1;
	return (0);
}
4410
4411 static void
mtw_set_txrts(struct mtw_softc * sc)4412 mtw_set_txrts(struct mtw_softc *sc)
4413 {
4414 uint32_t tmp;
4415
4416 /* set RTS threshold */
4417 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4418 tmp &= ~0xffff00;
4419 tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
4420 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4421 }
4422 static int
mtw_mcu_radio(struct mtw_softc * sc,int func,uint32_t val)4423 mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
4424 {
4425 struct mtw_mcu_cmd_16 cmd;
4426
4427 cmd.r1 = htole32(func);
4428 cmd.r2 = htole32(val);
4429 cmd.r3 = 0;
4430 cmd.r4 = 0;
4431 return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
4432 }
static void
mtw_init_locked(struct mtw_softc *sc)
{

	/*
	 * Full hardware (re)initialization: reset MAC/BBP, load default
	 * register tables, program the MAC address and key tables,
	 * calibrate the RF and finally enable Tx/Rx.  Called with the
	 * softc lock held; on failure the device is stopped again.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t tmp;
	int i, error, ridx, ntries;
	if (ic->ic_nrunning > 1)
		return;
	mtw_stop(sc);

	/* (re)initialize the Tx endpoint queues */
	for (i = 0; i != MTW_EP_QUEUES; i++)
		mtw_setup_tx_list(sc, &sc->sc_epq[i]);

	/* wait for the WPDMA engine to go idle */
	for (ntries = 0; ntries < 100; ntries++) {
		if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
			goto fail;
		if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
		error = ETIMEDOUT;
		goto fail;
	}
	/* keep bits 4-11 only, re-enable DDONE write-back */
	tmp &= 0xff0;
	tmp |= MTW_TX_WB_DDONE;
	mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);

	mtw_set_leds(sc, MTW_LED_MODE_ON);
	/* reset MAC and baseband */
	mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
	mtw_write(sc, MTW_USB_DMA_CFG, 0);
	mtw_write(sc, MTW_MAC_SYS_CTRL, 0);

	/* init MAC values */
	if (sc->asic_ver == 0x7601) {
		for (i = 0; i < nitems(mt7601_def_mac); i++)
			mtw_write(sc, mt7601_def_mac[i].reg,
			    mt7601_def_mac[i].val);
	}

	/* wait while MAC is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
			goto fail;
		if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
			break;
		DELAY(1000);
	}
	if (ntries == 100) {
		error = ETIMEDOUT;
		goto fail;
	}

	/* set MAC address */

	mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);

	/* clear WCID attribute table */
	mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);

	/* undocumented register write, then disable MPDU-length stuffing */
	mtw_write(sc, 0x1648, 0x00830083);
	mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
	tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
	mtw_write(sc, MTW_FCE_L2_STUFF, tmp);

	/* RTS config */
	mtw_set_txrts(sc);

	/* clear Host to MCU mailbox */
	mtw_write(sc, MTW_BBP_CSR, 0);
	mtw_write(sc, MTW_H2M_MAILBOX, 0);

	/* clear RX WCID search table */
	mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);

	/* abort TSF synchronization */
	mtw_abort_tsf_sync(sc);

	/* set the microsecond cycle count for this ASIC */
	mtw_read(sc, MTW_US_CYC_CNT, &tmp);
	tmp = (tmp & ~0xff);
	if (sc->asic_ver == 0x7601)
		tmp |= 0x1e;
	mtw_write(sc, MTW_US_CYC_CNT, tmp);

	/* clear shared key table */
	mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);

	/* clear IV/EIV table */
	mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);

	/* clear shared key mode */
	mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
	mtw_write(sc, MTW_SKEY_MODE_8_15, 0);

	/* txop truncation */
	mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);

	/* init Tx power for all Tx rates */
	for (ridx = 0; ridx < 5; ridx++) {
		/* 0xffffffff marks an unset entry; skip it */
		if (sc->txpow20mhz[ridx] == 0xffffffff)
			continue;
		mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
	}
	mtw_write(sc, MTW_TX_PWR_CFG7, 0);
	mtw_write(sc, MTW_TX_PWR_CFG9, 0);

	/* clear bits 14 and 18 of CMB_CTRL */
	mtw_read(sc, MTW_CMB_CTRL, &tmp);
	tmp &= ~(1 << 18 | 1 << 14);
	mtw_write(sc, MTW_CMB_CTRL, tmp);

	/* clear USB DMA */
	mtw_write(sc, MTW_USB_DMA_CFG,
	    MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
	    MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
	mtw_delay(sc, 50);
	mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
	tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
	mtw_write(sc, MTW_USB_DMA_CFG, tmp);

	/* enable radio */
	mtw_mcu_radio(sc, 0x31, 0);

	/* init RF registers */
	if (sc->asic_ver == 0x7601)
		mt7601_rf_init(sc);

	/* init baseband registers */
	/*
	 * NOTE(review): when asic_ver != 0x7601, 'error' below still
	 * holds the value from the last mtw_read() (0 on success)
	 * rather than a BBP-init result — confirm other ASICs are
	 * handled elsewhere before the check.
	 */
	if (sc->asic_ver == 0x7601)
		error = mt7601_bbp_init(sc);

	if (error != 0) {
		device_printf(sc->sc_dev, "could not initialize BBP\n");
		goto fail;
	}

	/* setup and calibrate RF */
	error = mt7601_rf_setup(sc);

	if (error != 0) {
		device_printf(sc->sc_dev, "could not initialize RF\n");
		goto fail;
	}

	/* select default channel */
	mtw_set_chan(sc, ic->ic_curchan);

	/* setup initial protection mode */
	mtw_updateprot_cb(ic);

	/* mark running and kick off the USB transfers */
	sc->sc_flags |= MTW_RUNNING;
	sc->cmdq_run = MTW_CMDQ_GO;
	for (i = 0; i != MTW_N_XFER; i++)
		usbd_xfer_set_stall(sc->sc_xfer[i]);

	usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);

	error = mtw_txrx_enable(sc);
	if (error != 0) {
		goto fail;
	}

	return;

fail:

	mtw_stop(sc);
	return;
}
4605
4606 static void
mtw_stop(void * arg)4607 mtw_stop(void *arg)
4608 {
4609 struct mtw_softc *sc = (struct mtw_softc *)arg;
4610 uint32_t tmp;
4611 int i, ntries, error;
4612
4613 MTW_LOCK_ASSERT(sc, MA_OWNED);
4614
4615 sc->sc_flags &= ~MTW_RUNNING;
4616
4617 sc->ratectl_run = MTW_RATECTL_OFF;
4618 sc->cmdq_run = sc->cmdq_key_set;
4619
4620 MTW_UNLOCK(sc);
4621
4622 for (i = 0; i < MTW_N_XFER; i++)
4623 usbd_transfer_drain(sc->sc_xfer[i]);
4624
4625 MTW_LOCK(sc);
4626
4627 mtw_drain_mbufq(sc);
4628
4629 if (sc->rx_m != NULL) {
4630 m_free(sc->rx_m);
4631 sc->rx_m = NULL;
4632 }
4633
4634 /* Disable Tx/Rx DMA. */
4635 mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
4636 tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
4637 mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
4638 // mtw_usb_dma_write(sc, 0);
4639
4640 for (ntries = 0; ntries < 100; ntries++) {
4641 if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
4642 break;
4643 if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
4644 break;
4645 DELAY(10);
4646 }
4647 if (ntries == 100) {
4648 device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
4649 }
4650
4651 /* stop MAC Tx/Rx */
4652 mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
4653 tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
4654 mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
4655
4656 /* disable RTS retry */
4657 mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
4658 tmp &= ~0xff;
4659 mtw_write(sc, MTW_TX_RTS_CFG, tmp);
4660
4661 /* US_CYC_CFG */
4662 mtw_read(sc, MTW_US_CYC_CNT, &tmp);
4663 tmp = (tmp & ~0xff);
4664 mtw_write(sc, MTW_US_CYC_CNT, tmp);
4665
4666 /* stop PBF */
4667 mtw_read(sc, MTW_PBF_CFG, &tmp);
4668 tmp &= ~0x3;
4669 mtw_write(sc, MTW_PBF_CFG, tmp);
4670
4671 /* wait for pending Tx to complete */
4672 for (ntries = 0; ntries < 100; ntries++) {
4673 if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
4674 break;
4675 if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
4676 break;
4677 }
4678
4679 }
4680
4681 static void
mtw_delay(struct mtw_softc * sc,u_int ms)4682 mtw_delay(struct mtw_softc *sc, u_int ms)
4683 {
4684 usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
4685 USB_MS_TO_TICKS(ms));
4686 }
4687
/*
 * net80211 channel-width change callback; not implemented yet.
 */
static void
mtw_update_chw(struct ieee80211com *ic)
{
	printf("%s: TODO\n", __func__);
}
4694
/*
 * net80211 A-MPDU Tx admission callback.  The driver has no A-MPDU
 * transmit support yet, so every request is refused.
 */
static int
mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	return (0);
}
4702
4703 static device_method_t mtw_methods[] = {
4704 /* Device interface */
4705 DEVMETHOD(device_probe, mtw_match),
4706 DEVMETHOD(device_attach, mtw_attach),
4707 DEVMETHOD(device_detach, mtw_detach), DEVMETHOD_END
4708 };
4709
4710 static driver_t mtw_driver = { .name = "mtw",
4711 .methods = mtw_methods,
4712 .size = sizeof(struct mtw_softc) };
4713
/* Attach below USB hubs; mtw_driver_loaded handles module load events. */
DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
/* Runtime dependencies: net80211, USB stack and firmware(9) loader. */
MODULE_DEPEND(mtw, wlan, 1, 1, 1);
MODULE_DEPEND(mtw, usb, 1, 1, 1);
MODULE_DEPEND(mtw, firmware, 1, 1, 1);
MODULE_VERSION(mtw, 1);
/* Export the device ID table for devmatch(8) autoloading. */
USB_PNP_HOST_INFO(mtw_devs);
4720