// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#if defined(__FreeBSD__)
#define	LINUXKPI_PARAM_PREFIX	mt76_usb_

#include <linux/delay.h>
#endif

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

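/* Issue a vendor control request with the control mutex held, retrying
 * up to MT_VEND_REQ_MAX_RETRY times on transient errors. -ENODEV and
 * -EPROTO mark the device as removed and abort the retry loop.
 */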
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV || ret == -EPROTO)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

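/* Read a 32-bit register through a vendor request; the caller must hold
 * usb_ctrl_mtx. Returns ~0 if the transfer fails.
 */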
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

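/* Select the vendor request code matching the register address space. */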
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

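/* Read-modify-write a register within a single usb_ctrl_mtx critical
 * section.
 */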
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

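/* Write a buffer to device registers in chunks of at most usb->data_len
 * bytes, bouncing through the pre-allocated usb->data buffer.
 */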
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

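/* Write a 32-bit value as two 16-bit vendor requests, low half first. */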
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

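/* Scatter-gather is usable only if it is not disabled via the module
 * parameter and the host controller supports arbitrary SG lists.
 */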
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		udev->bus->no_sg_constraint);
}

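/* Record the bulk IN/OUT endpoint numbers from the current altsetting;
 * the driver requires the full set of expected endpoints.
 */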
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

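/* Attach up to nsgs page-pool buffers to an rx urb scatterlist; returns
 * the number of buffers attached, or -ENOMEM if none could be allocated.
 */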
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		void *data;
		int offset;

		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!data)
			break;

		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
			    offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int offset;

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);

	if (urb->transfer_buffer)
		mt76_put_page_pool_buf(urb->transfer_buffer, false);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

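/* Extract the payload length from the usb DMA header and sanity-check it
 * against the actual transfer size.
 */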
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

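/* Build an rx skb around the page-pool buffer. If the payload plus head
 * room does not fit in buf_size, fall back to a small linear skb and
 * attach the remaining data as a page fragment.
 */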
static struct sk_buff *
#if defined(__linux__)
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
#elif defined(__FreeBSD__)
mt76u_build_rx_skb(struct mt76_dev *dev, u8 *data,
#endif
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
#if defined(__linux__)
				page, data - page_address(page),
#elif defined(__FreeBSD__)
				page, data - (u8 *)page_address(page),
#endif
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

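/* Process a completed rx urb: validate the DMA header, build an skb
 * (chaining additional SG segments as frags) and hand it to the driver.
 * Returns the number of buffers consumed, or 0 if the urb was dropped.
 */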
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);

	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}

	skb_mark_for_recycle(skb);
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

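/* Drain completed rx urbs from a queue, refill and resubmit them, then
 * kick the rx poll path for the main queue.
 */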
static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	err = mt76_create_page_pool(dev, q);
	if (err)
		return err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}
	page_pool_destroy(q->page_pool);
	q->page_pool = NULL;
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

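/* Reap completed tx entries on all queues, wake waiters once a queue
 * drains, and kick the tx scheduler and stats work as needed.
 */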
static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		return;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);
	}

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->usb.stat_work);
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

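/* Queue an skb for transmission: prepare it via the driver callback, map
 * it into the entry's urb and advance the queue head. The urb is
 * submitted later by mt76u_tx_kick().
 */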
static int
mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
			    mt76u_complete_tx, &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

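/* Map an access category to the per-chip hardware queue index and the
 * matching bulk-out endpoint.
 */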
static void
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
	u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;

	switch (mt76_chip(dev)) {
	case 0x7663: {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		q->hw_idx = lmac_queue_map[ac];
		q->ep = q->hw_idx + 1;
		break;
	}
	case 0x7961:
	case 0x7925:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
		break;
	default:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = q->hw_idx + 1;
		break;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j, err;

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		mt76u_ac_to_hwq(dev, q, i);
		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might still have queued skbs, but
		 * mt76u_tx_kick() will fail to submit the urb, so clean up
		 * those skbs manually.
		 */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

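/* Common usb backend initialization: allocate the vendor request bounce
 * buffer, discover endpoints and spawn the rx/status workers.
 */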
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_DEPEND(mt76_core, linuxkpi_usb, 1, 1, 1);
#endif