// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */
10
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mmc/sdio_func.h>
15 #include <linux/sched.h>
16 #include <linux/kthread.h>
17
18 #include "mt76.h"
19
20 static int
mt76s_alloc_rx_queue(struct mt76_dev * dev,enum mt76_rxq_id qid)21 mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
22 {
23 struct mt76_queue *q = &dev->q_rx[qid];
24
25 spin_lock_init(&q->lock);
26 q->entry = devm_kcalloc(dev->dev,
27 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
28 GFP_KERNEL);
29 if (!q->entry)
30 return -ENOMEM;
31
32 q->ndesc = MT_NUM_RX_ENTRIES;
33 q->head = q->tail = 0;
34 q->queued = 0;
35
36 return 0;
37 }
38
mt76s_alloc_tx(struct mt76_dev * dev)39 static int mt76s_alloc_tx(struct mt76_dev *dev)
40 {
41 struct mt76_queue *q;
42 int i;
43
44 for (i = 0; i < MT_TXQ_MCU_WA; i++) {
45 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
46 if (!q)
47 return -ENOMEM;
48
49 spin_lock_init(&q->lock);
50 q->hw_idx = i;
51 dev->q_tx[i] = q;
52
53 q->entry = devm_kcalloc(dev->dev,
54 MT_NUM_TX_ENTRIES, sizeof(*q->entry),
55 GFP_KERNEL);
56 if (!q->entry)
57 return -ENOMEM;
58
59 q->ndesc = MT_NUM_TX_ENTRIES;
60 }
61
62 return 0;
63 }
64
/* Stop all deferred SDIO tx/rx processing.
 *
 * Workers are cancelled in tx -> rx -> stats order; cancel_work_sync()
 * also waits for a running instance to complete, so once this returns
 * no SDIO worker is touching the queues anymore.
 */
void mt76s_stop_txrx(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	cancel_work_sync(&sdio->tx.xmit_work);
	cancel_work_sync(&sdio->tx.status_work);
	cancel_work_sync(&sdio->rx.recv_work);
	cancel_work_sync(&sdio->rx.net_work);
	cancel_work_sync(&sdio->stat_work);
	/* stat_work is gone, so nobody else will clear this flag for us */
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	/* flush=true: resolve any tx status entries still outstanding */
	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
79
mt76s_alloc_queues(struct mt76_dev * dev)80 int mt76s_alloc_queues(struct mt76_dev *dev)
81 {
82 int err;
83
84 err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
85 if (err < 0)
86 return err;
87
88 return mt76s_alloc_tx(dev);
89 }
90 EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
91
92 static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue * q)93 mt76s_get_next_rx_entry(struct mt76_queue *q)
94 {
95 struct mt76_queue_entry *e = NULL;
96
97 spin_lock_bh(&q->lock);
98 if (q->queued > 0) {
99 e = &q->entry[q->tail];
100 q->tail = (q->tail + 1) % q->ndesc;
101 q->queued--;
102 }
103 spin_unlock_bh(&q->lock);
104
105 return e;
106 }
107
108 static int
mt76s_process_rx_queue(struct mt76_dev * dev,struct mt76_queue * q)109 mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
110 {
111 int qid = q - &dev->q_rx[MT_RXQ_MAIN];
112 int nframes = 0;
113
114 while (true) {
115 struct mt76_queue_entry *e;
116
117 if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
118 break;
119
120 e = mt76s_get_next_rx_entry(q);
121 if (!e || !e->skb)
122 break;
123
124 dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
125 e->skb = NULL;
126 nframes++;
127 }
128 if (qid == MT_RXQ_MAIN)
129 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
130
131 return nframes;
132 }
133
/* Reap completed entries from a tx queue and restart flow control.
 *
 * Entries are consumed in ring order up to the first one whose ->done
 * flag has not been set yet.  NOTE(review): q->queued and q->tail are
 * read here without taking q->lock; this appears to rely on this work
 * item being the only consumer of the ring -- confirm against the xmit
 * worker before changing.
 */
static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake;

	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		/* MCU frames are not owned by mac80211: free them here
		 * instead of passing them back for status reporting.
		 */
		if (qid == MT_TXQ_MCU) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
	}

	/* restart a stopped queue once at least 8 slots are free again */
	wake = q->stopped && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	/* the MCU queue is internal; mac80211 never sees it */
	if (qid == MT_TXQ_MCU)
		return;

	mt76_txq_schedule(&dev->phy, qid);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}
170
mt76s_tx_status_data(struct work_struct * work)171 static void mt76s_tx_status_data(struct work_struct *work)
172 {
173 struct mt76_sdio *sdio;
174 struct mt76_dev *dev;
175 u8 update = 1;
176 u16 count = 0;
177
178 sdio = container_of(work, struct mt76_sdio, stat_work);
179 dev = container_of(sdio, struct mt76_dev, sdio);
180
181 while (true) {
182 if (test_bit(MT76_REMOVED, &dev->phy.state))
183 break;
184
185 if (!dev->drv->tx_status_data(dev, &update))
186 break;
187 count++;
188 }
189
190 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
191 queue_work(dev->wq, &sdio->stat_work);
192 else
193 clear_bit(MT76_READING_STATS, &dev->phy.state);
194 }
195
196 static int
mt76s_tx_queue_skb(struct mt76_dev * dev,enum mt76_txq_id qid,struct sk_buff * skb,struct mt76_wcid * wcid,struct ieee80211_sta * sta)197 mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
198 struct sk_buff *skb, struct mt76_wcid *wcid,
199 struct ieee80211_sta *sta)
200 {
201 struct mt76_queue *q = dev->q_tx[qid];
202 struct mt76_tx_info tx_info = {
203 .skb = skb,
204 };
205 int err, len = skb->len;
206 u16 idx = q->head;
207
208 if (q->queued == q->ndesc)
209 return -ENOSPC;
210
211 skb->prev = skb->next = NULL;
212 err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
213 if (err < 0)
214 return err;
215
216 q->entry[q->head].skb = tx_info.skb;
217 q->entry[q->head].buf_sz = len;
218 q->head = (q->head + 1) % q->ndesc;
219 q->queued++;
220
221 return idx;
222 }
223
224 static int
mt76s_tx_queue_skb_raw(struct mt76_dev * dev,enum mt76_txq_id qid,struct sk_buff * skb,u32 tx_info)225 mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
226 struct sk_buff *skb, u32 tx_info)
227 {
228 struct mt76_queue *q = dev->q_tx[qid];
229 int ret = -ENOSPC, len = skb->len, pad;
230
231 if (q->queued == q->ndesc)
232 goto error;
233
234 pad = round_up(skb->len, 4) - skb->len;
235 ret = mt76_skb_adjust_pad(skb, pad);
236 if (ret)
237 goto error;
238
239 spin_lock_bh(&q->lock);
240
241 q->entry[q->head].buf_sz = len;
242 q->entry[q->head].skb = skb;
243 q->head = (q->head + 1) % q->ndesc;
244 q->queued++;
245
246 spin_unlock_bh(&q->lock);
247
248 return 0;
249
250 error:
251 dev_kfree_skb(skb);
252
253 return ret;
254 }
255
/* .kick hook: defer the actual SDIO transfer to the xmit worker. */
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
}
262
/* Queue backend exposed to the mt76 core for SDIO devices */
static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
268
mt76s_tx_work(struct work_struct * work)269 static void mt76s_tx_work(struct work_struct *work)
270 {
271 struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
272 tx.status_work);
273 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
274 int i;
275
276 for (i = 0; i < MT_TXQ_MCU_WA; i++)
277 mt76s_process_tx_queue(dev, i);
278
279 if (dev->drv->tx_status_data &&
280 !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
281 queue_work(dev->wq, &dev->sdio.stat_work);
282 }
283
mt76s_rx_work(struct work_struct * work)284 static void mt76s_rx_work(struct work_struct *work)
285 {
286 struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
287 rx.net_work);
288 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
289 int i;
290
291 /* rx processing */
292 local_bh_disable();
293 rcu_read_lock();
294
295 mt76_for_each_q_rx(dev, i)
296 mt76s_process_rx_queue(dev, &dev->q_rx[i]);
297
298 rcu_read_unlock();
299 local_bh_enable();
300 }
301
mt76s_deinit(struct mt76_dev * dev)302 void mt76s_deinit(struct mt76_dev *dev)
303 {
304 struct mt76_sdio *sdio = &dev->sdio;
305 int i;
306
307 mt76s_stop_txrx(dev);
308 if (sdio->txrx_wq) {
309 destroy_workqueue(sdio->txrx_wq);
310 sdio->txrx_wq = NULL;
311 }
312
313 sdio_claim_host(sdio->func);
314 sdio_release_irq(sdio->func);
315 sdio_release_host(sdio->func);
316
317 mt76_for_each_q_rx(dev, i) {
318 struct mt76_queue *q = &dev->q_rx[i];
319 int j;
320
321 for (j = 0; j < q->ndesc; j++) {
322 struct mt76_queue_entry *e = &q->entry[j];
323
324 if (!e->skb)
325 continue;
326
327 dev_kfree_skb(e->skb);
328 e->skb = NULL;
329 }
330 }
331 }
332 EXPORT_SYMBOL_GPL(mt76s_deinit);
333
mt76s_init(struct mt76_dev * dev,struct sdio_func * func,const struct mt76_bus_ops * bus_ops)334 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
335 const struct mt76_bus_ops *bus_ops)
336 {
337 struct mt76_sdio *sdio = &dev->sdio;
338
339 sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
340 WQ_UNBOUND | WQ_HIGHPRI,
341 WQ_UNBOUND_MAX_ACTIVE);
342 if (!sdio->txrx_wq)
343 return -ENOMEM;
344
345 INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
346 INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
347 INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
348
349 mutex_init(&sdio->sched.lock);
350 dev->queue_ops = &sdio_queue_ops;
351 dev->bus = bus_ops;
352 dev->sdio.func = func;
353
354 return 0;
355 }
356 EXPORT_SYMBOL_GPL(mt76s_init);
357
358 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
359 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
360 MODULE_LICENSE("Dual BSD/GPL");
361