// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm BAM-DMUX WWAN network driver
 * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>

#define BAM_DMUX_BUFFER_SIZE		SZ_2K
#define BAM_DMUX_HDR_SIZE		sizeof(struct bam_dmux_hdr)
#define BAM_DMUX_MAX_DATA_SIZE		(BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
#define BAM_DMUX_NUM_SKB		32

#define BAM_DMUX_HDR_MAGIC		0x33fc

#define BAM_DMUX_AUTOSUSPEND_DELAY	1000
#define BAM_DMUX_REMOTE_TIMEOUT		msecs_to_jiffies(2000)

enum {
	BAM_DMUX_CMD_DATA,
	BAM_DMUX_CMD_OPEN,
	BAM_DMUX_CMD_CLOSE,
};

enum {
	BAM_DMUX_CH_DATA_0,
	BAM_DMUX_CH_DATA_1,
	BAM_DMUX_CH_DATA_2,
	BAM_DMUX_CH_DATA_3,
	BAM_DMUX_CH_DATA_4,
	BAM_DMUX_CH_DATA_5,
	BAM_DMUX_CH_DATA_6,
	BAM_DMUX_CH_DATA_7,
	BAM_DMUX_NUM_CH
};

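/*
 * Header prepended to each BAM-DMUX frame. @magic must be
 * BAM_DMUX_HDR_MAGIC, @cmd is one of BAM_DMUX_CMD_*, @ch selects the
 * logical channel, @len is the payload length (excluding header and
 * padding) and @pad is the number of padding bytes appended for word
 * alignment. The @signal byte is cleared on transmit and ignored on
 * receive by this driver.
 */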
struct bam_dmux_hdr {
	u16 magic;
	u8 signal;
	u8 cmd;
	u8 pad;
	u8 ch;
	u16 len;
};

struct bam_dmux_skb_dma {
	struct bam_dmux *dmux;
	struct sk_buff *skb;
	dma_addr_t addr;
};

struct bam_dmux {
	struct device *dev;

	int pc_irq;
	bool pc_state, pc_ack_state;
	struct qcom_smem_state *pc, *pc_ack;
	u32 pc_mask, pc_ack_mask;
	wait_queue_head_t pc_wait;
	struct completion pc_ack_completion;

	struct dma_chan *rx, *tx;
	struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
	struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
	spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
	unsigned int tx_next_skb;
	atomic_long_t tx_deferred_skb;
	struct work_struct tx_wakeup_work;

	DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
	struct work_struct register_netdev_work;
	struct net_device *netdevs[BAM_DMUX_NUM_CH];
};

struct bam_dmux_netdev {
	struct bam_dmux *dmux;
	u8 ch;
};

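/*
 * Vote for the remote power state via the "pc" SMEM state and prepare
 * to wait for the remote side to acknowledge the new vote.
 */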
static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
{
	reinit_completion(&dmux->pc_ack_completion);
	qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
				    enable ? dmux->pc_mask : 0);
}

static void bam_dmux_pc_ack(struct bam_dmux *dmux)
{
	qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
				    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
	dmux->pc_ack_state = !dmux->pc_ack_state;
}

static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
				 enum dma_data_direction dir)
{
	struct device *dev = skb_dma->dmux->dev;

	skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
	if (dma_mapping_error(dev, skb_dma->addr)) {
		dev_err(dev, "Failed to DMA map buffer\n");
		skb_dma->addr = 0;
		return false;
	}

	return true;
}

static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
				   enum dma_data_direction dir)
{
	dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
	skb_dma->addr = 0;
}

static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "wake queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev && netif_running(netdev))
			netif_wake_queue(netdev);
	}
}

static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
{
	int i;

	dev_dbg(dmux->dev, "stop queues\n");

	for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
		struct net_device *netdev = dmux->netdevs[i];

		if (netdev)
			netif_stop_queue(netdev);
	}
}

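/*
 * Release a TX slot after the transfer finished (or failed): drop the
 * runtime PM reference, unmap the buffer and wake the queues if they
 * were stopped while waiting for this slot.
 */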
static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	unsigned long flags;

	pm_runtime_put_autosuspend(dmux->dev);

	if (skb_dma->addr)
		bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);

	spin_lock_irqsave(&dmux->tx_lock, flags);
	skb_dma->skb = NULL;
	if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
		bam_dmux_tx_wake_queues(dmux);
	spin_unlock_irqrestore(&dmux->tx_lock, flags);
}

static void bam_dmux_tx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct sk_buff *skb = skb_dma->skb;

	bam_dmux_tx_done(skb_dma);
	dev_consume_skb_any(skb);
}

static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
					   skb_dma->skb->len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_tx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}

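/*
 * Claim the next free TX slot. Returns NULL (and stops the queues) if
 * the ring of TX slots is currently full.
 */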
static struct bam_dmux_skb_dma *
bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
{
	struct bam_dmux_skb_dma *skb_dma;
	unsigned long flags;

	spin_lock_irqsave(&dmux->tx_lock, flags);

	skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
	if (skb_dma->skb) {
		bam_dmux_tx_stop_queues(dmux);
		spin_unlock_irqrestore(&dmux->tx_lock, flags);
		return NULL;
	}
	skb_dma->skb = skb;

	dmux->tx_next_skb++;
	if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
		bam_dmux_tx_stop_queues(dmux);

	spin_unlock_irqrestore(&dmux->tx_lock, flags);
	return skb_dma;
}

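/*
 * Send a header-only command frame (e.g. BAM_DMUX_CMD_OPEN/CLOSE) for
 * the channel of @bndev. May sleep, so this is only used from the
 * netdev open/stop callbacks.
 */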
static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
{
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	struct bam_dmux_hdr *hdr;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->cmd = cmd;
	hdr->ch = bndev->ch;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma) {
		ret = -EAGAIN;
		goto free_skb;
	}

	ret = pm_runtime_get_sync(dmux->dev);
	if (ret < 0)
		goto tx_fail;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
		ret = -ENOMEM;
		goto tx_fail;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
		ret = -EIO;
		goto tx_fail;
	}

	dma_async_issue_pending(dmux->tx);
	return 0;

tx_fail:
	bam_dmux_tx_done(skb_dma);
free_skb:
	dev_kfree_skb(skb);
	return ret;
}

static int bam_dmux_netdev_open(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	int ret;

	ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
	if (ret)
		return ret;

	netif_start_queue(netdev);
	return 0;
}

static int bam_dmux_netdev_stop(struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
	return 0;
}

static unsigned int needed_room(unsigned int avail, unsigned int needed)
{
	if (avail >= needed)
		return 0;
	return needed - avail;
}

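/*
 * Prepend the BAM-DMUX header and append 1-4 zero bytes so the payload
 * ends on a 32-bit boundary, reallocating the skb head if there is not
 * enough head/tailroom or the skb is cloned.
 */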
static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
				   struct sk_buff *skb)
{
	unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
	unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
	unsigned int tail = needed_room(skb_tailroom(skb), pad);
	struct bam_dmux_hdr *hdr;
	int ret;

	if (head || tail || skb_cloned(skb)) {
		ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
		if (ret)
			return ret;
	}

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->magic = BAM_DMUX_HDR_MAGIC;
	hdr->signal = 0;
	hdr->cmd = BAM_DMUX_CMD_DATA;
	hdr->pad = pad;
	hdr->ch = bndev->ch;
	hdr->len = skb->len - sizeof(*hdr);
	if (pad)
		skb_put_zero(skb, pad);

	return 0;
}

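/*
 * The xmit handler must not sleep. If the device is not fully resumed
 * yet, the mapped skb is therefore only marked in tx_deferred_skb and
 * submitted later from bam_dmux_tx_wakeup_work().
 */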
static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct bam_dmux_netdev *bndev = netdev_priv(netdev);
	struct bam_dmux *dmux = bndev->dmux;
	struct bam_dmux_skb_dma *skb_dma;
	int active, ret;

	skb_dma = bam_dmux_tx_queue(dmux, skb);
	if (!skb_dma)
		return NETDEV_TX_BUSY;

	active = pm_runtime_get(dmux->dev);
	if (active < 0 && active != -EINPROGRESS)
		goto drop;

	ret = bam_dmux_tx_prepare_skb(bndev, skb);
	if (ret)
		goto drop;

	if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
		goto drop;

	if (active <= 0) {
		/* Cannot sleep here so mark skb for wakeup handler and return */
		if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
					  &dmux->tx_deferred_skb))
			queue_pm_work(&dmux->tx_wakeup_work);
		return NETDEV_TX_OK;
	}

	if (!bam_dmux_skb_dma_submit_tx(skb_dma))
		goto drop;

	dma_async_issue_pending(dmux->tx);
	return NETDEV_TX_OK;

drop:
	bam_dmux_tx_done(skb_dma);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

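/* Resume the device and submit all TX skbs deferred while it was suspended. */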
static void bam_dmux_tx_wakeup_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
	unsigned long pending;
	int ret, i;

	ret = pm_runtime_resume_and_get(dmux->dev);
	if (ret < 0) {
		dev_err(dmux->dev, "Failed to resume: %d\n", ret);
		return;
	}

	pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
	if (!pending)
		goto out;

	dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
	for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB)
		bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
	dma_async_issue_pending(dmux->tx);

out:
	pm_runtime_put_autosuspend(dmux->dev);
}

static const struct net_device_ops bam_dmux_ops = {
	.ndo_open	= bam_dmux_netdev_open,
	.ndo_stop	= bam_dmux_netdev_stop,
	.ndo_start_xmit	= bam_dmux_netdev_start_xmit,
};

static const struct device_type wwan_type = {
	.name = "wwan",
};

static void bam_dmux_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &bam_dmux_ops;

	dev->type = ARPHRD_RAWIP;
	SET_NETDEV_DEVTYPE(dev, &wwan_type);
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
	dev->needed_headroom = sizeof(struct bam_dmux_hdr);
	dev->needed_tailroom = sizeof(u32); /* word-aligned */
	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}

static void bam_dmux_register_netdev_work(struct work_struct *work)
{
	struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
	struct bam_dmux_netdev *bndev;
	struct net_device *netdev;
	int ch, ret;

	for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
		if (dmux->netdevs[ch])
			continue;

		netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
				      bam_dmux_netdev_setup);
		if (!netdev)
			return;

		SET_NETDEV_DEV(netdev, dmux->dev);
		netdev->dev_port = ch;

		bndev = netdev_priv(netdev);
		bndev->dmux = dmux;
		bndev->ch = ch;

		ret = register_netdev(netdev);
		if (ret) {
			dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
				ch, ret);
			free_netdev(netdev);
			return;
		}

		dmux->netdevs[ch] = netdev;
	}
}

static void bam_dmux_rx_callback(void *data);

static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
					   skb_dma->skb->len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
		return false;
	}

	desc->callback = bam_dmux_rx_callback;
	desc->callback_param = skb_dma;
	desc->cookie = dmaengine_submit(desc);
	return true;
}

static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
{
	if (!skb_dma->skb) {
		skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
		if (!skb_dma->skb)
			return false;
		skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
	}

	return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
	       bam_dmux_skb_dma_submit_rx(skb_dma);
}

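/*
 * Hand a received data frame to the network stack: strip the BAM-DMUX
 * header and padding, derive the protocol from the IP version nibble
 * and pass ownership of the skb to the netdev of the addressed channel.
 */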
static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
{
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	if (!netdev || !netif_running(netdev)) {
		dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
		return;
	}

	if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
		dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
			hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
		return;
	}

	skb_dma->skb = NULL; /* Hand over to network stack */

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, hdr->len);
	skb->dev = netdev;

	/* Only Raw-IP/QMAP is supported by this driver */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}

	netif_receive_skb(skb);
}

static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);

	if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
		dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
		return;
	}

	if (netdev) {
		netif_device_attach(netdev);
	} else {
		/* Cannot sleep here, schedule work to register the netdev */
		schedule_work(&dmux->register_netdev_work);
	}
}

static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
	struct net_device *netdev = dmux->netdevs[hdr->ch];

	dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);

	if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
		dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
		return;
	}

	if (netdev)
		netif_device_detach(netdev);
}

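/*
 * Completion callback for RX transfers: validate the header, dispatch
 * the command and immediately queue a fresh buffer so the RX ring
 * stays full.
 */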
static void bam_dmux_rx_callback(void *data)
{
	struct bam_dmux_skb_dma *skb_dma = data;
	struct bam_dmux *dmux = skb_dma->dmux;
	struct sk_buff *skb = skb_dma->skb;
	struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;

	bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);

	if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
		dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
		goto out;
	}

	if (hdr->ch >= BAM_DMUX_NUM_CH) {
		dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
		goto out;
	}

	switch (hdr->cmd) {
	case BAM_DMUX_CMD_DATA:
		bam_dmux_cmd_data(skb_dma);
		break;
	case BAM_DMUX_CMD_OPEN:
		bam_dmux_cmd_open(dmux, hdr);
		break;
	case BAM_DMUX_CMD_CLOSE:
		bam_dmux_cmd_close(dmux, hdr);
		break;
	default:
		dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
			hdr->cmd, hdr->ch);
		break;
	}

out:
	if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
		dma_async_issue_pending(dmux->rx);
}

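/*
 * Bring up the RX side when the remote side votes for power: request
 * the RX DMA channel and fill the RX ring. The TX channel is requested
 * lazily in bam_dmux_runtime_resume().
 */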
static bool bam_dmux_power_on(struct bam_dmux *dmux)
{
	struct device *dev = dmux->dev;
	struct dma_slave_config dma_rx_conf = {
		.direction = DMA_DEV_TO_MEM,
		.src_maxburst = BAM_DMUX_BUFFER_SIZE,
	};
	int i;

	dmux->rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dmux->rx)) {
		dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
		dmux->rx = NULL;
		return false;
	}
	dmaengine_slave_config(dmux->rx, &dma_rx_conf);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
			return false;
	}
	dma_async_issue_pending(dmux->rx);

	return true;
}

static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
			       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		struct bam_dmux_skb_dma *skb_dma = &skbs[i];

		if (skb_dma->addr)
			bam_dmux_skb_dma_unmap(skb_dma, dir);
		if (skb_dma->skb) {
			dev_kfree_skb(skb_dma->skb);
			skb_dma->skb = NULL;
		}
	}
}

static void bam_dmux_power_off(struct bam_dmux *dmux)
{
	if (dmux->tx) {
		dmaengine_terminate_sync(dmux->tx);
		dma_release_channel(dmux->tx);
		dmux->tx = NULL;
	}

	if (dmux->rx) {
		dmaengine_terminate_sync(dmux->rx);
		dma_release_channel(dmux->rx);
		dmux->rx = NULL;
	}

	bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
}

static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;
	bool new_state = !dmux->pc_state;

	dev_dbg(dmux->dev, "pc: %u\n", new_state);

	if (new_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	} else {
		bam_dmux_power_off(dmux);
		bam_dmux_pc_ack(dmux);
	}

	dmux->pc_state = new_state;
	wake_up_all(&dmux->pc_wait);

	return IRQ_HANDLED;
}

static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
{
	struct bam_dmux *dmux = data;

	dev_dbg(dmux->dev, "pc ack\n");
	complete_all(&dmux->pc_ack_completion);

	return IRQ_HANDLED;
}

static int bam_dmux_runtime_suspend(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime suspend\n");
	bam_dmux_pc_vote(dmux, false);

	return 0;
}

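/*
 * Power-up handshake with the remote side: wait for the ack of any
 * previous power-down vote, vote for power, then wait for the ack and
 * for the "pc" state to actually go up before requesting the TX channel.
 */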
static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
{
	struct bam_dmux *dmux = dev_get_drvdata(dev);

	dev_dbg(dev, "runtime resume\n");

	/* Wait until previous power down was acked */
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT))
		return -ETIMEDOUT;

	/* Vote for power state */
	bam_dmux_pc_vote(dmux, true);

	/* Wait for ack */
	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
					 BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

	/* Wait until we're up */
	if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
				BAM_DMUX_REMOTE_TIMEOUT)) {
		bam_dmux_pc_vote(dmux, false);
		return -ETIMEDOUT;
	}

	/* Ensure that we actually initialized successfully */
	if (!dmux->rx) {
		bam_dmux_pc_vote(dmux, false);
		return -ENXIO;
	}

	/* Request TX channel if necessary */
	if (dmux->tx)
		return 0;

	dmux->tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dmux->tx)) {
		dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
		dmux->tx = NULL;
		bam_dmux_runtime_suspend(dev);
		return -ENXIO;
	}

	return 0;
}

static int bam_dmux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bam_dmux *dmux;
	int ret, pc_ack_irq, i;
	unsigned int bit;

	dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
	if (!dmux)
		return -ENOMEM;

	dmux->dev = dev;
	platform_set_drvdata(pdev, dmux);

	dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
	if (dmux->pc_irq < 0)
		return dmux->pc_irq;

	pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
	if (pc_ack_irq < 0)
		return pc_ack_irq;

	dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
	if (IS_ERR(dmux->pc))
		return dev_err_probe(dev, PTR_ERR(dmux->pc),
				     "Failed to get pc state\n");
	dmux->pc_mask = BIT(bit);

	dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
	if (IS_ERR(dmux->pc_ack))
		return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
				     "Failed to get pc-ack state\n");
	dmux->pc_ack_mask = BIT(bit);

	init_waitqueue_head(&dmux->pc_wait);
	init_completion(&dmux->pc_ack_completion);
	complete_all(&dmux->pc_ack_completion);

	spin_lock_init(&dmux->tx_lock);
	INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
	INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);

	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
		dmux->rx_skbs[i].dmux = dmux;
		dmux->tx_skbs[i].dmux = dmux;
	}

	/* Runtime PM manages our own power vote.
	 * Note that the RX path may be active even if we are runtime suspended,
	 * since it is controlled by the remote side.
	 */
	pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		goto err_disable_pm;

	ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
					IRQF_ONESHOT, NULL, dmux);
	if (ret)
		goto err_disable_pm;

	ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
				    &dmux->pc_state);
	if (ret)
		goto err_disable_pm;

	/* Check if remote finished initialization before us */
	if (dmux->pc_state) {
		if (bam_dmux_power_on(dmux))
			bam_dmux_pc_ack(dmux);
		else
			bam_dmux_power_off(dmux);
	}

	return 0;

err_disable_pm:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	return ret;
}

static void bam_dmux_remove(struct platform_device *pdev)
{
	struct bam_dmux *dmux = platform_get_drvdata(pdev);
	struct device *dev = dmux->dev;
	LIST_HEAD(list);
	int i;

	/* Unregister network interfaces */
	cancel_work_sync(&dmux->register_netdev_work);
	rtnl_lock();
	for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
		if (dmux->netdevs[i])
			unregister_netdevice_queue(dmux->netdevs[i], &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
	cancel_work_sync(&dmux->tx_wakeup_work);

	/* Drop our own power vote */
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	bam_dmux_runtime_suspend(dev);
	pm_runtime_set_suspended(dev);

	/* Try to wait for remote side to drop power vote */
	if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
		dev_err(dev, "Timed out waiting for remote side to suspend\n");

	/* Make sure everything is cleaned up before we return */
	disable_irq(dmux->pc_irq);
	bam_dmux_power_off(dmux);
	bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
}

static const struct dev_pm_ops bam_dmux_pm_ops = {
	SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
};

static const struct of_device_id bam_dmux_of_match[] = {
	{ .compatible = "qcom,bam-dmux" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bam_dmux_of_match);

static struct platform_driver bam_dmux_driver = {
	.probe = bam_dmux_probe,
	.remove = bam_dmux_remove,
	.driver = {
		.name = "bam-dmux",
		.pm = &bam_dmux_pm_ops,
		.of_match_table = bam_dmux_of_match,
	},
};
module_platform_driver(bam_dmux_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");