// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
#include "trace.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> __ffs(mask);

	return 0;
}

int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 val, u32 delayus,
				    u32 timeout_ms)
{
	int ret;
	u32 out, retry = (timeout_ms * 1000) / delayus;

	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
		if (ret)
			return ret;

		if (out == val)
			return 0;

		fsleep(delayus);
	}

	return -ETIMEDOUT;
}

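/*
 * A worked example of the polling math above (values are illustrative, not
 * taken from this driver): with timeout_ms = 1000 and delayus = 25000,
 * retry = (1000 * 1000) / 25000 = 40 iterations of ~25 ms each, i.e.
 * roughly one second of polling before -ETIMEDOUT is returned. Callers
 * should pick delayus small enough that the integer division does not
 * truncate the retry count to zero.
 */
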
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	tmp &= ~mask;
	tmp |= (val << __ffs(mask));
	mhi_write_reg(mhi_cntrl, base, offset, tmp);

	return 0;
}

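/*
 * Both register field helpers shift by __ffs(mask), the bit position of the
 * lowest set bit in the mask. A worked example with illustrative values:
 * with mask = 0x0000ff00 and a register value of 0x12345678,
 * mhi_read_reg_field() yields (0x12345678 & 0x0000ff00) >> 8 = 0x56, and
 * mhi_write_reg_field() with val = 0xab rewrites the register to
 * 0x1234ab78, leaving all bits outside the mask untouched.
 */
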
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

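/*
 * Doorbell moderation for burst mode (an interpretation of the code below,
 * not normative documentation): while db_mode is clear, ring updates are
 * accumulated without ringing the doorbell and the device is expected to
 * keep polling the ring. Once the device signals it has gone idle (an OOB
 * or DB_MODE completion event sets db_mode again, see parse_xfer_event()),
 * the next update rings the doorbell once and clears the flag.
 */
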
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = cpu_to_le64(db);
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);

	/*
	 * Writes to the new ring element must be visible to the hardware
	 * before letting the hardware know there is a new element to fetch.
	 */
	dma_wmb();
	*ring->ctxt_wp = cpu_to_le64(db);

	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK, &state);

	return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->reset) {
		mhi_cntrl->reset(mhi_cntrl);
		return;
	}

	/* Generic MHI SoC reset */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
		      MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
			  buf_info->bb_addr, buf_info->p_addr);
}

static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

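/*
 * One slot is always kept empty so that wp == rp unambiguously means
 * "ring empty" rather than "ring full". A worked example with illustrative
 * numbers: a ring of len = 8 * el_size holds at most 7 queued elements. If
 * rp is at element 2 and wp at element 5 (wp >= rp), the free count is
 * (2 - 0) + ((8 - 5) - 1) = 4; if wp has instead wrapped back to element 0
 * (wp < rp), it is ((2 - 0) - 1) = 1.
 */
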
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
	/* Make the ring pointer update visible to all CPUs */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
	/* Make the ring pointer update visible to all CPUs */
	smp_wmb();
}

static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
		!(addr & (sizeof(struct mhi_ring_element) - 1));
}

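/*
 * The alignment test above relies on sizeof(struct mhi_ring_element) being
 * a power of two (16 bytes: one 64-bit ptr plus two 32-bit dwords), so
 * "addr & (size - 1)" computes addr modulo the element size. For example
 * (illustrative value), an in-bounds addr of iommu_base + 0x28 would still
 * be rejected because 0x28 & 0xf = 0x8, i.e. it does not land on an
 * element boundary.
 */
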
int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_chan *ul_chan, *dl_chan;
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;
	enum mhi_ee_type ee = MHI_EE_MAX;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	/*
	 * If an execution environment is specified, remove only those devices
	 * that were started in it (based on the ee_mask of the channels), as
	 * we move on to a different execution environment.
	 */
	if (data)
		ee = *(enum mhi_ee_type *)data;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (ul_chan) {
		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&ul_chan->mhi_dev->dev);
	}

	if (dl_chan) {
		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&dl_chan->mhi_dev->dev);
	}

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
			    enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);

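/*
 * A minimal client-side flow control sketch (the function name and batch
 * logic are illustrative, not part of this driver): a client can size its
 * TX bursts against the free TRE count instead of retrying on -EAGAIN.
 *
 *	static int example_tx_burst(struct mhi_device *mhi_dev,
 *				    struct sk_buff_head *txq)
 *	{
 *		int free = mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE);
 *
 *		while (free-- > 0 && !skb_queue_empty(txq)) {
 *			struct sk_buff *skb = skb_dequeue(txq);
 *			int ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb,
 *						skb->len, MHI_EOT);
 *			if (ret) {
 *				skb_queue_head(txq, skb);
 *				return ret;
 *			}
 *		}
 *
 *		return 0;
 *	}
 */
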
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan for offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(&mhi_cntrl->mhi_dev->dev),
			     mhi_dev->name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	dma_addr_t ptr;
	void *dev_rp;

	/*
	 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked
	 * during __free_irq() and by that time mhi_ctxt would've been freed.
	 * So check for the existence of mhi_ctxt before handling the IRQs.
	 */
	if (!mhi_cntrl->mhi_ctxt) {
		dev_dbg(&mhi_cntrl->mhi_dev->dev,
			"mhi_ctxt has been freed\n");
		return IRQ_HANDLED;
	}

	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	ptr = le64_to_cpu(er_ctxt->rp);

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return IRQ_HANDLED;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto exit_intvec;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_get_exec_env(mhi_cntrl);

	trace_mhi_intvec_states(mhi_cntrl, ee, state);
	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (pm_state != MHI_PM_SYS_ERR_DETECT)
		goto exit_intvec;

	switch (ee) {
	case MHI_EE_RDDM:
		/* proceed if power down is not already in progress */
		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			mhi_cntrl->ee = ee;
			mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
			wake_up_all(&mhi_cntrl->state_event);
		}
		break;
	case MHI_EE_PBL:
	case MHI_EE_EDL:
	case MHI_EE_PTHRU:
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		mhi_cntrl->ee = ee;
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	default:
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	}

exit_intvec:
	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	/* Update the WP */
	ring->wp += ring->el_size;

	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;

	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Make the ring pointer updates visible to all CPUs */
	smp_wmb();
}

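/*
 * On an event ring the host is the consumer, which is why the helper above
 * advances both pointers together: advancing RP consumes the element just
 * processed, while advancing WP (and mirroring it into the event ring
 * context) hands the slot back to the device for reuse. The two pointers
 * therefore chase each other one element apart.
 */
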
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_ring_element *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If this is a DB event, grab the channel lock as a writer with
	 * interrupts disabled, because we have to update the DB register
	 * and another thread could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_ring_element *local_rp, *ev_tre;
		void *dev_rp, *next_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		if (!is_valid_ring_ptr(tre_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event element points outside of the tre ring\n");
			break;
		}
		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;

		next_rp = local_rp + 1;
		if (next_rp >= tre_ring->base + tre_ring->len)
			next_rp = tre_ring->base;
		if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event element points to an unexpected TRE\n");
			break;
		}

		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;

			/* truncate to buf len if xfer_len is larger */
			result.bytes_xferd =
				min_t(u16, xfer_len, buf_info->len);
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			read_unlock_bh(&mhi_chan->lock);

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE) {
				atomic_dec(&mhi_cntrl->pending_pkts);
				/* Release the reference taken in mhi_queue() */
				mhi_cntrl->runtime_put(mhi_cntrl);
			}

			read_lock_bh(&mhi_chan->lock);
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long pm_lock_flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(ev_code) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_ring_element *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received an out-of-bounds cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/* truncate to buf len if xfer_len is larger */
	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: We're arbitrarily incrementing RP even though the completion
	 * packet we just processed might not correspond to that descriptor.
	 * We can do this because the device is guaranteed to cache
	 * descriptors in the order it receives them, so even though this
	 * completion event is for a different descriptor, every descriptor
	 * in between can be reused.
	 * Example:
	 * The transfer ring has descriptors: A, B, C, D.
	 * The last descriptor the host queued is D (WP) and the first is
	 * A (RP).
	 * The completion event we just serviced is for descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C,
	 * even though the host has not received completions for A and B.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_ring_element *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_ring_element *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event element points outside of the cmd ring\n");
		return;
	}

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);

	if (chan < mhi_cntrl->max_chan &&
	    mhi_cntrl->mhi_chan[chan].configured) {
		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		write_lock_bh(&mhi_chan->lock);
		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
		complete(&mhi_chan->completion);
		write_unlock_bh(&mhi_chan->lock);
	} else {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Completion packet for invalid channel ID: %d\n", chan);
	}

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;
	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in an error state, but it's still possible
	 * to transition to the error state while processing events.
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		trace_mhi_ctrl_event(mhi_cntrl, local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				mhi_state_str(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state pm_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				pm_state = mhi_tryset_pm_state(mhi_cntrl,
							       MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (pm_state == MHI_PM_SYS_ERR_DETECT)
					mhi_pm_sys_err_handler(mhi_cntrl);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					mhi_state_str(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_FP:
				st = DEV_ST_TRANSITION_FP;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", event);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);

			WARN_ON(chan >= mhi_cntrl->max_chan);

			/*
			 * Only process the event ring elements whose channel
			 * ID is within the maximum supported range.
			 */
			if (chan < mhi_cntrl->max_chan) {
				mhi_chan = &mhi_cntrl->mhi_chan[chan];
				if (!mhi_chan->configured)
					break;
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
			}
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);

	/* Ring EV DB only if there is any pending element to process */
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;
	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		return -EIO;
	}

	dev_rp = mhi_to_virtual(ev_ring, ptr);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		trace_mhi_data_event(mhi_cntrl, local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan &&
		    mhi_cntrl->mhi_chan[chan].configured) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;

		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
			return -EIO;
		}

		dev_rp = mhi_to_virtual(ev_ring, ptr);
		count++;
	}
	read_lock_bh(&mhi_cntrl->pm_lock);

	/* Ring EV DB only if there is any pending element to process */
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check the PM state w/o a lock here because there is no way
	 * the PM state can change from reg access valid to no access while
	 * this thread is executing.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_trigger_resume(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but have no events to process; maybe the device
	 * went to SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			mhi_pm_sys_err_handler(mhi_cntrl);
	}
}

static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}

static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	unsigned long flags;
	int ret;

	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
	if (unlikely(ret))
		return -EAGAIN;

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
	if (unlikely(ret))
		return ret;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	/*
	 * Packet is queued; take a usage ref to exit M3 if necessary.
	 * For a host->device buffer, the balanced put is done on buffer
	 * completion; for a device->host buffer, it is done after ringing
	 * the DB.
	 */
	mhi_cntrl->runtime_get(mhi_cntrl);

	/* Assert dev_wake (to exit/prevent M1/M2) */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);

	if (dir == DMA_FROM_DEVICE)
		mhi_cntrl->runtime_put(mhi_cntrl);

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return ret;
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

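/*
 * A minimal client-side RX sketch (the function name and sizing are
 * illustrative, not part of this driver): a typical client pre-queues
 * receive buffers on its DL channel and requeues them from its xfer_cb().
 * A nonzero return from the queue path (e.g. -EAGAIN) means the transfer
 * ring is full or the device is unavailable.
 *
 *	static void example_refill_rx(struct mhi_device *mhi_dev, size_t mru)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = alloc_skb(mru, GFP_KERNEL))) {
 *			if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb,
 *					  mru, MHI_EOT)) {
 *				kfree_skb(skb);
 *				break;
 *			}
 *		}
 *	}
 */
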
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_ring_element *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret = 0;

	/* Protect accesses for reading and incrementing WP */
	write_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
		ret = -ENODEV;
		goto out;
	}

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->pre_mapped = info->pre_mapped;
	if (info->pre_mapped)
		buf_info->p_addr = info->p_addr;
	else
		buf_info->v_addr = info->v_addr;
	buf_info->cb_buf = info->cb_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = info->len;

	if (!info->pre_mapped) {
		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
		if (ret)
			goto out;
	}

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

out:
	write_unlock_bh(&mhi_chan->lock);

	return ret;
}

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_ring_element *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_STOP_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan,
				    enum mhi_ch_state_type to_state)
{
	struct device *dev = &mhi_chan->mhi_dev->dev;
	enum mhi_cmd_type cmd = MHI_CMD_NOP;
	int ret;

	trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
	switch (to_state) {
	case MHI_CH_STATE_TYPE_RESET:
		write_lock_irq(&mhi_chan->lock);
		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
		    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
		    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
			write_unlock_irq(&mhi_chan->lock);
			return -EINVAL;
		}
		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		write_unlock_irq(&mhi_chan->lock);

		cmd = MHI_CMD_RESET_CHAN;
		break;
	case MHI_CH_STATE_TYPE_STOP:
		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
			return -EINVAL;

		cmd = MHI_CMD_STOP_CHAN;
		break;
	case MHI_CH_STATE_TYPE_START:
		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
			return -EINVAL;

		cmd = MHI_CMD_START_CHAN;
		break;
	default:
		dev_err(dev, "%d: Channel state update to %s not allowed\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		return -EINVAL;
	}

	/* bring host and device out of suspended states */
	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
	if (ret)
		return ret;
	mhi_cntrl->runtime_get(mhi_cntrl);

	reinit_completion(&mhi_chan->completion);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
	if (ret) {
		dev_err(dev, "%d: Failed to send %s channel command\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		goto exit_channel_update;
	}

	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		dev_err(dev,
			"%d: Failed to receive %s channel command completion\n",
			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
		ret = -EIO;
		goto exit_channel_update;
	}

	ret = 0;

	if (to_state != MHI_CH_STATE_TYPE_RESET) {
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
			MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
		write_unlock_irq(&mhi_chan->lock);
	}

	trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));

exit_channel_update:
	mhi_cntrl->runtime_put(mhi_cntrl);
	mhi_device_put(mhi_cntrl->mhi_dev);

	return ret;
}

static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				  struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_chan->mhi_dev->dev;

	mutex_lock(&mhi_chan->mutex);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
		goto exit_unprepare_channel;
	}

	/* no more processing events for this channel */
	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_RESET);
	if (ret)
		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
			mhi_chan->chan);

exit_unprepare_channel:
	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);

	mutex_unlock(&mhi_chan->mutex);
}

static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			       struct mhi_chan *mhi_chan, unsigned int flags)
{
	int ret = 0;
	struct device *dev = &mhi_chan->mhi_dev->dev;

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* Check if the client manages the channel context for offload channels */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
				       MHI_CH_STATE_TYPE_START);
	if (ret)
		goto error_pm_state;

	mutex_unlock(&mhi_chan->mutex);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;
}

static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_ring_element *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;
	dma_addr_t ptr;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all pending events related to this channel as STALE */
	spin_lock_irqsave(&mhi_event->lock, flags);

	ptr = le64_to_cpu(er_ctxt->rp);
	if (!is_valid_ring_ptr(ev_ring, ptr)) {
		dev_err(&mhi_cntrl->mhi_dev->dev,
			"Event ring rp points outside of the event ring\n");
		dev_rp = ev_ring->rp;
	} else {
		dev_rp = mhi_to_virtual(ev_ring, ptr);
	}

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE) {
			atomic_dec(&mhi_cntrl->pending_pkts);
			/* Release the reference taken in mhi_queue() */
			mhi_cntrl->runtime_put(mhi_cntrl);
		}

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		result.buf_addr = buf_info->cb_buf;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}

int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	return __mhi_prepare_for_transfer(mhi_dev, 0);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

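/*
 * A minimal client lifecycle sketch (the probe/remove bodies are
 * illustrative, not part of this driver): a client driver typically
 * prepares its channels in probe, queues transfers while active, and
 * tears down in remove with the helper below.
 *
 *	static int example_probe(struct mhi_device *mhi_dev,
 *				 const struct mhi_device_id *id)
 *	{
 *		int ret = mhi_prepare_for_transfer(mhi_dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		// Channels are started; mhi_queue_buf()/mhi_queue_skb()
 *		// may be called from here on.
 *		return 0;
 *	}
 *
 *	static void example_remove(struct mhi_device *mhi_dev)
 *	{
 *		mhi_unprepare_from_transfer(mhi_dev);
 *	}
 */
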
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	void __iomem *base = mhi_cntrl->regs;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);