1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/slab.h>
17 #include "internal.h"
18
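/*
 * Register accessors: all MMIO access goes through the read_reg()/write_reg()
 * callbacks supplied by the controller driver, so the bus glue decides how the
 * registers are actually accessed.
 */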
19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
20 void __iomem *base, u32 offset, u32 *out)
21 {
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
23 }
24
25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
26 void __iomem *base, u32 offset,
27 u32 mask, u32 *out)
28 {
29 u32 tmp;
30 int ret;
31
32 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
33 if (ret)
34 return ret;
35
36 *out = (tmp & mask) >> __ffs(mask);
37
38 return 0;
39 }
40
41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
42 void __iomem *base, u32 offset,
43 u32 mask, u32 val, u32 delayus,
44 u32 timeout_ms)
45 {
46 int ret;
47 u32 out, retry = (timeout_ms * 1000) / delayus;
48
49 while (retry--) {
50 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
51 if (ret)
52 return ret;
53
54 if (out == val)
55 return 0;
56
57 fsleep(delayus);
58 }
59
60 return -ETIMEDOUT;
61 }
62
63 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
64 u32 offset, u32 val)
65 {
66 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
67 }
68
69 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
70 void __iomem *base, u32 offset, u32 mask,
71 u32 val)
72 {
73 int ret;
74 u32 tmp;
75
76 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
77 if (ret)
78 return ret;
79
80 tmp &= ~mask;
81 tmp |= (val << __ffs(mask));
82 mhi_write_reg(mhi_cntrl, base, offset, tmp);
83
84 return 0;
85 }
86
87 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
88 dma_addr_t db_val)
89 {
90 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
91 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
92 }
93
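/*
 * Burst mode doorbell: the doorbell is only written while db_mode is set,
 * i.e. after the device has requested it via an OOB/DB_MODE event. Once
 * written, db_mode is cleared and further updates are held back until the
 * device re-arms it.
 */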
94 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
95 struct db_cfg *db_cfg,
96 void __iomem *db_addr,
97 dma_addr_t db_val)
98 {
99 if (db_cfg->db_mode) {
100 db_cfg->db_val = db_val;
101 mhi_write_db(mhi_cntrl, db_addr, db_val);
102 db_cfg->db_mode = 0;
103 }
104 }
105
106 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
107 struct db_cfg *db_cfg,
108 void __iomem *db_addr,
109 dma_addr_t db_val)
110 {
111 db_cfg->db_val = db_val;
112 mhi_write_db(mhi_cntrl, db_addr, db_val);
113 }
114
115 void mhi_ring_er_db(struct mhi_event *mhi_event)
116 {
117 struct mhi_ring *ring = &mhi_event->ring;
118
119 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
120 ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
121 }
122
123 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
124 {
125 dma_addr_t db;
126 struct mhi_ring *ring = &mhi_cmd->ring;
127
128 db = ring->iommu_base + (ring->wp - ring->base);
129 *ring->ctxt_wp = cpu_to_le64(db);
130 mhi_write_db(mhi_cntrl, ring->db_addr, db);
131 }
132
133 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
134 struct mhi_chan *mhi_chan)
135 {
136 struct mhi_ring *ring = &mhi_chan->tre_ring;
137 dma_addr_t db;
138
139 db = ring->iommu_base + (ring->wp - ring->base);
140
141 /*
142 * Writes to the new ring element must be visible to the hardware
143 * before letting the hardware know there is a new element to fetch.
144 */
145 dma_wmb();
146 *ring->ctxt_wp = cpu_to_le64(db);
147
148 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
149 ring->db_addr, db);
150 }
151
152 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
153 {
154 u32 exec;
155 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
156
157 return (ret) ? MHI_EE_MAX : exec;
158 }
159 EXPORT_SYMBOL_GPL(mhi_get_exec_env);
160
161 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
162 {
163 u32 state;
164 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
165 MHISTATUS_MHISTATE_MASK, &state);
166 return ret ? MHI_STATE_MAX : state;
167 }
168 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
169
170 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
171 {
172 if (mhi_cntrl->reset) {
173 mhi_cntrl->reset(mhi_cntrl);
174 return;
175 }
176
177 /* Generic MHI SoC reset */
178 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
179 MHI_SOC_RESET_REQ);
180 }
181 EXPORT_SYMBOL_GPL(mhi_soc_reset);
182
183 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
184 struct mhi_buf_info *buf_info)
185 {
186 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
187 buf_info->v_addr, buf_info->len,
188 buf_info->dir);
189 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
190 return -ENOMEM;
191
192 return 0;
193 }
194
195 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
196 struct mhi_buf_info *buf_info)
197 {
198 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
199 &buf_info->p_addr, GFP_ATOMIC);
200
201 if (!buf)
202 return -ENOMEM;
203
204 if (buf_info->dir == DMA_TO_DEVICE)
205 memcpy(buf, buf_info->v_addr, buf_info->len);
206
207 buf_info->bb_addr = buf;
208
209 return 0;
210 }
211
212 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
213 struct mhi_buf_info *buf_info)
214 {
215 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
216 buf_info->dir);
217 }
218
219 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
220 struct mhi_buf_info *buf_info)
221 {
222 if (buf_info->dir == DMA_FROM_DEVICE)
223 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
224
225 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
226 buf_info->bb_addr, buf_info->p_addr);
227 }
228
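/*
 * Number of free elements in a ring. One element is always left unused so
 * that a ring with WP == RP can unambiguously be treated as empty.
 */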
229 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
230 struct mhi_ring *ring)
231 {
232 int nr_el;
233
234 if (ring->wp < ring->rp) {
235 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
236 } else {
237 nr_el = (ring->rp - ring->base) / ring->el_size;
238 nr_el += ((ring->base + ring->len - ring->wp) /
239 ring->el_size) - 1;
240 }
241
242 return nr_el;
243 }
244
245 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
246 {
247 return (addr - ring->iommu_base) + ring->base;
248 }
249
250 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
251 struct mhi_ring *ring)
252 {
253 ring->wp += ring->el_size;
254 if (ring->wp >= (ring->base + ring->len))
255 ring->wp = ring->base;
256 /* smp update */
257 smp_wmb();
258 }
259
260 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
261 struct mhi_ring *ring)
262 {
263 ring->rp += ring->el_size;
264 if (ring->rp >= (ring->base + ring->len))
265 ring->rp = ring->base;
266 /* smp update */
267 smp_wmb();
268 }
269
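/*
 * A ring pointer read back from the device is only trusted if it lies within
 * the ring's IOMMU range and is aligned to the ring element size.
 */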
270 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
271 {
272 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
273 !(addr & (sizeof(struct mhi_ring_element) - 1));
274 }
275
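/*
 * Bus iterator callback used to tear down MHI client devices. If an execution
 * environment is passed in @data, only devices whose channels do not support
 * that environment are destroyed.
 */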
276 int mhi_destroy_device(struct device *dev, void *data)
277 {
278 struct mhi_chan *ul_chan, *dl_chan;
279 struct mhi_device *mhi_dev;
280 struct mhi_controller *mhi_cntrl;
281 enum mhi_ee_type ee = MHI_EE_MAX;
282
283 if (dev->bus != &mhi_bus_type)
284 return 0;
285
286 mhi_dev = to_mhi_device(dev);
287 mhi_cntrl = mhi_dev->mhi_cntrl;
288
289 /* Only destroy virtual devices that are attached to the bus */
290 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
291 return 0;
292
293 ul_chan = mhi_dev->ul_chan;
294 dl_chan = mhi_dev->dl_chan;
295
296 /*
297 * If an execution environment is specified, remove only those devices
298 * that were started in it, based on the channels' ee_mask, as we move
299 * on to a different execution environment.
300 */
301 if (data)
302 ee = *(enum mhi_ee_type *)data;
303
304 /*
305 * For the suspend and resume case, this function will get called
306 * without mhi_unregister_controller(). Hence, we need to drop the
307 * references to mhi_dev created for ul and dl channels. We can
308 * be sure that there will be no instances of mhi_dev left after
309 * this.
310 */
311 if (ul_chan) {
312 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
313 return 0;
314
315 put_device(&ul_chan->mhi_dev->dev);
316 }
317
318 if (dl_chan) {
319 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
320 return 0;
321
322 put_device(&dl_chan->mhi_dev->dev);
323 }
324
325 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
326 mhi_dev->name);
327
328 /* Notify the client and remove the device from MHI bus */
329 device_del(dev);
330 put_device(dev);
331
332 return 0;
333 }
334
335 int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
336 enum dma_data_direction dir)
337 {
338 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
339 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
340 mhi_dev->ul_chan : mhi_dev->dl_chan;
341 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
342
343 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
344 }
345 EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
346
347 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
348 {
349 struct mhi_driver *mhi_drv;
350
351 if (!mhi_dev->dev.driver)
352 return;
353
354 mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
355
356 if (mhi_drv->status_cb)
357 mhi_drv->status_cb(mhi_dev, cb_reason);
358 }
359 EXPORT_SYMBOL_GPL(mhi_notify);
360
361 /* Bind MHI channels to MHI devices */
362 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
363 {
364 struct mhi_chan *mhi_chan;
365 struct mhi_device *mhi_dev;
366 struct device *dev = &mhi_cntrl->mhi_dev->dev;
367 int i, ret;
368
369 mhi_chan = mhi_cntrl->mhi_chan;
370 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
371 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
372 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
373 continue;
374 mhi_dev = mhi_alloc_device(mhi_cntrl);
375 if (IS_ERR(mhi_dev))
376 return;
377
378 mhi_dev->dev_type = MHI_DEVICE_XFER;
379 switch (mhi_chan->dir) {
380 case DMA_TO_DEVICE:
381 mhi_dev->ul_chan = mhi_chan;
382 mhi_dev->ul_chan_id = mhi_chan->chan;
383 break;
384 case DMA_FROM_DEVICE:
385 /* We use dl_chan as offload channels */
386 mhi_dev->dl_chan = mhi_chan;
387 mhi_dev->dl_chan_id = mhi_chan->chan;
388 break;
389 default:
390 dev_err(dev, "Direction not supported\n");
391 put_device(&mhi_dev->dev);
392 return;
393 }
394
395 get_device(&mhi_dev->dev);
396 mhi_chan->mhi_dev = mhi_dev;
397
398 /* Check if the next channel belongs to the same device (same name) */
399 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
400 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
401 i++;
402 mhi_chan++;
403 if (mhi_chan->dir == DMA_TO_DEVICE) {
404 mhi_dev->ul_chan = mhi_chan;
405 mhi_dev->ul_chan_id = mhi_chan->chan;
406 } else {
407 mhi_dev->dl_chan = mhi_chan;
408 mhi_dev->dl_chan_id = mhi_chan->chan;
409 }
410 get_device(&mhi_dev->dev);
411 mhi_chan->mhi_dev = mhi_dev;
412 }
413 }
414
415 /* Channel name is the same for both UL and DL */
416 mhi_dev->name = mhi_chan->name;
417 dev_set_name(&mhi_dev->dev, "%s_%s",
418 dev_name(&mhi_cntrl->mhi_dev->dev),
419 mhi_dev->name);
420
421 /* Init wakeup source if available */
422 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
423 device_init_wakeup(&mhi_dev->dev, true);
424
425 ret = device_add(&mhi_dev->dev);
426 if (ret)
427 put_device(&mhi_dev->dev);
428 }
429 }
430
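/*
 * Hard IRQ handler for event ring interrupts. If the event ring has pending
 * elements, either notify the client directly (client-managed rings) or
 * schedule the event ring tasklet to process them.
 */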
431 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
432 {
433 struct mhi_event *mhi_event = dev;
434 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
435 struct mhi_event_ctxt *er_ctxt;
436 struct mhi_ring *ev_ring = &mhi_event->ring;
437 dma_addr_t ptr;
438 void *dev_rp;
439
440 /*
441 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
442 * and by that time mhi_ctxt would have been freed. So check for the existence of mhi_ctxt
443 * before handling the IRQs.
444 */
445 if (!mhi_cntrl->mhi_ctxt) {
446 dev_dbg(&mhi_cntrl->mhi_dev->dev,
447 "mhi_ctxt has been freed\n");
448 return IRQ_HANDLED;
449 }
450
451 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
452 ptr = le64_to_cpu(er_ctxt->rp);
453
454 if (!is_valid_ring_ptr(ev_ring, ptr)) {
455 dev_err(&mhi_cntrl->mhi_dev->dev,
456 "Event ring rp points outside of the event ring\n");
457 return IRQ_HANDLED;
458 }
459
460 dev_rp = mhi_to_virtual(ev_ring, ptr);
461
462 /* Only proceed if event ring has pending events */
463 if (ev_ring->rp == dev_rp)
464 return IRQ_HANDLED;
465
466 /* For client managed event ring, notify pending data */
467 if (mhi_event->cl_manage) {
468 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
469 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
470
471 if (mhi_dev)
472 mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
473 } else {
474 tasklet_schedule(&mhi_event->task);
475 }
476
477 return IRQ_HANDLED;
478 }
479
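/*
 * Threaded handler for the controller (BHI) interrupt. It samples the current
 * MHI state and execution environment and, on a SYS_ERR transition, kicks off
 * RDDM or system error handling as appropriate.
 */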
480 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
481 {
482 struct mhi_controller *mhi_cntrl = priv;
483 struct device *dev = &mhi_cntrl->mhi_dev->dev;
484 enum mhi_state state;
485 enum mhi_pm_state pm_state = 0;
486 enum mhi_ee_type ee;
487
488 write_lock_irq(&mhi_cntrl->pm_lock);
489 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
490 write_unlock_irq(&mhi_cntrl->pm_lock);
491 goto exit_intvec;
492 }
493
494 state = mhi_get_mhi_state(mhi_cntrl);
495 ee = mhi_get_exec_env(mhi_cntrl);
496 dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
497 TO_MHI_EXEC_STR(mhi_cntrl->ee),
498 mhi_state_str(mhi_cntrl->dev_state),
499 TO_MHI_EXEC_STR(ee), mhi_state_str(state));
500
501 if (state == MHI_STATE_SYS_ERR) {
502 dev_dbg(dev, "System error detected\n");
503 pm_state = mhi_tryset_pm_state(mhi_cntrl,
504 MHI_PM_SYS_ERR_DETECT);
505 }
506 write_unlock_irq(&mhi_cntrl->pm_lock);
507
508 if (pm_state != MHI_PM_SYS_ERR_DETECT)
509 goto exit_intvec;
510
511 switch (ee) {
512 case MHI_EE_RDDM:
513 /* proceed if power down is not already in progress */
514 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
515 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
516 mhi_cntrl->ee = ee;
517 wake_up_all(&mhi_cntrl->state_event);
518 }
519 break;
520 case MHI_EE_PBL:
521 case MHI_EE_EDL:
522 case MHI_EE_PTHRU:
523 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
524 mhi_cntrl->ee = ee;
525 wake_up_all(&mhi_cntrl->state_event);
526 mhi_pm_sys_err_handler(mhi_cntrl);
527 break;
528 default:
529 wake_up_all(&mhi_cntrl->state_event);
530 mhi_pm_sys_err_handler(mhi_cntrl);
531 break;
532 }
533
534 exit_intvec:
535
536 return IRQ_HANDLED;
537 }
538
539 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
540 {
541 struct mhi_controller *mhi_cntrl = dev;
542
543 /* Wake up anyone waiting for a state change */
544 wake_up_all(&mhi_cntrl->state_event);
545
546 return IRQ_WAKE_THREAD;
547 }
548
549 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
550 struct mhi_ring *ring)
551 {
552 /* Update the WP */
553 ring->wp += ring->el_size;
554
555 if (ring->wp >= (ring->base + ring->len))
556 ring->wp = ring->base;
557
558 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
559
560 /* Update the RP */
561 ring->rp += ring->el_size;
562 if (ring->rp >= (ring->base + ring->len))
563 ring->rp = ring->base;
564
565 /* Update to all cores */
566 smp_wmb();
567 }
568
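/*
 * Process a transfer completion event for a channel: walk the TRE ring up to
 * the element the event refers to, unmap the buffers, invoke the client's
 * xfer_cb() and, for pre-allocated channels, requeue the buffers.
 */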
569 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
570 struct mhi_ring_element *event,
571 struct mhi_chan *mhi_chan)
572 {
573 struct mhi_ring *buf_ring, *tre_ring;
574 struct device *dev = &mhi_cntrl->mhi_dev->dev;
575 struct mhi_result result;
576 unsigned long flags = 0;
577 u32 ev_code;
578
579 ev_code = MHI_TRE_GET_EV_CODE(event);
580 buf_ring = &mhi_chan->buf_ring;
581 tre_ring = &mhi_chan->tre_ring;
582
583 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
584 -EOVERFLOW : 0;
585
586 /*
587 * If it's a DB event, we need to grab the lock as a writer, with
588 * preemption disabled, because we have to update the DB register
589 * and there is a chance that another thread could be doing the
590 * same.
591 */
592 if (ev_code >= MHI_EV_CC_OOB)
593 write_lock_irqsave(&mhi_chan->lock, flags);
594 else
595 read_lock_bh(&mhi_chan->lock);
596
597 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
598 goto end_process_tx_event;
599
600 switch (ev_code) {
601 case MHI_EV_CC_OVERFLOW:
602 case MHI_EV_CC_EOB:
603 case MHI_EV_CC_EOT:
604 {
605 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
606 struct mhi_ring_element *local_rp, *ev_tre;
607 void *dev_rp;
608 struct mhi_buf_info *buf_info;
609 u16 xfer_len;
610
611 if (!is_valid_ring_ptr(tre_ring, ptr)) {
612 dev_err(&mhi_cntrl->mhi_dev->dev,
613 "Event element points outside of the tre ring\n");
614 break;
615 }
616 /* Get the TRE this event points to */
617 ev_tre = mhi_to_virtual(tre_ring, ptr);
618
619 dev_rp = ev_tre + 1;
620 if (dev_rp >= (tre_ring->base + tre_ring->len))
621 dev_rp = tre_ring->base;
622
623 result.dir = mhi_chan->dir;
624
625 local_rp = tre_ring->rp;
626 while (local_rp != dev_rp) {
627 buf_info = buf_ring->rp;
628 /* If it's the last TRE, get length from the event */
629 if (local_rp == ev_tre)
630 xfer_len = MHI_TRE_GET_EV_LEN(event);
631 else
632 xfer_len = buf_info->len;
633
634 /* Unmap if it's not pre-mapped by client */
635 if (likely(!buf_info->pre_mapped))
636 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
637
638 result.buf_addr = buf_info->cb_buf;
639
640 /* truncate to buf len if xfer_len is larger */
641 result.bytes_xferd =
642 min_t(u16, xfer_len, buf_info->len);
643 mhi_del_ring_element(mhi_cntrl, buf_ring);
644 mhi_del_ring_element(mhi_cntrl, tre_ring);
645 local_rp = tre_ring->rp;
646
647 read_unlock_bh(&mhi_chan->lock);
648
649 /* notify client */
650 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
651
652 if (mhi_chan->dir == DMA_TO_DEVICE) {
653 atomic_dec(&mhi_cntrl->pending_pkts);
654 /* Release the reference taken in mhi_queue() */
655 mhi_cntrl->runtime_put(mhi_cntrl);
656 }
657
658 /*
659 * Recycle the buffer if it is pre-allocated; if there is
660 * an error, there is not much we can do apart from dropping
661 * the packet
662 */
663 if (mhi_chan->pre_alloc) {
664 if (mhi_queue_buf(mhi_chan->mhi_dev,
665 mhi_chan->dir,
666 buf_info->cb_buf,
667 buf_info->len, MHI_EOT)) {
668 dev_err(dev,
669 "Error recycling buffer for chan:%d\n",
670 mhi_chan->chan);
671 kfree(buf_info->cb_buf);
672 }
673 }
674
675 read_lock_bh(&mhi_chan->lock);
676 }
677 break;
678 } /* CC_EOT */
679 case MHI_EV_CC_OOB:
680 case MHI_EV_CC_DB_MODE:
681 {
682 unsigned long pm_lock_flags;
683
684 mhi_chan->db_cfg.db_mode = 1;
685 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
686 if (tre_ring->wp != tre_ring->rp &&
687 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
688 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
689 }
690 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
691 break;
692 }
693 case MHI_EV_CC_BAD_TRE:
694 default:
695 dev_err(dev, "Unknown event 0x%x\n", ev_code);
696 break;
697 } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
698
699 end_process_tx_event:
700 if (ev_code >= MHI_EV_CC_OOB)
701 write_unlock_irqrestore(&mhi_chan->lock, flags);
702 else
703 read_unlock_bh(&mhi_chan->lock);
704
705 return 0;
706 }
707
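/*
 * Process a completion event for an RSC channel. The event carries a cookie,
 * which is the byte offset of the corresponding buffer descriptor within the
 * buffer ring.
 */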
708 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
709 struct mhi_ring_element *event,
710 struct mhi_chan *mhi_chan)
711 {
712 struct mhi_ring *buf_ring, *tre_ring;
713 struct mhi_buf_info *buf_info;
714 struct mhi_result result;
715 int ev_code;
716 u32 cookie; /* offset to local descriptor */
717 u16 xfer_len;
718
719 buf_ring = &mhi_chan->buf_ring;
720 tre_ring = &mhi_chan->tre_ring;
721
722 ev_code = MHI_TRE_GET_EV_CODE(event);
723 cookie = MHI_TRE_GET_EV_COOKIE(event);
724 xfer_len = MHI_TRE_GET_EV_LEN(event);
725
726 /* Received an out-of-bounds cookie */
727 WARN_ON(cookie >= buf_ring->len);
728
729 buf_info = buf_ring->base + cookie;
730
731 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
732 -EOVERFLOW : 0;
733
734 /* truncate to buf len if xfer_len is larger */
735 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
736 result.buf_addr = buf_info->cb_buf;
737 result.dir = mhi_chan->dir;
738
739 read_lock_bh(&mhi_chan->lock);
740
741 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
742 goto end_process_rsc_event;
743
744 WARN_ON(!buf_info->used);
745
746 /* notify the client */
747 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
748
749 /*
750 * Note: We're arbitrarily incrementing RP even though the completion
751 * packet we just processed might not be for that element. We can do
752 * this because the device is guaranteed to cache descriptors in the
753 * order it receives them, so even though the completion event is for a
754 * different descriptor we can reuse all descriptors in between.
755 * Example:
756 * The transfer ring has descriptors: A, B, C, D
757 * The last descriptor the host queued is D (WP) and the first
758 * descriptor the host queued is A (RP).
759 * The completion event we just serviced is for descriptor C.
760 * Then we can safely queue descriptors to replace A, B, and C
761 * even though the host has not received completions for them.
762 */
763 mhi_del_ring_element(mhi_cntrl, tre_ring);
764 buf_info->used = false;
765
766 end_process_rsc_event:
767 read_unlock_bh(&mhi_chan->lock);
768
769 return 0;
770 }
771
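/*
 * Handle a command completion event: locate the command ring element it refers
 * to, store the completion code in the channel and wake up the waiter in
 * mhi_update_channel_state().
 */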
772 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
773 struct mhi_ring_element *tre)
774 {
775 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
776 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
777 struct mhi_ring *mhi_ring = &cmd_ring->ring;
778 struct mhi_ring_element *cmd_pkt;
779 struct mhi_chan *mhi_chan;
780 u32 chan;
781
782 if (!is_valid_ring_ptr(mhi_ring, ptr)) {
783 dev_err(&mhi_cntrl->mhi_dev->dev,
784 "Event element points outside of the cmd ring\n");
785 return;
786 }
787
788 cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
789
790 chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
791
792 if (chan < mhi_cntrl->max_chan &&
793 mhi_cntrl->mhi_chan[chan].configured) {
794 mhi_chan = &mhi_cntrl->mhi_chan[chan];
795 write_lock_bh(&mhi_chan->lock);
796 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
797 complete(&mhi_chan->completion);
798 write_unlock_bh(&mhi_chan->lock);
799 } else {
800 dev_err(&mhi_cntrl->mhi_dev->dev,
801 "Completion packet for invalid channel ID: %d\n", chan);
802 }
803
804 mhi_del_ring_element(mhi_cntrl, mhi_ring);
805 }
806
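/*
 * Process the control event ring: state change, execution environment change,
 * command completion and bandwidth request events. Returns the number of
 * events processed or a negative error code.
 */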
807 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
808 struct mhi_event *mhi_event,
809 u32 event_quota)
810 {
811 struct mhi_ring_element *dev_rp, *local_rp;
812 struct mhi_ring *ev_ring = &mhi_event->ring;
813 struct mhi_event_ctxt *er_ctxt =
814 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
815 struct mhi_chan *mhi_chan;
816 struct device *dev = &mhi_cntrl->mhi_dev->dev;
817 u32 chan;
818 int count = 0;
819 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
820
821 /*
822 * This is a quick check to avoid unnecessary event processing
823 * in case MHI is already in an error state, but it's still possible
824 * to transition to the error state while processing events
825 */
826 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
827 return -EIO;
828
829 if (!is_valid_ring_ptr(ev_ring, ptr)) {
830 dev_err(&mhi_cntrl->mhi_dev->dev,
831 "Event ring rp points outside of the event ring\n");
832 return -EIO;
833 }
834
835 dev_rp = mhi_to_virtual(ev_ring, ptr);
836 local_rp = ev_ring->rp;
837
838 while (dev_rp != local_rp) {
839 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
840
841 switch (type) {
842 case MHI_PKT_TYPE_BW_REQ_EVENT:
843 {
844 struct mhi_link_info *link_info;
845
846 link_info = &mhi_cntrl->mhi_link_info;
847 write_lock_irq(&mhi_cntrl->pm_lock);
848 link_info->target_link_speed =
849 MHI_TRE_GET_EV_LINKSPEED(local_rp);
850 link_info->target_link_width =
851 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
852 write_unlock_irq(&mhi_cntrl->pm_lock);
853 dev_dbg(dev, "Received BW_REQ event\n");
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
855 break;
856 }
857 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
858 {
859 enum mhi_state new_state;
860
861 new_state = MHI_TRE_GET_EV_STATE(local_rp);
862
863 dev_dbg(dev, "State change event to state: %s\n",
864 mhi_state_str(new_state));
865
866 switch (new_state) {
867 case MHI_STATE_M0:
868 mhi_pm_m0_transition(mhi_cntrl);
869 break;
870 case MHI_STATE_M1:
871 mhi_pm_m1_transition(mhi_cntrl);
872 break;
873 case MHI_STATE_M3:
874 mhi_pm_m3_transition(mhi_cntrl);
875 break;
876 case MHI_STATE_SYS_ERR:
877 {
878 enum mhi_pm_state pm_state;
879
880 dev_dbg(dev, "System error detected\n");
881 write_lock_irq(&mhi_cntrl->pm_lock);
882 pm_state = mhi_tryset_pm_state(mhi_cntrl,
883 MHI_PM_SYS_ERR_DETECT);
884 write_unlock_irq(&mhi_cntrl->pm_lock);
885 if (pm_state == MHI_PM_SYS_ERR_DETECT)
886 mhi_pm_sys_err_handler(mhi_cntrl);
887 break;
888 }
889 default:
890 dev_err(dev, "Invalid state: %s\n",
891 mhi_state_str(new_state));
892 }
893
894 break;
895 }
896 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
897 mhi_process_cmd_completion(mhi_cntrl, local_rp);
898 break;
899 case MHI_PKT_TYPE_EE_EVENT:
900 {
901 enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
902 enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
903
904 dev_dbg(dev, "Received EE event: %s\n",
905 TO_MHI_EXEC_STR(event));
906 switch (event) {
907 case MHI_EE_SBL:
908 st = DEV_ST_TRANSITION_SBL;
909 break;
910 case MHI_EE_WFW:
911 case MHI_EE_AMSS:
912 st = DEV_ST_TRANSITION_MISSION_MODE;
913 break;
914 case MHI_EE_FP:
915 st = DEV_ST_TRANSITION_FP;
916 break;
917 case MHI_EE_RDDM:
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
919 write_lock_irq(&mhi_cntrl->pm_lock);
920 mhi_cntrl->ee = event;
921 write_unlock_irq(&mhi_cntrl->pm_lock);
922 wake_up_all(&mhi_cntrl->state_event);
923 break;
924 default:
925 dev_err(dev,
926 "Unhandled EE event: 0x%x\n", type);
927 }
928 if (st != DEV_ST_TRANSITION_MAX)
929 mhi_queue_state_transition(mhi_cntrl, st);
930
931 break;
932 }
933 case MHI_PKT_TYPE_TX_EVENT:
934 chan = MHI_TRE_GET_EV_CHID(local_rp);
935
936 WARN_ON(chan >= mhi_cntrl->max_chan);
937
938 /*
939 * Only process the event ring elements whose channel
940 * ID is within the maximum supported range.
941 */
942 if (chan < mhi_cntrl->max_chan) {
943 mhi_chan = &mhi_cntrl->mhi_chan[chan];
944 if (!mhi_chan->configured)
945 break;
946 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
947 }
948 break;
949 default:
950 dev_err(dev, "Unhandled event type: %d\n", type);
951 break;
952 }
953
954 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
955 local_rp = ev_ring->rp;
956
957 ptr = le64_to_cpu(er_ctxt->rp);
958 if (!is_valid_ring_ptr(ev_ring, ptr)) {
959 dev_err(&mhi_cntrl->mhi_dev->dev,
960 "Event ring rp points outside of the event ring\n");
961 return -EIO;
962 }
963
964 dev_rp = mhi_to_virtual(ev_ring, ptr);
965 count++;
966 }
967
968 read_lock_bh(&mhi_cntrl->pm_lock);
969
970 /* Ring EV DB only if there is any pending element to process */
971 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
972 mhi_ring_er_db(mhi_event);
973 read_unlock_bh(&mhi_cntrl->pm_lock);
974
975 return count;
976 }
977
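/*
 * Process a data event ring, handling at most event_quota transfer completion
 * events. Returns the number of ring elements consumed or a negative error
 * code.
 */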
978 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
979 struct mhi_event *mhi_event,
980 u32 event_quota)
981 {
982 struct mhi_ring_element *dev_rp, *local_rp;
983 struct mhi_ring *ev_ring = &mhi_event->ring;
984 struct mhi_event_ctxt *er_ctxt =
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
986 int count = 0;
987 u32 chan;
988 struct mhi_chan *mhi_chan;
989 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
990
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
992 return -EIO;
993
994 if (!is_valid_ring_ptr(ev_ring, ptr)) {
995 dev_err(&mhi_cntrl->mhi_dev->dev,
996 "Event ring rp points outside of the event ring\n");
997 return -EIO;
998 }
999
1000 dev_rp = mhi_to_virtual(ev_ring, ptr);
1001 local_rp = ev_ring->rp;
1002
1003 while (dev_rp != local_rp && event_quota > 0) {
1004 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
1005
1006 chan = MHI_TRE_GET_EV_CHID(local_rp);
1007
1008 WARN_ON(chan >= mhi_cntrl->max_chan);
1009
1010 /*
1011 * Only process the event ring elements whose channel
1012 * ID is within the maximum supported range.
1013 */
1014 if (chan < mhi_cntrl->max_chan &&
1015 mhi_cntrl->mhi_chan[chan].configured) {
1016 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1017
1018 if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
1019 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1020 event_quota--;
1021 } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
1022 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1023 event_quota--;
1024 }
1025 }
1026
1027 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1028 local_rp = ev_ring->rp;
1029
1030 ptr = le64_to_cpu(er_ctxt->rp);
1031 if (!is_valid_ring_ptr(ev_ring, ptr)) {
1032 dev_err(&mhi_cntrl->mhi_dev->dev,
1033 "Event ring rp points outside of the event ring\n");
1034 return -EIO;
1035 }
1036
1037 dev_rp = mhi_to_virtual(ev_ring, ptr);
1038 count++;
1039 }
1040 read_lock_bh(&mhi_cntrl->pm_lock);
1041
1042 /* Ring EV DB only if there is any pending element to process */
1043 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
1044 mhi_ring_er_db(mhi_event);
1045 read_unlock_bh(&mhi_cntrl->pm_lock);
1046
1047 return count;
1048 }
1049
1050 void mhi_ev_task(unsigned long data)
1051 {
1052 struct mhi_event *mhi_event = (struct mhi_event *)data;
1053 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1054
1055 /* process all pending events */
1056 spin_lock_bh(&mhi_event->lock);
1057 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1058 spin_unlock_bh(&mhi_event->lock);
1059 }
1060
1061 void mhi_ctrl_ev_task(unsigned long data)
1062 {
1063 struct mhi_event *mhi_event = (struct mhi_event *)data;
1064 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1065 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1066 enum mhi_state state;
1067 enum mhi_pm_state pm_state = 0;
1068 int ret;
1069
1070 /*
1071 * We can check PM state w/o a lock here because there is no way
1072 * PM state can change from reg access valid to no access while this
1073 * thread is being executed.
1074 */
1075 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
1076 /*
1077 * We may have a pending event but are not allowed to
1078 * process it since we are probably in a suspended state,
1079 * so trigger a resume.
1080 */
1081 mhi_trigger_resume(mhi_cntrl);
1082
1083 return;
1084 }
1085
1086 /* Process ctrl events */
1087 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1088
1089 /*
1090 * We received an IRQ but there were no events to process. Maybe the
1091 * device went to the SYS_ERR state? Check the state to confirm.
1092 */
1093 if (!ret) {
1094 write_lock_irq(&mhi_cntrl->pm_lock);
1095 state = mhi_get_mhi_state(mhi_cntrl);
1096 if (state == MHI_STATE_SYS_ERR) {
1097 dev_dbg(dev, "System error detected\n");
1098 pm_state = mhi_tryset_pm_state(mhi_cntrl,
1099 MHI_PM_SYS_ERR_DETECT);
1100 }
1101 write_unlock_irq(&mhi_cntrl->pm_lock);
1102 if (pm_state == MHI_PM_SYS_ERR_DETECT)
1103 mhi_pm_sys_err_handler(mhi_cntrl);
1104 }
1105 }
1106
1107 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1108 struct mhi_ring *ring)
1109 {
1110 void *tmp = ring->wp + ring->el_size;
1111
1112 if (tmp >= (ring->base + ring->len))
1113 tmp = ring->base;
1114
1115 return (tmp == ring->rp);
1116 }
1117
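/*
 * Common queueing path for skb, DMA and plain buffers: generate a TRE for the
 * buffer, take a runtime PM reference, and ring the channel doorbell if
 * register access is currently allowed.
 */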
1118 static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
1119 enum dma_data_direction dir, enum mhi_flags mflags)
1120 {
1121 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1122 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1123 mhi_dev->dl_chan;
1124 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1125 unsigned long flags;
1126 int ret;
1127
1128 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1129 return -EIO;
1130
1131 ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1132 if (unlikely(ret))
1133 return -EAGAIN;
1134
1135 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1136 if (unlikely(ret))
1137 return ret;
1138
1139 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1140
1141 /* Packet is queued, take a usage ref to exit M3 if necessary.
1142 * For a host->device buffer, the balancing put is done on buffer completion;
1143 * for a device->host buffer, the balancing put is done after ringing the DB.
1144 */
1145 mhi_cntrl->runtime_get(mhi_cntrl);
1146
1147 /* Assert dev_wake (to exit/prevent M1/M2) */
1148 mhi_cntrl->wake_toggle(mhi_cntrl);
1149
1150 if (mhi_chan->dir == DMA_TO_DEVICE)
1151 atomic_inc(&mhi_cntrl->pending_pkts);
1152
1153 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1154 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1155
1156 if (dir == DMA_FROM_DEVICE)
1157 mhi_cntrl->runtime_put(mhi_cntrl);
1158
1159 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1160
1161 return ret;
1162 }
1163
1164 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1165 struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1166 {
1167 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1168 mhi_dev->dl_chan;
1169 struct mhi_buf_info buf_info = { };
1170
1171 buf_info.v_addr = skb->data;
1172 buf_info.cb_buf = skb;
1173 buf_info.len = len;
1174
1175 if (unlikely(mhi_chan->pre_alloc))
1176 return -EINVAL;
1177
1178 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1179 }
1180 EXPORT_SYMBOL_GPL(mhi_queue_skb);
1181
1182 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1183 struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1184 {
1185 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1186 mhi_dev->dl_chan;
1187 struct mhi_buf_info buf_info = { };
1188
1189 buf_info.p_addr = mhi_buf->dma_addr;
1190 buf_info.cb_buf = mhi_buf;
1191 buf_info.pre_mapped = true;
1192 buf_info.len = len;
1193
1194 if (unlikely(mhi_chan->pre_alloc))
1195 return -EINVAL;
1196
1197 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1198 }
1199 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1200
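/*
 * Build a single transfer ring element for the buffer described by @info and
 * record the client buffer in the matching buf_ring slot so it can be found
 * again on completion.
 */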
1201 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1202 struct mhi_buf_info *info, enum mhi_flags flags)
1203 {
1204 struct mhi_ring *buf_ring, *tre_ring;
1205 struct mhi_ring_element *mhi_tre;
1206 struct mhi_buf_info *buf_info;
1207 int eot, eob, chain, bei;
1208 int ret;
1209
1210 /* Protect accesses for reading and incrementing WP */
1211 write_lock_bh(&mhi_chan->lock);
1212
1213 buf_ring = &mhi_chan->buf_ring;
1214 tre_ring = &mhi_chan->tre_ring;
1215
1216 buf_info = buf_ring->wp;
1217 WARN_ON(buf_info->used);
1218 buf_info->pre_mapped = info->pre_mapped;
1219 if (info->pre_mapped)
1220 buf_info->p_addr = info->p_addr;
1221 else
1222 buf_info->v_addr = info->v_addr;
1223 buf_info->cb_buf = info->cb_buf;
1224 buf_info->wp = tre_ring->wp;
1225 buf_info->dir = mhi_chan->dir;
1226 buf_info->len = info->len;
1227
1228 if (!info->pre_mapped) {
1229 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1230 if (ret) {
1231 write_unlock_bh(&mhi_chan->lock);
1232 return ret;
1233 }
1234 }
1235
1236 eob = !!(flags & MHI_EOB);
1237 eot = !!(flags & MHI_EOT);
1238 chain = !!(flags & MHI_CHAIN);
1239 bei = !!(mhi_chan->intmod);
1240
1241 mhi_tre = tre_ring->wp;
1242 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1243 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1244 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1245
1246 /* increment WP */
1247 mhi_add_ring_element(mhi_cntrl, tre_ring);
1248 mhi_add_ring_element(mhi_cntrl, buf_ring);
1249
1250 write_unlock_bh(&mhi_chan->lock);
1251
1252 return 0;
1253 }
1254
1255 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1256 void *buf, size_t len, enum mhi_flags mflags)
1257 {
1258 struct mhi_buf_info buf_info = { };
1259
1260 buf_info.v_addr = buf;
1261 buf_info.cb_buf = buf;
1262 buf_info.len = len;
1263
1264 return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1265 }
1266 EXPORT_SYMBOL_GPL(mhi_queue_buf);
1267
1268 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1269 {
1270 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1271 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1272 mhi_dev->ul_chan : mhi_dev->dl_chan;
1273 struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1274
1275 return mhi_is_ring_full(mhi_cntrl, tre_ring);
1276 }
1277 EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1278
1279 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1280 struct mhi_chan *mhi_chan,
1281 enum mhi_cmd_type cmd)
1282 {
1283 struct mhi_ring_element *cmd_tre = NULL;
1284 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1285 struct mhi_ring *ring = &mhi_cmd->ring;
1286 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1287 int chan = 0;
1288
1289 if (mhi_chan)
1290 chan = mhi_chan->chan;
1291
1292 spin_lock_bh(&mhi_cmd->lock);
1293 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1294 spin_unlock_bh(&mhi_cmd->lock);
1295 return -ENOMEM;
1296 }
1297
1298 /* prepare the cmd tre */
1299 cmd_tre = ring->wp;
1300 switch (cmd) {
1301 case MHI_CMD_RESET_CHAN:
1302 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1303 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1304 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1305 break;
1306 case MHI_CMD_STOP_CHAN:
1307 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
1308 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
1309 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
1310 break;
1311 case MHI_CMD_START_CHAN:
1312 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1313 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1314 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1315 break;
1316 default:
1317 dev_err(dev, "Command not supported\n");
1318 break;
1319 }
1320
1321 /* queue to hardware */
1322 mhi_add_ring_element(mhi_cntrl, ring);
1323 read_lock_bh(&mhi_cntrl->pm_lock);
1324 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1325 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1326 read_unlock_bh(&mhi_cntrl->pm_lock);
1327 spin_unlock_bh(&mhi_cmd->lock);
1328
1329 return 0;
1330 }
1331
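/*
 * Issue a channel state change command (RESET/STOP/START) to the device and
 * wait for its completion event, updating the host side channel state on
 * success.
 */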
1332 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1333 struct mhi_chan *mhi_chan,
1334 enum mhi_ch_state_type to_state)
1335 {
1336 struct device *dev = &mhi_chan->mhi_dev->dev;
1337 enum mhi_cmd_type cmd = MHI_CMD_NOP;
1338 int ret;
1339
1340 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
1341 TO_CH_STATE_TYPE_STR(to_state));
1342
1343 switch (to_state) {
1344 case MHI_CH_STATE_TYPE_RESET:
1345 write_lock_irq(&mhi_chan->lock);
1346 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1347 mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1348 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1349 write_unlock_irq(&mhi_chan->lock);
1350 return -EINVAL;
1351 }
1352 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1353 write_unlock_irq(&mhi_chan->lock);
1354
1355 cmd = MHI_CMD_RESET_CHAN;
1356 break;
1357 case MHI_CH_STATE_TYPE_STOP:
1358 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1359 return -EINVAL;
1360
1361 cmd = MHI_CMD_STOP_CHAN;
1362 break;
1363 case MHI_CH_STATE_TYPE_START:
1364 if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1365 mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1366 return -EINVAL;
1367
1368 cmd = MHI_CMD_START_CHAN;
1369 break;
1370 default:
1371 dev_err(dev, "%d: Channel state update to %s not allowed\n",
1372 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1373 return -EINVAL;
1374 }
1375
1376 /* bring host and device out of suspended states */
1377 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1378 if (ret)
1379 return ret;
1380 mhi_cntrl->runtime_get(mhi_cntrl);
1381
1382 reinit_completion(&mhi_chan->completion);
1383 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1384 if (ret) {
1385 dev_err(dev, "%d: Failed to send %s channel command\n",
1386 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1387 goto exit_channel_update;
1388 }
1389
1390 ret = wait_for_completion_timeout(&mhi_chan->completion,
1391 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1392 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1393 dev_err(dev,
1394 "%d: Failed to receive %s channel command completion\n",
1395 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1396 ret = -EIO;
1397 goto exit_channel_update;
1398 }
1399
1400 ret = 0;
1401
1402 if (to_state != MHI_CH_STATE_TYPE_RESET) {
1403 write_lock_irq(&mhi_chan->lock);
1404 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1405 MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
1406 write_unlock_irq(&mhi_chan->lock);
1407 }
1408
1409 dev_dbg(dev, "%d: Channel state change to %s successful\n",
1410 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1411
1412 exit_channel_update:
1413 mhi_cntrl->runtime_put(mhi_cntrl);
1414 mhi_device_put(mhi_cntrl->mhi_dev);
1415
1416 return ret;
1417 }
1418
1419 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1420 struct mhi_chan *mhi_chan)
1421 {
1422 int ret;
1423 struct device *dev = &mhi_chan->mhi_dev->dev;
1424
1425 mutex_lock(&mhi_chan->mutex);
1426
1427 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1428 dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1429 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1430 goto exit_unprepare_channel;
1431 }
1432
1433 /* no more processing events for this channel */
1434 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1435 MHI_CH_STATE_TYPE_RESET);
1436 if (ret)
1437 dev_err(dev, "%d: Failed to reset channel, still resetting\n",
1438 mhi_chan->chan);
1439
1440 exit_unprepare_channel:
1441 write_lock_irq(&mhi_chan->lock);
1442 mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1443 write_unlock_irq(&mhi_chan->lock);
1444
1445 if (!mhi_chan->offload_ch) {
1446 mhi_reset_chan(mhi_cntrl, mhi_chan);
1447 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1448 }
1449 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1450
1451 mutex_unlock(&mhi_chan->mutex);
1452 }
1453
1454 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1455 struct mhi_chan *mhi_chan, unsigned int flags)
1456 {
1457 int ret = 0;
1458 struct device *dev = &mhi_chan->mhi_dev->dev;
1459
1460 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1461 dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1462 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1463 return -ENOTCONN;
1464 }
1465
1466 mutex_lock(&mhi_chan->mutex);
1467
1468 /* Check if the client manages the channel context, as with offload channels */
1469 if (!mhi_chan->offload_ch) {
1470 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1471 if (ret)
1472 goto error_init_chan;
1473 }
1474
1475 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1476 MHI_CH_STATE_TYPE_START);
1477 if (ret)
1478 goto error_pm_state;
1479
1480 if (mhi_chan->dir == DMA_FROM_DEVICE)
1481 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
1482
1483 /* Pre-allocate buffer for xfer ring */
1484 if (mhi_chan->pre_alloc) {
1485 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1486 &mhi_chan->tre_ring);
1487 size_t len = mhi_cntrl->buffer_len;
1488
1489 while (nr_el--) {
1490 void *buf;
1491 struct mhi_buf_info info = { };
1492
1493 buf = kmalloc(len, GFP_KERNEL);
1494 if (!buf) {
1495 ret = -ENOMEM;
1496 goto error_pre_alloc;
1497 }
1498
1499 /* Prepare transfer descriptors */
1500 info.v_addr = buf;
1501 info.cb_buf = buf;
1502 info.len = len;
1503 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1504 if (ret) {
1505 kfree(buf);
1506 goto error_pre_alloc;
1507 }
1508 }
1509
1510 read_lock_bh(&mhi_cntrl->pm_lock);
1511 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1512 read_lock_irq(&mhi_chan->lock);
1513 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1514 read_unlock_irq(&mhi_chan->lock);
1515 }
1516 read_unlock_bh(&mhi_cntrl->pm_lock);
1517 }
1518
1519 mutex_unlock(&mhi_chan->mutex);
1520
1521 return 0;
1522
1523 error_pm_state:
1524 if (!mhi_chan->offload_ch)
1525 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1526
1527 error_init_chan:
1528 mutex_unlock(&mhi_chan->mutex);
1529
1530 return ret;
1531
1532 error_pre_alloc:
1533 mutex_unlock(&mhi_chan->mutex);
1534 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1535
1536 return ret;
1537 }
1538
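/*
 * Walk the not-yet-processed part of the event ring and re-tag any transfer
 * completion events for @chan as STALE so they are ignored when the ring is
 * processed later.
 */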
1539 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1540 struct mhi_event *mhi_event,
1541 struct mhi_event_ctxt *er_ctxt,
1542 int chan)
1543
1544 {
1545 struct mhi_ring_element *dev_rp, *local_rp;
1546 struct mhi_ring *ev_ring;
1547 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1548 unsigned long flags;
1549 dma_addr_t ptr;
1550
1551 dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1552
1553 ev_ring = &mhi_event->ring;
1554
1555 /* Mark all pending events related to the channel as STALE events */
1556 spin_lock_irqsave(&mhi_event->lock, flags);
1557
1558 ptr = le64_to_cpu(er_ctxt->rp);
1559 if (!is_valid_ring_ptr(ev_ring, ptr)) {
1560 dev_err(&mhi_cntrl->mhi_dev->dev,
1561 "Event ring rp points outside of the event ring\n");
1562 dev_rp = ev_ring->rp;
1563 } else {
1564 dev_rp = mhi_to_virtual(ev_ring, ptr);
1565 }
1566
1567 local_rp = ev_ring->rp;
1568 while (dev_rp != local_rp) {
1569 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1570 chan == MHI_TRE_GET_EV_CHID(local_rp))
1571 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1572 MHI_PKT_TYPE_STALE_EVENT);
1573 local_rp++;
1574 if (local_rp == (ev_ring->base + ev_ring->len))
1575 local_rp = ev_ring->base;
1576 }
1577
1578 dev_dbg(dev, "Finished marking events as stale events\n");
1579 spin_unlock_irqrestore(&mhi_event->lock, flags);
1580 }
1581
1582 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1583 struct mhi_chan *mhi_chan)
1584 {
1585 struct mhi_ring *buf_ring, *tre_ring;
1586 struct mhi_result result;
1587
1588 /* Reset any pending buffers */
1589 buf_ring = &mhi_chan->buf_ring;
1590 tre_ring = &mhi_chan->tre_ring;
1591 result.transaction_status = -ENOTCONN;
1592 result.bytes_xferd = 0;
1593 while (tre_ring->rp != tre_ring->wp) {
1594 struct mhi_buf_info *buf_info = buf_ring->rp;
1595
1596 if (mhi_chan->dir == DMA_TO_DEVICE) {
1597 atomic_dec(&mhi_cntrl->pending_pkts);
1598 /* Release the reference taken in mhi_queue() */
1599 mhi_cntrl->runtime_put(mhi_cntrl);
1600 }
1601
1602 if (!buf_info->pre_mapped)
1603 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1604
1605 mhi_del_ring_element(mhi_cntrl, buf_ring);
1606 mhi_del_ring_element(mhi_cntrl, tre_ring);
1607
1608 if (mhi_chan->pre_alloc) {
1609 kfree(buf_info->cb_buf);
1610 } else {
1611 result.buf_addr = buf_info->cb_buf;
1612 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1613 }
1614 }
1615 }
1616
1617 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1618 {
1619 struct mhi_event *mhi_event;
1620 struct mhi_event_ctxt *er_ctxt;
1621 int chan = mhi_chan->chan;
1622
1623 /* Nothing to reset, client doesn't queue buffers */
1624 if (mhi_chan->offload_ch)
1625 return;
1626
1627 read_lock_bh(&mhi_cntrl->pm_lock);
1628 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1629 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1630
1631 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1632
1633 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1634
1635 read_unlock_bh(&mhi_cntrl->pm_lock);
1636 }
1637
1638 static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
1639 {
1640 int ret, dir;
1641 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1642 struct mhi_chan *mhi_chan;
1643
1644 for (dir = 0; dir < 2; dir++) {
1645 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1646 if (!mhi_chan)
1647 continue;
1648
1649 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1650 if (ret)
1651 goto error_open_chan;
1652 }
1653
1654 return 0;
1655
1656 error_open_chan:
1657 for (--dir; dir >= 0; dir--) {
1658 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1659 if (!mhi_chan)
1660 continue;
1661
1662 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1663 }
1664
1665 return ret;
1666 }
1667
1668 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1669 {
1670 return __mhi_prepare_for_transfer(mhi_dev, 0);
1671 }
1672 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1673
1674 int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
1675 {
1676 return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
1677 }
1678 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
1679
1680 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1681 {
1682 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1683 struct mhi_chan *mhi_chan;
1684 int dir;
1685
1686 for (dir = 0; dir < 2; dir++) {
1687 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1688 if (!mhi_chan)
1689 continue;
1690
1691 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1692 }
1693 }
1694 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1695