// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
#include "trace.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
                              void __iomem *base, u32 offset, u32 *out)
{
        return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

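/*
 * Read a register and extract the field selected by @mask, shifted down so
 * the caller receives the raw field value.
 */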
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset,
                                    u32 mask, u32 *out)
{
        u32 tmp;
        int ret;

        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
        if (ret)
                return ret;

        *out = (tmp & mask) >> __ffs(mask);

        return 0;
}

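/*
 * Poll a register field until it reads back @val, sleeping @delayus between
 * reads, for at most @timeout_ms. Returns -ETIMEDOUT if the value never
 * matches.
 */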
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset,
                                    u32 mask, u32 val, u32 delayus,
                                    u32 timeout_ms)
{
        int ret;
        u32 out, retry = (timeout_ms * 1000) / delayus;

        while (retry--) {
                ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
                if (ret)
                        return ret;

                if (out == val)
                        return 0;

                fsleep(delayus);
        }

        return -ETIMEDOUT;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val)
{
        mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
                                     void __iomem *base, u32 offset, u32 mask,
                                     u32 val)
{
        int ret;
        u32 tmp;

        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
        if (ret)
                return ret;

        tmp &= ~mask;
        tmp |= (val << __ffs(mask));
        mhi_write_reg(mhi_cntrl, base, offset, tmp);

        return 0;
}

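/*
 * MHI doorbell registers are 64 bits wide, written as two 32-bit halves.
 * The upper word goes first, presumably so the complete value is in place
 * by the time the lower-word write lands.
 */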
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
                  dma_addr_t db_val)
{
        mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
        mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

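/*
 * Burst mode doorbell: only ring the doorbell while db_mode is set (e.g.
 * after an OOB/DB_MODE event), then clear db_mode so subsequent writes are
 * deferred until the device re-arms it.
 */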
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
                     struct db_cfg *db_cfg,
                     void __iomem *db_addr,
                     dma_addr_t db_val)
{
        if (db_cfg->db_mode) {
                db_cfg->db_val = db_val;
                mhi_write_db(mhi_cntrl, db_addr, db_val);
                db_cfg->db_mode = 0;
        }
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
                             struct db_cfg *db_cfg,
                             void __iomem *db_addr,
                             dma_addr_t db_val)
{
        db_cfg->db_val = db_val;
        mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
        struct mhi_ring *ring = &mhi_event->ring;

        mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
                                     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
        dma_addr_t db;
        struct mhi_ring *ring = &mhi_cmd->ring;

        db = ring->iommu_base + (ring->wp - ring->base);
        *ring->ctxt_wp = cpu_to_le64(db);
        mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
                      struct mhi_chan *mhi_chan)
{
        struct mhi_ring *ring = &mhi_chan->tre_ring;
        dma_addr_t db;

        db = ring->iommu_base + (ring->wp - ring->base);

        /*
         * Writes to the new ring element must be visible to the hardware
         * before letting the hardware know there is a new element to fetch.
         */
        dma_wmb();
        *ring->ctxt_wp = cpu_to_le64(db);

        mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
                                    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
        u32 exec;
        int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

        return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
        u32 state;
        int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
                                     MHISTATUS_MHISTATE_MASK, &state);

        return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
        if (mhi_cntrl->reset) {
                mhi_cntrl->reset(mhi_cntrl);
                return;
        }

        /* Generic MHI SoC reset */
        mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
                      MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
                         struct mhi_buf_info *buf_info)
{
        buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
                                          buf_info->v_addr, buf_info->len,
                                          buf_info->dir);
        if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
                return -ENOMEM;

        return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
                          struct mhi_buf_info *buf_info)
{
        void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
                                       &buf_info->p_addr, GFP_ATOMIC);

        if (!buf)
                return -ENOMEM;

        if (buf_info->dir == DMA_TO_DEVICE)
                memcpy(buf, buf_info->v_addr, buf_info->len);

        buf_info->bb_addr = buf;

        return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
                            struct mhi_buf_info *buf_info)
{
        dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
                         buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
                             struct mhi_buf_info *buf_info)
{
        if (buf_info->dir == DMA_FROM_DEVICE)
                memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

        dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
                          buf_info->bb_addr, buf_info->p_addr);
}

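/*
 * Number of free elements in the ring. One slot is always left unused so
 * that a full ring can be distinguished from an empty one (wp == rp).
 */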
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
                                      struct mhi_ring *ring)
{
        int nr_el;

        if (ring->wp < ring->rp) {
                nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
        } else {
                nr_el = (ring->rp - ring->base) / ring->el_size;
                nr_el += ((ring->base + ring->len - ring->wp) /
                          ring->el_size) - 1;
        }

        return nr_el;
}

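/* Translate a device-visible (IOMMU) ring address to its host virtual address */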
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
        return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
                                 struct mhi_ring *ring)
{
        ring->wp += ring->el_size;
        if (ring->wp >= (ring->base + ring->len))
                ring->wp = ring->base;
        /* make the WP update visible to all CPUs */
        smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
                                 struct mhi_ring *ring)
{
        ring->rp += ring->el_size;
        if (ring->rp >= (ring->base + ring->len))
                ring->rp = ring->base;
        /* make the RP update visible to all CPUs */
        smp_wmb();
}

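/*
 * Validate a ring pointer supplied by the device: it must fall inside the
 * ring and be aligned to the ring element size.
 */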
static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
        return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
                !(addr & (sizeof(struct mhi_ring_element) - 1));
}

int mhi_destroy_device(struct device *dev, void *data)
{
        struct mhi_chan *ul_chan, *dl_chan;
        struct mhi_device *mhi_dev;
        struct mhi_controller *mhi_cntrl;
        enum mhi_ee_type ee = MHI_EE_MAX;

        if (dev->bus != &mhi_bus_type)
                return 0;

        mhi_dev = to_mhi_device(dev);
        mhi_cntrl = mhi_dev->mhi_cntrl;

        /* Only destroy virtual devices that are attached to the bus */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

        ul_chan = mhi_dev->ul_chan;
        dl_chan = mhi_dev->dl_chan;

        /*
         * If an execution environment is specified, remove only those devices
         * whose channels started in it (based on the channel ee_mask), since
         * we are moving on to a different execution environment.
         */
        if (data)
                ee = *(enum mhi_ee_type *)data;

        /*
         * For the suspend and resume case, this function will get called
         * without mhi_unregister_controller(). Hence, we need to drop the
         * references to mhi_dev created for ul and dl channels. We can
         * be sure that there will be no instances of mhi_dev left after
         * this.
         */
        if (ul_chan) {
                if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
                        return 0;

                put_device(&ul_chan->mhi_dev->dev);
        }

        if (dl_chan) {
                if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
                        return 0;

                put_device(&dl_chan->mhi_dev->dev);
        }

        dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
                mhi_dev->name);

        /* Notify the client and remove the device from MHI bus */
        device_del(dev);
        put_device(dev);

        return 0;
}

int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
                            enum dma_data_direction dir)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
                mhi_dev->ul_chan : mhi_dev->dl_chan;
        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

        return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);

void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
        struct mhi_driver *mhi_drv;

        if (!mhi_dev->dev.driver)
                return;

        mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

        if (mhi_drv->status_cb)
                mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
        struct mhi_chan *mhi_chan;
        struct mhi_device *mhi_dev;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, ret;

        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                if (!mhi_chan->configured || mhi_chan->mhi_dev ||
                    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
                        continue;
                mhi_dev = mhi_alloc_device(mhi_cntrl);
                if (IS_ERR(mhi_dev))
                        return;

                mhi_dev->dev_type = MHI_DEVICE_XFER;
                switch (mhi_chan->dir) {
                case DMA_TO_DEVICE:
                        mhi_dev->ul_chan = mhi_chan;
                        mhi_dev->ul_chan_id = mhi_chan->chan;
                        break;
                case DMA_FROM_DEVICE:
                        /* We use dl_chan as offload channels */
                        mhi_dev->dl_chan = mhi_chan;
                        mhi_dev->dl_chan_id = mhi_chan->chan;
                        break;
                default:
                        dev_err(dev, "Direction not supported\n");
                        put_device(&mhi_dev->dev);
                        return;
                }

                get_device(&mhi_dev->dev);
                mhi_chan->mhi_dev = mhi_dev;

                /* Check next channel if it matches */
                if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
                        if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
                                i++;
                                mhi_chan++;
                                if (mhi_chan->dir == DMA_TO_DEVICE) {
                                        mhi_dev->ul_chan = mhi_chan;
                                        mhi_dev->ul_chan_id = mhi_chan->chan;
                                } else {
                                        mhi_dev->dl_chan = mhi_chan;
                                        mhi_dev->dl_chan_id = mhi_chan->chan;
                                }
                                get_device(&mhi_dev->dev);
                                mhi_chan->mhi_dev = mhi_dev;
                        }
                }

                /* Channel name is same for both UL and DL */
                mhi_dev->name = mhi_chan->name;
                dev_set_name(&mhi_dev->dev, "%s_%s",
                             dev_name(&mhi_cntrl->mhi_dev->dev),
                             mhi_dev->name);

                /* Init wakeup source if available */
                if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
                        device_init_wakeup(&mhi_dev->dev, true);

                ret = device_add(&mhi_dev->dev);
                if (ret)
                        put_device(&mhi_dev->dev);
        }
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
        struct mhi_event *mhi_event = dev;
        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
        struct mhi_event_ctxt *er_ctxt;
        struct mhi_ring *ev_ring = &mhi_event->ring;
        dma_addr_t ptr;
        void *dev_rp;

        /*
         * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked
         * during __free_irq() and by that time mhi_ctxt would have been
         * freed. So check for the existence of mhi_ctxt before handling
         * the IRQs.
         */
        if (!mhi_cntrl->mhi_ctxt) {
                dev_dbg(&mhi_cntrl->mhi_dev->dev,
                        "mhi_ctxt has been freed\n");
                return IRQ_HANDLED;
        }

        er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        ptr = le64_to_cpu(er_ctxt->rp);

        if (!is_valid_ring_ptr(ev_ring, ptr)) {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Event ring rp points outside of the event ring\n");
                return IRQ_HANDLED;
        }

        dev_rp = mhi_to_virtual(ev_ring, ptr);

        /* Only proceed if event ring has pending events */
        if (ev_ring->rp == dev_rp)
                return IRQ_HANDLED;

        /* For client managed event ring, notify pending data */
        if (mhi_event->cl_manage) {
                struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
                struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

                if (mhi_dev)
                        mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
        } else {
                tasklet_schedule(&mhi_event->task);
        }

        return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
        struct mhi_controller *mhi_cntrl = priv;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_state state;
        enum mhi_pm_state pm_state = 0;
        enum mhi_ee_type ee;

        write_lock_irq(&mhi_cntrl->pm_lock);
        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                write_unlock_irq(&mhi_cntrl->pm_lock);
                goto exit_intvec;
        }

        state = mhi_get_mhi_state(mhi_cntrl);
        ee = mhi_get_exec_env(mhi_cntrl);

        trace_mhi_intvec_states(mhi_cntrl, ee, state);
        if (state == MHI_STATE_SYS_ERR) {
                dev_dbg(dev, "System error detected\n");
                pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                               MHI_PM_SYS_ERR_DETECT);
        }
        write_unlock_irq(&mhi_cntrl->pm_lock);

        if (pm_state != MHI_PM_SYS_ERR_DETECT)
                goto exit_intvec;

        switch (ee) {
        case MHI_EE_RDDM:
                /* proceed if power down is not already in progress */
                if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
                        mhi_cntrl->ee = ee;
                        mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
                        wake_up_all(&mhi_cntrl->state_event);
                }
                break;
        case MHI_EE_PBL:
        case MHI_EE_EDL:
        case MHI_EE_PTHRU:
                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
                mhi_cntrl->ee = ee;
                wake_up_all(&mhi_cntrl->state_event);
                mhi_pm_sys_err_handler(mhi_cntrl);
                break;
        default:
                wake_up_all(&mhi_cntrl->state_event);
                mhi_pm_sys_err_handler(mhi_cntrl);
                break;
        }

exit_intvec:
        return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
        struct mhi_controller *mhi_cntrl = dev;

        /* Wake up events waiting for state change */
        wake_up_all(&mhi_cntrl->state_event);

        return IRQ_WAKE_THREAD;
}

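/*
 * Event ring elements are consumed by the host and immediately handed back
 * to the device: advance WP and RP together and publish the new WP in the
 * event ring context so the device can reuse the slot.
 */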
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
                                        struct mhi_ring *ring)
{
        /* Update the WP */
        ring->wp += ring->el_size;

        if (ring->wp >= (ring->base + ring->len))
                ring->wp = ring->base;

        *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));

        /* Update the RP */
        ring->rp += ring->el_size;
        if (ring->rp >= (ring->base + ring->len))
                ring->rp = ring->base;

        /* Make the updates visible to all cores */
        smp_wmb();
}

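/*
 * Process a transfer completion event (EOT/EOB/OVERFLOW) or a doorbell event
 * (OOB/DB_MODE) for a channel: walk the TREs up to the one referenced by the
 * event, unmapping each buffer and invoking the client's xfer_cb.
 */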
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                            struct mhi_ring_element *event,
                            struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring, *tre_ring;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        struct mhi_result result;
        unsigned long flags = 0;
        u32 ev_code;

        ev_code = MHI_TRE_GET_EV_CODE(event);
        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;

        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
                -EOVERFLOW : 0;

        /*
         * If it's a DB event, we need to grab the lock with preemption
         * disabled and as a writer, because we have to update the DB
         * register and another thread could be doing the same.
         */
        if (ev_code >= MHI_EV_CC_OOB)
                write_lock_irqsave(&mhi_chan->lock, flags);
        else
                read_lock_bh(&mhi_chan->lock);

        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
                goto end_process_tx_event;

        switch (ev_code) {
        case MHI_EV_CC_OVERFLOW:
        case MHI_EV_CC_EOB:
        case MHI_EV_CC_EOT:
        {
                dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
                struct mhi_ring_element *local_rp, *ev_tre;
                void *dev_rp, *next_rp;
                struct mhi_buf_info *buf_info;
                u16 xfer_len;

                if (!is_valid_ring_ptr(tre_ring, ptr)) {
                        dev_err(&mhi_cntrl->mhi_dev->dev,
                                "Event element points outside of the tre ring\n");
                        break;
                }
                /* Get the TRB this event points to */
                ev_tre = mhi_to_virtual(tre_ring, ptr);

                dev_rp = ev_tre + 1;
                if (dev_rp >= (tre_ring->base + tre_ring->len))
                        dev_rp = tre_ring->base;

                result.dir = mhi_chan->dir;

                local_rp = tre_ring->rp;

                next_rp = local_rp + 1;
                if (next_rp >= tre_ring->base + tre_ring->len)
                        next_rp = tre_ring->base;
                if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
                        dev_err(&mhi_cntrl->mhi_dev->dev,
                                "Event element points to an unexpected TRE\n");
                        break;
                }

                while (local_rp != dev_rp) {
                        buf_info = buf_ring->rp;
                        /* If it's the last TRE, get length from the event */
                        if (local_rp == ev_tre)
                                xfer_len = MHI_TRE_GET_EV_LEN(event);
                        else
                                xfer_len = buf_info->len;

                        /* Unmap if it's not pre-mapped by client */
                        if (likely(!buf_info->pre_mapped))
                                mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

                        result.buf_addr = buf_info->cb_buf;

                        /* truncate to buf len if xfer_len is larger */
                        result.bytes_xferd =
                                min_t(u16, xfer_len, buf_info->len);
                        mhi_del_ring_element(mhi_cntrl, buf_ring);
                        mhi_del_ring_element(mhi_cntrl, tre_ring);
                        local_rp = tre_ring->rp;

                        read_unlock_bh(&mhi_chan->lock);

                        /* notify client */
                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

                        if (mhi_chan->dir == DMA_TO_DEVICE) {
                                atomic_dec(&mhi_cntrl->pending_pkts);
                                /* Release the reference got from mhi_queue() */
                                mhi_cntrl->runtime_put(mhi_cntrl);
                        }

                        /*
                         * Recycle the buffer if it is pre-allocated. If there
                         * is an error, there is not much we can do apart from
                         * dropping the packet.
                         */
                        if (mhi_chan->pre_alloc) {
                                if (mhi_queue_buf(mhi_chan->mhi_dev,
                                                  mhi_chan->dir,
                                                  buf_info->cb_buf,
                                                  buf_info->len, MHI_EOT)) {
                                        dev_err(dev,
                                                "Error recycling buffer for chan:%d\n",
                                                mhi_chan->chan);
                                        kfree(buf_info->cb_buf);
                                }
                        }

                        read_lock_bh(&mhi_chan->lock);
                }
                break;
        } /* CC_EOT */
        case MHI_EV_CC_OOB:
        case MHI_EV_CC_DB_MODE:
        {
                unsigned long pm_lock_flags;

                mhi_chan->db_cfg.db_mode = 1;
                read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
                if (tre_ring->wp != tre_ring->rp &&
                    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                }
                read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
                break;
        }
        case MHI_EV_CC_BAD_TRE:
        default:
                dev_err(dev, "Unknown event 0x%x\n", ev_code);
                break;
        } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE, event)) */

end_process_tx_event:
        if (ev_code >= MHI_EV_CC_OOB)
                write_unlock_irqrestore(&mhi_chan->lock, flags);
        else
                read_unlock_bh(&mhi_chan->lock);

        return 0;
}

static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
                           struct mhi_ring_element *event,
                           struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring, *tre_ring;
        struct mhi_buf_info *buf_info;
        struct mhi_result result;
        int ev_code;
        u32 cookie; /* offset to local descriptor */
        u16 xfer_len;

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;

        ev_code = MHI_TRE_GET_EV_CODE(event);
        cookie = MHI_TRE_GET_EV_COOKIE(event);
        xfer_len = MHI_TRE_GET_EV_LEN(event);

        /* Received out of bound cookie */
        WARN_ON(cookie >= buf_ring->len);

        buf_info = buf_ring->base + cookie;

        result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
                -EOVERFLOW : 0;

        /* truncate to buf len if xfer_len is larger */
        result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
        result.buf_addr = buf_info->cb_buf;
        result.dir = mhi_chan->dir;

        read_lock_bh(&mhi_chan->lock);

        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
                goto end_process_rsc_event;

        WARN_ON(!buf_info->used);

        /* notify the client */
        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

        /*
         * Note: We're arbitrarily incrementing RP even though the completion
         * packet we processed might not be the same one. We can do this
         * because the device is guaranteed to cache descriptors in the order
         * it receives them, so even if the completion event is for a
         * different descriptor, we can reuse all the descriptors in between.
         * Example:
         * Transfer Ring has descriptors: A, B, C, D
         * The last descriptor the host queued is D (WP) and the first is
         * A (RP).
         * The completion event we just serviced is descriptor C.
         * Then we can safely queue descriptors to replace A, B, and C even
         * though the host did not receive completions for them.
         */
        mhi_del_ring_element(mhi_cntrl, tre_ring);
        buf_info->used = false;

end_process_rsc_event:
        read_unlock_bh(&mhi_chan->lock);

        return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
                                       struct mhi_ring_element *tre)
{
        dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
        struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
        struct mhi_ring *mhi_ring = &cmd_ring->ring;
        struct mhi_ring_element *cmd_pkt;
        struct mhi_chan *mhi_chan;
        u32 chan;

        if (!is_valid_ring_ptr(mhi_ring, ptr)) {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Event element points outside of the cmd ring\n");
                return;
        }

        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);

        if (chan < mhi_cntrl->max_chan &&
            mhi_cntrl->mhi_chan[chan].configured) {
                mhi_chan = &mhi_cntrl->mhi_chan[chan];
                write_lock_bh(&mhi_chan->lock);
                mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
                complete(&mhi_chan->completion);
                write_unlock_bh(&mhi_chan->lock);
        } else {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Completion packet for invalid channel ID: %d\n", chan);
        }

        mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
                             struct mhi_event *mhi_event,
                             u32 event_quota)
{
        struct mhi_ring_element *dev_rp, *local_rp;
        struct mhi_ring *ev_ring = &mhi_event->ring;
        struct mhi_event_ctxt *er_ctxt =
                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        struct mhi_chan *mhi_chan;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 chan;
        int count = 0;
        dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

        /*
         * This is a quick check to avoid unnecessary event processing
         * in case MHI is already in error state, but it's still possible
         * to transition to error state while processing events.
         */
        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
                return -EIO;

        if (!is_valid_ring_ptr(ev_ring, ptr)) {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Event ring rp points outside of the event ring\n");
                return -EIO;
        }

        dev_rp = mhi_to_virtual(ev_ring, ptr);
        local_rp = ev_ring->rp;

        while (dev_rp != local_rp) {
                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

                trace_mhi_ctrl_event(mhi_cntrl, local_rp);

                switch (type) {
                case MHI_PKT_TYPE_BW_REQ_EVENT:
                {
                        struct mhi_link_info *link_info;

                        link_info = &mhi_cntrl->mhi_link_info;
                        write_lock_irq(&mhi_cntrl->pm_lock);
                        link_info->target_link_speed =
                                MHI_TRE_GET_EV_LINKSPEED(local_rp);
                        link_info->target_link_width =
                                MHI_TRE_GET_EV_LINKWIDTH(local_rp);
                        write_unlock_irq(&mhi_cntrl->pm_lock);
                        dev_dbg(dev, "Received BW_REQ event\n");
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
                        break;
                }
                case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
                {
                        enum mhi_state new_state;

                        new_state = MHI_TRE_GET_EV_STATE(local_rp);

                        dev_dbg(dev, "State change event to state: %s\n",
                                mhi_state_str(new_state));

                        switch (new_state) {
                        case MHI_STATE_M0:
                                mhi_pm_m0_transition(mhi_cntrl);
                                break;
                        case MHI_STATE_M1:
                                mhi_pm_m1_transition(mhi_cntrl);
                                break;
                        case MHI_STATE_M3:
                                mhi_pm_m3_transition(mhi_cntrl);
                                break;
                        case MHI_STATE_SYS_ERR:
                        {
                                enum mhi_pm_state pm_state;

                                dev_dbg(dev, "System error detected\n");
                                write_lock_irq(&mhi_cntrl->pm_lock);
                                pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                                               MHI_PM_SYS_ERR_DETECT);
                                write_unlock_irq(&mhi_cntrl->pm_lock);
                                if (pm_state == MHI_PM_SYS_ERR_DETECT)
                                        mhi_pm_sys_err_handler(mhi_cntrl);
                                break;
                        }
                        default:
                                dev_err(dev, "Invalid state: %s\n",
                                        mhi_state_str(new_state));
                        }

                        break;
                }
                case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
                        mhi_process_cmd_completion(mhi_cntrl, local_rp);
                        break;
                case MHI_PKT_TYPE_EE_EVENT:
                {
                        enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
                        enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

                        dev_dbg(dev, "Received EE event: %s\n",
                                TO_MHI_EXEC_STR(event));
                        switch (event) {
                        case MHI_EE_SBL:
                                st = DEV_ST_TRANSITION_SBL;
                                break;
                        case MHI_EE_WFW:
                        case MHI_EE_AMSS:
                                st = DEV_ST_TRANSITION_MISSION_MODE;
                                break;
                        case MHI_EE_FP:
                                st = DEV_ST_TRANSITION_FP;
                                break;
                        case MHI_EE_RDDM:
                                mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
                                write_lock_irq(&mhi_cntrl->pm_lock);
                                mhi_cntrl->ee = event;
                                write_unlock_irq(&mhi_cntrl->pm_lock);
                                wake_up_all(&mhi_cntrl->state_event);
                                break;
                        default:
                                dev_err(dev,
                                        "Unhandled EE event: 0x%x\n", event);
                        }
                        if (st != DEV_ST_TRANSITION_MAX)
                                mhi_queue_state_transition(mhi_cntrl, st);

                        break;
                }
                case MHI_PKT_TYPE_TX_EVENT:
                        chan = MHI_TRE_GET_EV_CHID(local_rp);

                        WARN_ON(chan >= mhi_cntrl->max_chan);

                        /*
                         * Only process the event ring elements whose channel
                         * ID is within the maximum supported range.
                         */
                        if (chan < mhi_cntrl->max_chan) {
                                mhi_chan = &mhi_cntrl->mhi_chan[chan];
                                if (!mhi_chan->configured)
                                        break;
                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
                        }
                        break;
                default:
                        dev_err(dev, "Unhandled event type: %d\n", type);
                        break;
                }

                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
                local_rp = ev_ring->rp;

                ptr = le64_to_cpu(er_ctxt->rp);
                if (!is_valid_ring_ptr(ev_ring, ptr)) {
                        dev_err(&mhi_cntrl->mhi_dev->dev,
                                "Event ring rp points outside of the event ring\n");
                        return -EIO;
                }

                dev_rp = mhi_to_virtual(ev_ring, ptr);
                count++;
        }

        read_lock_bh(&mhi_cntrl->pm_lock);

        /* Ring EV DB only if there is any pending element to process */
        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
                mhi_ring_er_db(mhi_event);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
                                struct mhi_event *mhi_event,
                                u32 event_quota)
{
        struct mhi_ring_element *dev_rp, *local_rp;
        struct mhi_ring *ev_ring = &mhi_event->ring;
        struct mhi_event_ctxt *er_ctxt =
                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        int count = 0;
        u32 chan;
        struct mhi_chan *mhi_chan;
        dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);

        if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
                return -EIO;

        if (!is_valid_ring_ptr(ev_ring, ptr)) {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Event ring rp points outside of the event ring\n");
                return -EIO;
        }

        dev_rp = mhi_to_virtual(ev_ring, ptr);
        local_rp = ev_ring->rp;

        while (dev_rp != local_rp && event_quota > 0) {
                enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

                trace_mhi_data_event(mhi_cntrl, local_rp);

                chan = MHI_TRE_GET_EV_CHID(local_rp);

                WARN_ON(chan >= mhi_cntrl->max_chan);

                /*
                 * Only process the event ring elements whose channel
                 * ID is within the maximum supported range.
                 */
                if (chan < mhi_cntrl->max_chan &&
                    mhi_cntrl->mhi_chan[chan].configured) {
                        mhi_chan = &mhi_cntrl->mhi_chan[chan];

                        if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
                                parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
                                event_quota--;
                        } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
                                parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
                                event_quota--;
                        }
                }

                mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
                local_rp = ev_ring->rp;

                ptr = le64_to_cpu(er_ctxt->rp);
                if (!is_valid_ring_ptr(ev_ring, ptr)) {
                        dev_err(&mhi_cntrl->mhi_dev->dev,
                                "Event ring rp points outside of the event ring\n");
                        return -EIO;
                }

                dev_rp = mhi_to_virtual(ev_ring, ptr);
                count++;
        }
        read_lock_bh(&mhi_cntrl->pm_lock);

        /* Ring EV DB only if there is any pending element to process */
        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
                mhi_ring_er_db(mhi_event);
        read_unlock_bh(&mhi_cntrl->pm_lock);

        return count;
}

void mhi_ev_task(unsigned long data)
{
        struct mhi_event *mhi_event = (struct mhi_event *)data;
        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

        /* process all pending events */
        spin_lock_bh(&mhi_event->lock);
        mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
        spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
        struct mhi_event *mhi_event = (struct mhi_event *)data;
        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        enum mhi_state state;
        enum mhi_pm_state pm_state = 0;
        int ret;

        /*
         * We can check the PM state without a lock here because there is no
         * way the PM state can change from reg access valid to no access
         * while this thread is executing.
         */
        if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                /*
                 * We may have a pending event but are not allowed to
                 * process it since we are probably in a suspended state,
                 * so trigger a resume.
                 */
                mhi_trigger_resume(mhi_cntrl);

                return;
        }

        /* Process ctrl events */
        ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

        /*
         * We received an IRQ but there are no events to process. Maybe the
         * device went to SYS_ERR state? Check the state to confirm.
         */
        if (!ret) {
                write_lock_irq(&mhi_cntrl->pm_lock);
                state = mhi_get_mhi_state(mhi_cntrl);
                if (state == MHI_STATE_SYS_ERR) {
                        dev_dbg(dev, "System error detected\n");
                        pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                                       MHI_PM_SYS_ERR_DETECT);
                }
                write_unlock_irq(&mhi_cntrl->pm_lock);
                if (pm_state == MHI_PM_SYS_ERR_DETECT)
                        mhi_pm_sys_err_handler(mhi_cntrl);
        }
}

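/* The ring is full when advancing WP by one element would land on RP */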
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
                             struct mhi_ring *ring)
{
        void *tmp = ring->wp + ring->el_size;

        if (tmp >= (ring->base + ring->len))
                tmp = ring->base;

        return (tmp == ring->rp);
}

static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
                     enum dma_data_direction dir, enum mhi_flags mflags)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
                mhi_dev->dl_chan;
        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
        unsigned long flags;
        int ret;

        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
                return -EIO;

        ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
        if (unlikely(ret))
                return -EAGAIN;

        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
        if (unlikely(ret))
                return ret;

        read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

        /*
         * Packet is queued, so take a usage ref to exit M3 if necessary.
         * For a host->device buffer, the balanced put is done on buffer
         * completion; for a device->host buffer, it is done after ringing
         * the DB.
         */
        mhi_cntrl->runtime_get(mhi_cntrl);

        /* Assert dev_wake (to exit/prevent M1/M2) */
        mhi_cntrl->wake_toggle(mhi_cntrl);

        if (mhi_chan->dir == DMA_TO_DEVICE)
                atomic_inc(&mhi_cntrl->pending_pkts);

        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
                mhi_ring_chan_db(mhi_cntrl, mhi_chan);

        if (dir == DMA_FROM_DEVICE)
                mhi_cntrl->runtime_put(mhi_cntrl);

        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

        return ret;
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
                  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
                mhi_dev->dl_chan;
        struct mhi_buf_info buf_info = { };

        buf_info.v_addr = skb->data;
        buf_info.cb_buf = skb;
        buf_info.len = len;

        if (unlikely(mhi_chan->pre_alloc))
                return -EINVAL;

        return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

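/*
 * Build a transfer ring element (TRE) for the given buffer, map it for DMA
 * if the client has not pre-mapped it, and advance the buffer and TRE ring
 * write pointers.
 */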
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
                struct mhi_buf_info *info, enum mhi_flags flags)
{
        struct mhi_ring *buf_ring, *tre_ring;
        struct mhi_ring_element *mhi_tre;
        struct mhi_buf_info *buf_info;
        int eot, eob, chain, bei;
        int ret = 0;

        /* Protect accesses for reading and incrementing WP */
        write_lock_bh(&mhi_chan->lock);

        if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
                ret = -ENODEV;
                goto out;
        }

        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;

        buf_info = buf_ring->wp;
        WARN_ON(buf_info->used);
        buf_info->pre_mapped = info->pre_mapped;
        if (info->pre_mapped)
                buf_info->p_addr = info->p_addr;
        else
                buf_info->v_addr = info->v_addr;
        buf_info->cb_buf = info->cb_buf;
        buf_info->wp = tre_ring->wp;
        buf_info->dir = mhi_chan->dir;
        buf_info->len = info->len;

        if (!info->pre_mapped) {
                ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
                if (ret)
                        goto out;
        }

        eob = !!(flags & MHI_EOB);
        eot = !!(flags & MHI_EOT);
        chain = !!(flags & MHI_CHAIN);
        bei = !!(mhi_chan->intmod);

        mhi_tre = tre_ring->wp;
        mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
        mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
        mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

        trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
        /* increment WP */
        mhi_add_ring_element(mhi_cntrl, tre_ring);
        mhi_add_ring_element(mhi_cntrl, buf_ring);

out:
        write_unlock_bh(&mhi_chan->lock);

        return ret;
}

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
                  void *buf, size_t len, enum mhi_flags mflags)
{
        struct mhi_buf_info buf_info = { };

        buf_info.v_addr = buf;
        buf_info.cb_buf = buf;
        buf_info.len = len;

        return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
                mhi_dev->ul_chan : mhi_dev->dl_chan;
        struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

        return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
                 struct mhi_chan *mhi_chan,
                 enum mhi_cmd_type cmd)
{
        struct mhi_ring_element *cmd_tre = NULL;
        struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
        struct mhi_ring *ring = &mhi_cmd->ring;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int chan = 0;

        if (mhi_chan)
                chan = mhi_chan->chan;

        spin_lock_bh(&mhi_cmd->lock);
        if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
                spin_unlock_bh(&mhi_cmd->lock);
                return -ENOMEM;
        }

        /* prepare the cmd tre */
        cmd_tre = ring->wp;
        switch (cmd) {
        case MHI_CMD_RESET_CHAN:
                cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
                cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
                cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
                break;
        case MHI_CMD_STOP_CHAN:
                cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
                cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
                cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
                break;
        case MHI_CMD_START_CHAN:
                cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
                cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
                cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
                break;
        default:
                dev_err(dev, "Command not supported\n");
                break;
        }

        /* queue to hardware */
        mhi_add_ring_element(mhi_cntrl, ring);
        read_lock_bh(&mhi_cntrl->pm_lock);
        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
                mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
        read_unlock_bh(&mhi_cntrl->pm_lock);
        spin_unlock_bh(&mhi_cmd->lock);

        return 0;
}

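/*
 * Move a channel to the requested state by sending the corresponding command
 * ring command to the device and waiting for its completion event.
 */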
static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
                                    struct mhi_chan *mhi_chan,
                                    enum mhi_ch_state_type to_state)
{
        struct device *dev = &mhi_chan->mhi_dev->dev;
        enum mhi_cmd_type cmd = MHI_CMD_NOP;
        int ret;

        trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
        switch (to_state) {
        case MHI_CH_STATE_TYPE_RESET:
                write_lock_irq(&mhi_chan->lock);
                if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
                    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
                    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
                        write_unlock_irq(&mhi_chan->lock);
                        return -EINVAL;
                }
                mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
                write_unlock_irq(&mhi_chan->lock);

                cmd = MHI_CMD_RESET_CHAN;
                break;
        case MHI_CH_STATE_TYPE_STOP:
                if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
                        return -EINVAL;

                cmd = MHI_CMD_STOP_CHAN;
                break;
        case MHI_CH_STATE_TYPE_START:
                if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
                    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
                        return -EINVAL;

                cmd = MHI_CMD_START_CHAN;
                break;
        default:
                dev_err(dev, "%d: Channel state update to %s not allowed\n",
                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
                return -EINVAL;
        }

        /* bring host and device out of suspended states */
        ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
        if (ret)
                return ret;
        mhi_cntrl->runtime_get(mhi_cntrl);

        reinit_completion(&mhi_chan->completion);
        ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
        if (ret) {
                dev_err(dev, "%d: Failed to send %s channel command\n",
                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
                goto exit_channel_update;
        }

        ret = wait_for_completion_timeout(&mhi_chan->completion,
                                          msecs_to_jiffies(mhi_cntrl->timeout_ms));
        if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
                dev_err(dev,
                        "%d: Failed to receive %s channel command completion\n",
                        mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
                ret = -EIO;
                goto exit_channel_update;
        }

        ret = 0;

        if (to_state != MHI_CH_STATE_TYPE_RESET) {
                write_lock_irq(&mhi_chan->lock);
                mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
                        MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
                write_unlock_irq(&mhi_chan->lock);
        }

        trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
exit_channel_update:
        mhi_cntrl->runtime_put(mhi_cntrl);
        mhi_device_put(mhi_cntrl->mhi_dev);

        return ret;
}

static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
                                  struct mhi_chan *mhi_chan)
{
        int ret;
        struct device *dev = &mhi_chan->mhi_dev->dev;

        mutex_lock(&mhi_chan->mutex);

        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
                dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
                goto exit_unprepare_channel;
        }

        /* no more processing events for this channel */
        ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
                                       MHI_CH_STATE_TYPE_RESET);
        if (ret)
                dev_err(dev, "%d: Failed to reset channel, still resetting\n",
                        mhi_chan->chan);

exit_unprepare_channel:
        write_lock_irq(&mhi_chan->lock);
        mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
        write_unlock_irq(&mhi_chan->lock);

        if (!mhi_chan->offload_ch) {
                mhi_reset_chan(mhi_cntrl, mhi_chan);
                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
        }
        dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);

        mutex_unlock(&mhi_chan->mutex);
}

static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
                               struct mhi_chan *mhi_chan, unsigned int flags)
{
        int ret = 0;
        struct device *dev = &mhi_chan->mhi_dev->dev;

        if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
                dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
                        TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
                return -ENOTCONN;
        }

        mutex_lock(&mhi_chan->mutex);

        /* The client manages the channel context for offload channels */
        if (!mhi_chan->offload_ch) {
                ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
                if (ret)
                        goto error_init_chan;
        }

        ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
                                       MHI_CH_STATE_TYPE_START);
        if (ret)
                goto error_pm_state;

        if (mhi_chan->dir == DMA_FROM_DEVICE)
                mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);

        /* Pre-allocate buffer for xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
                                                       &mhi_chan->tre_ring);
                size_t len = mhi_cntrl->buffer_len;

                while (nr_el--) {
                        void *buf;
                        struct mhi_buf_info info = { };

                        buf = kmalloc(len, GFP_KERNEL);
                        if (!buf) {
                                ret = -ENOMEM;
                                goto error_pre_alloc;
                        }

                        /* Prepare transfer descriptors */
                        info.v_addr = buf;
                        info.cb_buf = buf;
                        info.len = len;
                        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
                        if (ret) {
                                kfree(buf);
                                goto error_pre_alloc;
                        }
                }

                read_lock_bh(&mhi_cntrl->pm_lock);
                if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
                        read_lock_irq(&mhi_chan->lock);
                        mhi_ring_chan_db(mhi_cntrl, mhi_chan);
                        read_unlock_irq(&mhi_chan->lock);
                }
                read_unlock_bh(&mhi_cntrl->pm_lock);
        }

        mutex_unlock(&mhi_chan->mutex);

        return 0;

error_pm_state:
        if (!mhi_chan->offload_ch)
                mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
        mutex_unlock(&mhi_chan->mutex);

        return ret;

error_pre_alloc:
        mutex_unlock(&mhi_chan->mutex);
        mhi_unprepare_channel(mhi_cntrl, mhi_chan);

        return ret;
}

static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
                                  struct mhi_event *mhi_event,
                                  struct mhi_event_ctxt *er_ctxt,
                                  int chan)
{
        struct mhi_ring_element *dev_rp, *local_rp;
        struct mhi_ring *ev_ring;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        unsigned long flags;
        dma_addr_t ptr;

        dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

        ev_ring = &mhi_event->ring;

        /* mark all pending events related to the channel as STALE */
        spin_lock_irqsave(&mhi_event->lock, flags);

        ptr = le64_to_cpu(er_ctxt->rp);
        if (!is_valid_ring_ptr(ev_ring, ptr)) {
                dev_err(&mhi_cntrl->mhi_dev->dev,
                        "Event ring rp points outside of the event ring\n");
                dev_rp = ev_ring->rp;
        } else {
                dev_rp = mhi_to_virtual(ev_ring, ptr);
        }

        local_rp = ev_ring->rp;
        while (dev_rp != local_rp) {
                if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
                    chan == MHI_TRE_GET_EV_CHID(local_rp))
                        local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
                                        MHI_PKT_TYPE_STALE_EVENT);
                local_rp++;
                if (local_rp == (ev_ring->base + ev_ring->len))
                        local_rp = ev_ring->base;
        }

        dev_dbg(dev, "Finished marking events as stale events\n");
        spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
                                struct mhi_chan *mhi_chan)
{
        struct mhi_ring *buf_ring, *tre_ring;
        struct mhi_result result;

        /* Reset any pending buffers */
        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
        result.transaction_status = -ENOTCONN;
        result.bytes_xferd = 0;
        while (tre_ring->rp != tre_ring->wp) {
                struct mhi_buf_info *buf_info = buf_ring->rp;

                if (mhi_chan->dir == DMA_TO_DEVICE) {
                        atomic_dec(&mhi_cntrl->pending_pkts);
                        /* Release the reference got from mhi_queue() */
                        mhi_cntrl->runtime_put(mhi_cntrl);
                }

                if (!buf_info->pre_mapped)
                        mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

                mhi_del_ring_element(mhi_cntrl, buf_ring);
                mhi_del_ring_element(mhi_cntrl, tre_ring);

                if (mhi_chan->pre_alloc) {
                        kfree(buf_info->cb_buf);
                } else {
                        result.buf_addr = buf_info->cb_buf;
                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
                }
        }
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
        struct mhi_event *mhi_event;
        struct mhi_event_ctxt *er_ctxt;
        int chan = mhi_chan->chan;

        /* Nothing to reset, client doesn't queue buffers */
        if (mhi_chan->offload_ch)
                return;

        read_lock_bh(&mhi_cntrl->pm_lock);
        mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
        er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

        mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

        mhi_reset_data_chan(mhi_cntrl, mhi_chan);

        read_unlock_bh(&mhi_cntrl->pm_lock);
}

static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
        int ret, dir;
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan;

        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
                if (!mhi_chan)
                        continue;

                ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
                if (ret)
                        goto error_open_chan;
        }

        return 0;

error_open_chan:
        for (--dir; dir >= 0; dir--) {
                mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
                if (!mhi_chan)
                        continue;

                mhi_unprepare_channel(mhi_cntrl, mhi_chan);
        }

        return ret;
}

int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
        return __mhi_prepare_for_transfer(mhi_dev, 0);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
{
        return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);

void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_chan *mhi_chan;
        int dir;

        for (dir = 0; dir < 2; dir++) {
                mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
                if (!mhi_chan)
                        continue;

                mhi_unprepare_channel(mhi_cntrl, mhi_chan);
        }
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
{
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        void __iomem *base = mhi_cntrl->regs;
        int ret;

        ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
        if (ret) {
                dev_err(dev, "Unable to read CHDBOFF register\n");
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);