Lines Matching +full:cmd +full:- +full:db

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
49 while (retry--) { in mhi_poll_reg_field()
60 return -ETIMEDOUT; in mhi_poll_reg_field()
66 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
99 if (db_cfg->db_mode) { in mhi_db_brstmode()
100 db_cfg->db_val = db_val; in mhi_db_brstmode()
102 db_cfg->db_mode = 0; in mhi_db_brstmode()
111 db_cfg->db_val = db_val; in mhi_db_brstmode_disable()
117 struct mhi_ring *ring = &mhi_event->ring; in mhi_ring_er_db()
119 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
120 ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); in mhi_ring_er_db()
125 dma_addr_t db; in mhi_ring_cmd_db() local
126 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_ring_cmd_db()
128 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_cmd_db()
129 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_cmd_db()
130 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
136 struct mhi_ring *ring = &mhi_chan->tre_ring; in mhi_ring_chan_db()
137 dma_addr_t db; in mhi_ring_chan_db() local
139 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_chan_db()
146 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_chan_db()
148 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
149 ring->db_addr, db); in mhi_ring_chan_db()
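
The doorbell value used by mhi_ring_cmd_db() and mhi_ring_chan_db() above is just the device-visible address of the ring write pointer: the ring's IOMMU base plus the write pointer's byte offset into the host-virtual buffer. A minimal userspace sketch of that arithmetic, using a simplified ring model rather than the kernel's struct mhi_ring:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the struct mhi_ring fields used here. */
struct ring {
	uint64_t iommu_base;	/* device-visible (DMA) base address */
	void *base;		/* host-virtual base of the ring buffer */
	void *wp;		/* host-virtual write pointer */
};

/* Same arithmetic as mhi_ring_cmd_db()/mhi_ring_chan_db(): translate the
 * host-virtual write pointer into the DMA address that is stored in the
 * context write pointer and written to the doorbell register. */
static uint64_t ring_db_value(const struct ring *r)
{
	return r->iommu_base + (uint64_t)((char *)r->wp - (char *)r->base);
}

int main(void)
{
	char buf[1024];
	struct ring r = {
		.iommu_base = 0x80000000ULL,
		.base = buf,
		.wp = buf + 256,	/* write pointer 256 bytes into the ring */
	};

	printf("doorbell value: 0x%llx\n",
	       (unsigned long long)ring_db_value(&r));	/* prints 0x80000100 */
	return 0;
}
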
155 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
164 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
172 if (mhi_cntrl->reset) { in mhi_soc_reset()
173 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
178 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
186 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
187 buf_info->v_addr, buf_info->len, in mhi_map_single_no_bb()
188 buf_info->dir); in mhi_map_single_no_bb()
189 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
190 return -ENOMEM; in mhi_map_single_no_bb()
198 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
199 &buf_info->p_addr, GFP_ATOMIC); in mhi_map_single_use_bb()
202 return -ENOMEM; in mhi_map_single_use_bb()
204 if (buf_info->dir == DMA_TO_DEVICE) in mhi_map_single_use_bb()
205 memcpy(buf, buf_info->v_addr, buf_info->len); in mhi_map_single_use_bb()
207 buf_info->bb_addr = buf; in mhi_map_single_use_bb()
215 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
216 buf_info->dir); in mhi_unmap_single_no_bb()
222 if (buf_info->dir == DMA_FROM_DEVICE) in mhi_unmap_single_use_bb()
223 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); in mhi_unmap_single_use_bb()
225 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
226 buf_info->bb_addr, buf_info->p_addr); in mhi_unmap_single_use_bb()
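
The bounce-buffer path in mhi_map_single_use_bb()/mhi_unmap_single_use_bb() copies data only in the direction the DMA actually flows: into the bounce buffer before a host-to-device transfer, and back to the client buffer after a device-to-host transfer. A standalone sketch of that direction logic, with plain allocations standing in for dma_alloc_coherent()/dma_free_coherent():

#include <stdlib.h>
#include <string.h>

enum dma_dir { TO_DEVICE, FROM_DEVICE };	/* stand-ins for DMA_TO_DEVICE/DMA_FROM_DEVICE */

struct buf_info {
	void *v_addr;	/* client buffer */
	void *bb_addr;	/* bounce buffer */
	size_t len;
	enum dma_dir dir;
};

/* Mirrors mhi_map_single_use_bb(): allocate the bounce buffer and, for
 * outbound transfers, copy the client data into it up front. */
static int map_use_bb(struct buf_info *bi)
{
	bi->bb_addr = malloc(bi->len);		/* kernel uses dma_alloc_coherent() */
	if (!bi->bb_addr)
		return -1;

	if (bi->dir == TO_DEVICE)
		memcpy(bi->bb_addr, bi->v_addr, bi->len);
	return 0;
}

/* Mirrors mhi_unmap_single_use_bb(): for inbound transfers, copy the received
 * data back to the client buffer before releasing the bounce buffer. */
static void unmap_use_bb(struct buf_info *bi)
{
	if (bi->dir == FROM_DEVICE)
		memcpy(bi->v_addr, bi->bb_addr, bi->len);
	free(bi->bb_addr);			/* kernel uses dma_free_coherent() */
}

int main(void)
{
	char payload[] = "hello";
	struct buf_info bi = { .v_addr = payload, .len = sizeof(payload), .dir = TO_DEVICE };

	if (map_use_bb(&bi) == 0)
		unmap_use_bb(&bi);
	return 0;
}
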
234 if (ring->wp < ring->rp) { in get_nr_avail_ring_elements()
235 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; in get_nr_avail_ring_elements()
237 nr_el = (ring->rp - ring->base) / ring->el_size; in get_nr_avail_ring_elements()
238 nr_el += ((ring->base + ring->len - ring->wp) / in get_nr_avail_ring_elements()
239 ring->el_size) - 1; in get_nr_avail_ring_elements()
247 return (addr - ring->iommu_base) + ring->base; in mhi_to_virtual()
253 ring->wp += ring->el_size; in mhi_add_ring_element()
254 if (ring->wp >= (ring->base + ring->len)) in mhi_add_ring_element()
255 ring->wp = ring->base; in mhi_add_ring_element()
263 ring->rp += ring->el_size; in mhi_del_ring_element()
264 if (ring->rp >= (ring->base + ring->len)) in mhi_del_ring_element()
265 ring->rp = ring->base; in mhi_del_ring_element()
272 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && in is_valid_ring_ptr()
273 !(addr & (sizeof(struct mhi_ring_element) - 1)); in is_valid_ring_ptr()
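
get_nr_avail_ring_elements(), mhi_add_ring_element(), mhi_del_ring_element() and mhi_is_ring_full() all treat the ring as a circular buffer that keeps one element permanently unused, so that rp == wp unambiguously means "empty". A compilable sketch of that accounting, using element indices instead of the kernel's pointer arithmetic:

#include <stdio.h>

/* Simplified ring: indices rather than pointers, same one-slot-free invariant. */
struct ring {
	unsigned int rp, wp;	/* read/write element indices */
	unsigned int nr_el;	/* total number of elements in the ring */
};

/* Same result as get_nr_avail_ring_elements(): free slots, minus the one
 * slot that is never used so the empty and full cases stay distinguishable. */
static unsigned int nr_avail(const struct ring *r)
{
	if (r->wp < r->rp)
		return r->rp - r->wp - 1;
	return r->rp + (r->nr_el - r->wp) - 1;
}

/* mhi_add_ring_element(): advance wp with wrap-around. */
static void add_element(struct ring *r)
{
	r->wp = (r->wp + 1) % r->nr_el;
}

/* mhi_del_ring_element(): advance rp with wrap-around. */
static void del_element(struct ring *r)
{
	r->rp = (r->rp + 1) % r->nr_el;
}

/* mhi_is_ring_full(): full when advancing wp would land on rp. */
static int is_full(const struct ring *r)
{
	return ((r->wp + 1) % r->nr_el) == r->rp;
}

int main(void)
{
	struct ring r = { .rp = 0, .wp = 0, .nr_el = 8 };

	printf("empty: avail=%u full=%d\n", nr_avail(&r), is_full(&r));	/* 7, 0 */
	for (int i = 0; i < 7; i++)
		add_element(&r);
	printf("after 7 adds: avail=%u full=%d\n", nr_avail(&r), is_full(&r));	/* 0, 1 */
	del_element(&r);
	printf("after 1 del: avail=%u full=%d\n", nr_avail(&r), is_full(&r));	/* 1, 0 */
	return 0;
}
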
283 if (dev->bus != &mhi_bus_type) in mhi_destroy_device()
287 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
290 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_destroy_device()
293 ul_chan = mhi_dev->ul_chan; in mhi_destroy_device()
294 dl_chan = mhi_dev->dl_chan; in mhi_destroy_device()
312 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
315 put_device(&ul_chan->mhi_dev->dev); in mhi_destroy_device()
319 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
322 put_device(&dl_chan->mhi_dev->dev); in mhi_destroy_device()
325 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
326 mhi_dev->name); in mhi_destroy_device()
338 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count()
340 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_get_free_desc_count()
341 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_get_free_desc_count()
351 if (!mhi_dev->dev.driver) in mhi_notify()
354 mhi_drv = to_mhi_driver(mhi_dev->dev.driver); in mhi_notify()
356 if (mhi_drv->status_cb) in mhi_notify()
357 mhi_drv->status_cb(mhi_dev, cb_reason); in mhi_notify()
366 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
369 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
370 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
371 if (!mhi_chan->configured || mhi_chan->mhi_dev || in mhi_create_devices()
372 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
378 mhi_dev->dev_type = MHI_DEVICE_XFER; in mhi_create_devices()
379 switch (mhi_chan->dir) { in mhi_create_devices()
381 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
382 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
386 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
387 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
391 put_device(&mhi_dev->dev); in mhi_create_devices()
395 get_device(&mhi_dev->dev); in mhi_create_devices()
396 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
399 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
400 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { in mhi_create_devices()
403 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_create_devices()
404 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
405 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
407 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
408 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
410 get_device(&mhi_dev->dev); in mhi_create_devices()
411 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
416 mhi_dev->name = mhi_chan->name; in mhi_create_devices()
417 dev_set_name(&mhi_dev->dev, "%s_%s", in mhi_create_devices()
418 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
419 mhi_dev->name); in mhi_create_devices()
422 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) in mhi_create_devices()
423 device_init_wakeup(&mhi_dev->dev, true); in mhi_create_devices()
425 ret = device_add(&mhi_dev->dev); in mhi_create_devices()
427 put_device(&mhi_dev->dev); in mhi_create_devices()
434 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler()
436 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_irq_handler()
445 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
446 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
451 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
452 ptr = le64_to_cpu(er_ctxt->rp); in mhi_irq_handler()
455 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
463 if (ev_ring->rp == dev_rp) in mhi_irq_handler()
467 if (mhi_event->cl_manage) { in mhi_irq_handler()
468 struct mhi_chan *mhi_chan = mhi_event->mhi_chan; in mhi_irq_handler()
469 struct mhi_device *mhi_dev = mhi_chan->mhi_dev; in mhi_irq_handler()
474 tasklet_schedule(&mhi_event->task); in mhi_irq_handler()
483 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
488 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
489 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
490 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
497 TO_MHI_EXEC_STR(mhi_cntrl->ee), in mhi_intvec_threaded_handler()
498 mhi_state_str(mhi_cntrl->dev_state), in mhi_intvec_threaded_handler()
506 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
514 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
515 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
516 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
517 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
523 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
524 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
525 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
529 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
544 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
553 ring->wp += ring->el_size; in mhi_recycle_ev_ring_element()
555 if (ring->wp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
556 ring->wp = ring->base; in mhi_recycle_ev_ring_element()
558 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); in mhi_recycle_ev_ring_element()
561 ring->rp += ring->el_size; in mhi_recycle_ev_ring_element()
562 if (ring->rp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
563 ring->rp = ring->base; in mhi_recycle_ev_ring_element()
574 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
580 buf_ring = &mhi_chan->buf_ring; in parse_xfer_event()
581 tre_ring = &mhi_chan->tre_ring; in parse_xfer_event()
584 -EOVERFLOW : 0; in parse_xfer_event()
587 * If it's a DB Event then we need to grab the lock in parse_xfer_event()
589 * have to update db register and there are chances that in parse_xfer_event()
593 write_lock_irqsave(&mhi_chan->lock, flags); in parse_xfer_event()
595 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
597 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_xfer_event()
612 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
620 if (dev_rp >= (tre_ring->base + tre_ring->len)) in parse_xfer_event()
621 dev_rp = tre_ring->base; in parse_xfer_event()
623 result.dir = mhi_chan->dir; in parse_xfer_event()
625 local_rp = tre_ring->rp; in parse_xfer_event()
627 buf_info = buf_ring->rp; in parse_xfer_event()
632 xfer_len = buf_info->len; in parse_xfer_event()
634 /* Unmap if it's not pre-mapped by client */ in parse_xfer_event()
635 if (likely(!buf_info->pre_mapped)) in parse_xfer_event()
636 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
638 result.buf_addr = buf_info->cb_buf; in parse_xfer_event()
642 min_t(u16, xfer_len, buf_info->len); in parse_xfer_event()
645 local_rp = tre_ring->rp; in parse_xfer_event()
647 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
650 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_xfer_event()
652 if (mhi_chan->dir == DMA_TO_DEVICE) { in parse_xfer_event()
653 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
655 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
659 * Recycle the buffer if buffer is pre-allocated, in parse_xfer_event()
663 if (mhi_chan->pre_alloc) { in parse_xfer_event()
664 if (mhi_queue_buf(mhi_chan->mhi_dev, in parse_xfer_event()
665 mhi_chan->dir, in parse_xfer_event()
666 buf_info->cb_buf, in parse_xfer_event()
667 buf_info->len, MHI_EOT)) { in parse_xfer_event()
670 mhi_chan->chan); in parse_xfer_event()
671 kfree(buf_info->cb_buf); in parse_xfer_event()
675 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
684 mhi_chan->db_cfg.db_mode = 1; in parse_xfer_event()
685 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
686 if (tre_ring->wp != tre_ring->rp && in parse_xfer_event()
690 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
701 write_unlock_irqrestore(&mhi_chan->lock, flags); in parse_xfer_event()
703 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
719 buf_ring = &mhi_chan->buf_ring; in parse_rsc_event()
720 tre_ring = &mhi_chan->tre_ring; in parse_rsc_event()
727 WARN_ON(cookie >= buf_ring->len); in parse_rsc_event()
729 buf_info = buf_ring->base + cookie; in parse_rsc_event()
732 -EOVERFLOW : 0; in parse_rsc_event()
735 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); in parse_rsc_event()
736 result.buf_addr = buf_info->cb_buf; in parse_rsc_event()
737 result.dir = mhi_chan->dir; in parse_rsc_event()
739 read_lock_bh(&mhi_chan->lock); in parse_rsc_event()
741 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_rsc_event()
744 WARN_ON(!buf_info->used); in parse_rsc_event()
747 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_rsc_event()
753 * receive, so even though completion event is different we can re-use in parse_rsc_event()
764 buf_info->used = false; in parse_rsc_event()
767 read_unlock_bh(&mhi_chan->lock); in parse_rsc_event()
776 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
777 struct mhi_ring *mhi_ring = &cmd_ring->ring; in mhi_process_cmd_completion()
783 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
784 "Event element points outside of the cmd ring\n"); in mhi_process_cmd_completion()
792 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
793 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
794 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
795 write_lock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
796 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); in mhi_process_cmd_completion()
797 complete(&mhi_chan->completion); in mhi_process_cmd_completion()
798 write_unlock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
800 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
812 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_ctrl_ev_ring()
814 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
816 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
819 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
826 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
827 return -EIO; in mhi_process_ctrl_ev_ring()
830 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
832 return -EIO; in mhi_process_ctrl_ev_ring()
836 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
846 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
847 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
848 link_info->target_link_speed = in mhi_process_ctrl_ev_ring()
850 link_info->target_link_width = in mhi_process_ctrl_ev_ring()
852 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
881 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
884 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
919 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
920 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
921 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
922 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
936 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
942 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
943 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
944 if (!mhi_chan->configured) in mhi_process_ctrl_ev_ring()
955 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
957 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
959 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
961 return -EIO; in mhi_process_ctrl_ev_ring()
968 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
970 /* Ring EV DB only if there is any pending element to process */ in mhi_process_ctrl_ev_ring()
973 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
983 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_data_event_ring()
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
989 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
992 return -EIO; in mhi_process_data_event_ring()
995 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
997 return -EIO; in mhi_process_data_event_ring()
1001 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1008 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1014 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1015 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1016 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1020 event_quota--; in mhi_process_data_event_ring()
1023 event_quota--; in mhi_process_data_event_ring()
1028 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1030 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
1032 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1034 return -EIO; in mhi_process_data_event_ring()
1040 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1042 /* Ring EV DB only if there is any pending element to process */ in mhi_process_data_event_ring()
1045 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1053 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task()
1056 spin_lock_bh(&mhi_event->lock); in mhi_ev_task()
1057 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1058 spin_unlock_bh(&mhi_event->lock); in mhi_ev_task()
1064 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task()
1065 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1075 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1087 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1094 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1101 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1110 void *tmp = ring->wp + ring->el_size; in mhi_is_ring_full()
1112 if (tmp >= (ring->base + ring->len)) in mhi_is_ring_full()
1113 tmp = ring->base; in mhi_is_ring_full()
1115 return (tmp == ring->rp); in mhi_is_ring_full()
1121 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue()
1122 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue()
1123 mhi_dev->dl_chan; in mhi_queue()
1124 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue()
1128 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1129 return -EIO; in mhi_queue()
1133 return -EAGAIN; in mhi_queue()
1139 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1142 * for host->device buffer, balanced put is done on buffer completion in mhi_queue()
1143 * for device->host buffer, balanced put is after ringing the DB in mhi_queue()
1145 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1148 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1150 if (mhi_chan->dir == DMA_TO_DEVICE) in mhi_queue()
1151 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1157 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1159 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1167 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_skb()
1168 mhi_dev->dl_chan; in mhi_queue_skb()
1171 buf_info.v_addr = skb->data; in mhi_queue_skb()
1175 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_skb()
1176 return -EINVAL; in mhi_queue_skb()
1185 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_dma()
1186 mhi_dev->dl_chan; in mhi_queue_dma()
1189 buf_info.p_addr = mhi_buf->dma_addr; in mhi_queue_dma()
1194 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_dma()
1195 return -EINVAL; in mhi_queue_dma()
1211 write_lock_bh(&mhi_chan->lock); in mhi_gen_tre()
1213 buf_ring = &mhi_chan->buf_ring; in mhi_gen_tre()
1214 tre_ring = &mhi_chan->tre_ring; in mhi_gen_tre()
1216 buf_info = buf_ring->wp; in mhi_gen_tre()
1217 WARN_ON(buf_info->used); in mhi_gen_tre()
1218 buf_info->pre_mapped = info->pre_mapped; in mhi_gen_tre()
1219 if (info->pre_mapped) in mhi_gen_tre()
1220 buf_info->p_addr = info->p_addr; in mhi_gen_tre()
1222 buf_info->v_addr = info->v_addr; in mhi_gen_tre()
1223 buf_info->cb_buf = info->cb_buf; in mhi_gen_tre()
1224 buf_info->wp = tre_ring->wp; in mhi_gen_tre()
1225 buf_info->dir = mhi_chan->dir; in mhi_gen_tre()
1226 buf_info->len = info->len; in mhi_gen_tre()
1228 if (!info->pre_mapped) { in mhi_gen_tre()
1229 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1231 write_unlock_bh(&mhi_chan->lock); in mhi_gen_tre()
1239 bei = !!(mhi_chan->intmod); in mhi_gen_tre()
1241 mhi_tre = tre_ring->wp; in mhi_gen_tre()
1242 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); in mhi_gen_tre()
1243 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); in mhi_gen_tre()
1244 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); in mhi_gen_tre()
1250 write_unlock_bh(&mhi_chan->lock); in mhi_gen_tre()
1270 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full()
1272 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_queue_is_full()
1273 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue_is_full()
1281 enum mhi_cmd_type cmd) in mhi_send_cmd() argument
1284 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1285 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_send_cmd()
1286 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1290 chan = mhi_chan->chan; in mhi_send_cmd()
1292 spin_lock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1294 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1295 return -ENOMEM; in mhi_send_cmd()
1298 /* prepare the cmd tre */ in mhi_send_cmd()
1299 cmd_tre = ring->wp; in mhi_send_cmd()
1300 switch (cmd) { in mhi_send_cmd()
1302 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; in mhi_send_cmd()
1303 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; in mhi_send_cmd()
1304 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); in mhi_send_cmd()
1307 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; in mhi_send_cmd()
1308 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; in mhi_send_cmd()
1309 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); in mhi_send_cmd()
1312 cmd_tre->ptr = MHI_TRE_CMD_START_PTR; in mhi_send_cmd()
1313 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; in mhi_send_cmd()
1314 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); in mhi_send_cmd()
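
mhi_send_cmd() reserves one element on the primary command ring, fills its ptr/dword[0]/dword[1] fields according to the command type, and then rings the command doorbell. A sketch of that dispatch pattern; the *_DWORD1 values below are illustrative placeholders, not the real MHI encodings that live in the driver's MHI_TRE_CMD_* macros:

#include <stdint.h>
#include <stdio.h>

/* Layout mirrors a command ring element (TRE). */
struct ring_element {
	uint64_t ptr;
	uint32_t dword[2];
};

enum cmd_type { CMD_RESET_CHAN, CMD_STOP_CHAN, CMD_START_CHAN };

/* Placeholder encodings, made up for this sketch only. */
#define CMD_RESET_DWORD1(chan)	(((chan) << 16) | 0x1)
#define CMD_STOP_DWORD1(chan)	(((chan) << 16) | 0x2)
#define CMD_START_DWORD1(chan)	(((chan) << 16) | 0x3)

/* Same shape as the switch in mhi_send_cmd(): pick the TRE contents from the
 * requested command; an unsupported command is rejected. */
static int fill_cmd_tre(struct ring_element *tre, enum cmd_type cmd, unsigned int chan)
{
	tre->ptr = 0;
	tre->dword[0] = 0;

	switch (cmd) {
	case CMD_RESET_CHAN:
		tre->dword[1] = CMD_RESET_DWORD1(chan);
		break;
	case CMD_STOP_CHAN:
		tre->dword[1] = CMD_STOP_DWORD1(chan);
		break;
	case CMD_START_CHAN:
		tre->dword[1] = CMD_START_DWORD1(chan);
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ring_element tre;

	if (!fill_cmd_tre(&tre, CMD_START_CHAN, 20))
		printf("dword1 = 0x%08x\n", tre.dword[1]);
	return 0;
}
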
1323 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1326 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1327 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1336 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_update_channel_state()
1337 enum mhi_cmd_type cmd = MHI_CMD_NOP; in mhi_update_channel_state() local
1340 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, in mhi_update_channel_state()
1345 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1346 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1347 mhi_chan->ch_state != MHI_CH_STATE_ENABLED && in mhi_update_channel_state()
1348 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { in mhi_update_channel_state()
1349 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1350 return -EINVAL; in mhi_update_channel_state()
1352 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_update_channel_state()
1353 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1355 cmd = MHI_CMD_RESET_CHAN; in mhi_update_channel_state()
1358 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in mhi_update_channel_state()
1359 return -EINVAL; in mhi_update_channel_state()
1361 cmd = MHI_CMD_STOP_CHAN; in mhi_update_channel_state()
1364 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1365 mhi_chan->ch_state != MHI_CH_STATE_DISABLED) in mhi_update_channel_state()
1366 return -EINVAL; in mhi_update_channel_state()
1368 cmd = MHI_CMD_START_CHAN; in mhi_update_channel_state()
1372 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1373 return -EINVAL; in mhi_update_channel_state()
1377 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1380 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1382 reinit_completion(&mhi_chan->completion); in mhi_update_channel_state()
1383 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); in mhi_update_channel_state()
1386 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1390 ret = wait_for_completion_timeout(&mhi_chan->completion, in mhi_update_channel_state()
1391 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1392 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { in mhi_update_channel_state()
1395 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1396 ret = -EIO; in mhi_update_channel_state()
1403 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1404 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? in mhi_update_channel_state()
1406 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1410 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1413 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1414 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
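
mhi_update_channel_state() is a guarded state machine: each target state is reachable only from a small set of current channel states, and every valid transition maps to exactly one command ring operation before the function waits for the command completion. A standalone sketch of that precondition check, following the cases visible above:

#include <stdio.h>

enum ch_state { CH_DISABLED, CH_ENABLED, CH_STOP, CH_SUSPENDED };
enum ch_target { TARGET_RESET, TARGET_STOP, TARGET_START };
enum ch_cmd { CMD_NOP, CMD_RESET_CHAN, CMD_STOP_CHAN, CMD_START_CHAN };

/* Same preconditions as the switch in mhi_update_channel_state(): returns the
 * command to send, or CMD_NOP for an invalid transition (where the kernel
 * returns -EINVAL instead). */
static enum ch_cmd pick_cmd(enum ch_state cur, enum ch_target target)
{
	switch (target) {
	case TARGET_RESET:
		if (cur != CH_STOP && cur != CH_ENABLED && cur != CH_SUSPENDED)
			return CMD_NOP;
		return CMD_RESET_CHAN;
	case TARGET_STOP:
		if (cur != CH_ENABLED)
			return CMD_NOP;
		return CMD_STOP_CHAN;
	case TARGET_START:
		if (cur != CH_STOP && cur != CH_DISABLED)
			return CMD_NOP;
		return CMD_START_CHAN;
	}
	return CMD_NOP;
}

int main(void)
{
	/* Starting a channel that is already ENABLED is rejected... */
	printf("%d\n", pick_cmd(CH_ENABLED, TARGET_START));	/* 0 == CMD_NOP */
	/* ...while stopping it maps to the STOP channel command. */
	printf("%d\n", pick_cmd(CH_ENABLED, TARGET_STOP));	/* 2 == CMD_STOP_CHAN */
	return 0;
}
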
1423 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_unprepare_channel()
1425 mutex_lock(&mhi_chan->mutex); in mhi_unprepare_channel()
1427 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1429 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1438 mhi_chan->chan); in mhi_unprepare_channel()
1441 write_lock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1442 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_unprepare_channel()
1443 write_unlock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1445 if (!mhi_chan->offload_ch) { in mhi_unprepare_channel()
1449 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); in mhi_unprepare_channel()
1451 mutex_unlock(&mhi_chan->mutex); in mhi_unprepare_channel()
1458 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_prepare_channel()
1460 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1462 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1463 return -ENOTCONN; in mhi_prepare_channel()
1466 mutex_lock(&mhi_chan->mutex); in mhi_prepare_channel()
1469 if (!mhi_chan->offload_ch) { in mhi_prepare_channel()
1480 if (mhi_chan->dir == DMA_FROM_DEVICE) in mhi_prepare_channel()
1481 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); in mhi_prepare_channel()
1483 /* Pre-allocate buffer for xfer ring */ in mhi_prepare_channel()
1484 if (mhi_chan->pre_alloc) { in mhi_prepare_channel()
1486 &mhi_chan->tre_ring); in mhi_prepare_channel()
1487 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1489 while (nr_el--) { in mhi_prepare_channel()
1495 ret = -ENOMEM; in mhi_prepare_channel()
1510 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1512 read_lock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1514 read_unlock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1516 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1519 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1524 if (!mhi_chan->offload_ch) in mhi_prepare_channel()
1528 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1533 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1547 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1553 ev_ring = &mhi_event->ring; in mhi_mark_stale_events()
1556 spin_lock_irqsave(&mhi_event->lock, flags); in mhi_mark_stale_events()
1558 ptr = le64_to_cpu(er_ctxt->rp); in mhi_mark_stale_events()
1560 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1562 dev_rp = ev_ring->rp; in mhi_mark_stale_events()
1567 local_rp = ev_ring->rp; in mhi_mark_stale_events()
1571 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, in mhi_mark_stale_events()
1574 if (local_rp == (ev_ring->base + ev_ring->len)) in mhi_mark_stale_events()
1575 local_rp = ev_ring->base; in mhi_mark_stale_events()
1579 spin_unlock_irqrestore(&mhi_event->lock, flags); in mhi_mark_stale_events()
1589 buf_ring = &mhi_chan->buf_ring; in mhi_reset_data_chan()
1590 tre_ring = &mhi_chan->tre_ring; in mhi_reset_data_chan()
1591 result.transaction_status = -ENOTCONN; in mhi_reset_data_chan()
1593 while (tre_ring->rp != tre_ring->wp) { in mhi_reset_data_chan()
1594 struct mhi_buf_info *buf_info = buf_ring->rp; in mhi_reset_data_chan()
1596 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_reset_data_chan()
1597 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1599 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1602 if (!buf_info->pre_mapped) in mhi_reset_data_chan()
1603 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1608 if (mhi_chan->pre_alloc) { in mhi_reset_data_chan()
1609 kfree(buf_info->cb_buf); in mhi_reset_data_chan()
1611 result.buf_addr = buf_info->cb_buf; in mhi_reset_data_chan()
1612 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_reset_data_chan()
1621 int chan = mhi_chan->chan; in mhi_reset_chan()
1624 if (mhi_chan->offload_ch) in mhi_reset_chan()
1627 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1628 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1629 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1635 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1641 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer()
1645 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1657 for (--dir; dir >= 0; dir--) { in __mhi_prepare_for_transfer()
1658 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1682 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer()
1687 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_unprepare_from_transfer()