Lines Matching +full:cmd +full:- +full:db

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
23 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
50 while (retry--) { in mhi_poll_reg_field()
61 return -ETIMEDOUT; in mhi_poll_reg_field()
67 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
100 if (db_cfg->db_mode) { in mhi_db_brstmode()
101 db_cfg->db_val = db_val; in mhi_db_brstmode()
103 db_cfg->db_mode = 0; in mhi_db_brstmode()
112 db_cfg->db_val = db_val; in mhi_db_brstmode_disable()
118 struct mhi_ring *ring = &mhi_event->ring; in mhi_ring_er_db()
120 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
121 ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); in mhi_ring_er_db()
126 dma_addr_t db; in mhi_ring_cmd_db() local
127 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_ring_cmd_db()
129 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_cmd_db()
130 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_cmd_db()
131 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
137 struct mhi_ring *ring = &mhi_chan->tre_ring; in mhi_ring_chan_db()
138 dma_addr_t db; in mhi_ring_chan_db() local
140 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_chan_db()
147 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_chan_db()
149 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
150 ring->db_addr, db); in mhi_ring_chan_db()
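
The doorbell helpers above (mhi_ring_cmd_db() and mhi_ring_chan_db()) derive the device-visible write-pointer address from the host-side ring pointers, publish it in the ring context and then write it to the doorbell register. A minimal standalone sketch of just that address arithmetic follows; the struct and function names are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the ring bookkeeping above (not the kernel's struct mhi_ring). */
    struct ring {
        void *base;          /* host virtual base of the ring */
        void *wp;            /* host virtual write pointer */
        uint64_t iommu_base; /* device-visible (DMA) base address of the ring */
    };

    /* Device-visible address of the write pointer: iommu_base + (wp - base). */
    static uint64_t ring_db_value(const struct ring *r)
    {
        return r->iommu_base + (uint64_t)((char *)r->wp - (char *)r->base);
    }

    int main(void)
    {
        char buf[16 * 128]; /* pretend ring of 16 elements, 128 bytes each */
        struct ring r = { buf, buf + 3 * 128, 0x80000000ULL };

        /* In the driver this value is stored little-endian in *ring->ctxt_wp and
         * then written to the channel or command doorbell register. */
        printf("doorbell value: 0x%llx\n", (unsigned long long)ring_db_value(&r));
        return 0;
    }
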
156 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
165 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
173 if (mhi_cntrl->reset) { in mhi_soc_reset()
174 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
179 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
187 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
188 buf_info->v_addr, buf_info->len, in mhi_map_single_no_bb()
189 buf_info->dir); in mhi_map_single_no_bb()
190 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
191 return -ENOMEM; in mhi_map_single_no_bb()
199 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
200 &buf_info->p_addr, GFP_ATOMIC); in mhi_map_single_use_bb()
203 return -ENOMEM; in mhi_map_single_use_bb()
205 if (buf_info->dir == DMA_TO_DEVICE) in mhi_map_single_use_bb()
206 memcpy(buf, buf_info->v_addr, buf_info->len); in mhi_map_single_use_bb()
208 buf_info->bb_addr = buf; in mhi_map_single_use_bb()
216 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
217 buf_info->dir); in mhi_unmap_single_no_bb()
223 if (buf_info->dir == DMA_FROM_DEVICE) in mhi_unmap_single_use_bb()
224 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); in mhi_unmap_single_use_bb()
226 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
227 buf_info->bb_addr, buf_info->p_addr); in mhi_unmap_single_use_bb()
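
mhi_map_single_use_bb() and mhi_unmap_single_use_bb() above implement the bounce-buffer path: data is copied into the coherent buffer at map time only for host-to-device transfers, and copied back to the client buffer at unmap time only for device-to-host transfers. A simplified user-space sketch of that direction-dependent copy, with malloc() standing in for dma_alloc_coherent() and all names illustrative:

    #include <stdlib.h>
    #include <string.h>

    enum xfer_dir { TO_DEVICE, FROM_DEVICE };

    struct buf_info {
        void *v_addr;  /* client buffer */
        void *bb_addr; /* bounce buffer (stands in for the coherent DMA allocation) */
        size_t len;
        enum xfer_dir dir;
    };

    /* Map: allocate the bounce buffer and pre-copy outbound data into it. */
    static int map_use_bb(struct buf_info *b)
    {
        b->bb_addr = malloc(b->len);
        if (!b->bb_addr)
            return -1;
        if (b->dir == TO_DEVICE)
            memcpy(b->bb_addr, b->v_addr, b->len);
        return 0;
    }

    /* Unmap: copy inbound data back to the client, then release the bounce buffer. */
    static void unmap_use_bb(struct buf_info *b)
    {
        if (b->dir == FROM_DEVICE)
            memcpy(b->v_addr, b->bb_addr, b->len);
        free(b->bb_addr);
    }

    int main(void)
    {
        char payload[64] = "hello";
        struct buf_info b = { payload, NULL, sizeof(payload), TO_DEVICE };

        if (!map_use_bb(&b))
            unmap_use_bb(&b);
        return 0;
    }
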
235 if (ring->wp < ring->rp) { in get_nr_avail_ring_elements()
236 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; in get_nr_avail_ring_elements()
238 nr_el = (ring->rp - ring->base) / ring->el_size; in get_nr_avail_ring_elements()
239 nr_el += ((ring->base + ring->len - ring->wp) / in get_nr_avail_ring_elements()
240 ring->el_size) - 1; in get_nr_avail_ring_elements()
248 return (addr - ring->iommu_base) + ring->base; in mhi_to_virtual()
254 ring->wp += ring->el_size; in mhi_add_ring_element()
255 if (ring->wp >= (ring->base + ring->len)) in mhi_add_ring_element()
256 ring->wp = ring->base; in mhi_add_ring_element()
264 ring->rp += ring->el_size; in mhi_del_ring_element()
265 if (ring->rp >= (ring->base + ring->len)) in mhi_del_ring_element()
266 ring->rp = ring->base; in mhi_del_ring_element()
273 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && in is_valid_ring_ptr()
274 !(addr & (sizeof(struct mhi_ring_element) - 1)); in is_valid_ring_ptr()
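
The ring helpers above (get_nr_avail_ring_elements(), mhi_to_virtual(), mhi_add_ring_element(), mhi_del_ring_element() and is_valid_ring_ptr()) all operate on the same circular-buffer layout: host virtual base/read/write pointers plus a device-visible IOMMU base. A consolidated standalone sketch of that arithmetic, with illustrative types rather than the kernel's struct mhi_ring:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define EL_SIZE 16u /* element size; the driver uses sizeof(struct mhi_ring_element) */

    struct ring {
        void *base, *rp, *wp; /* host virtual base, read pointer, write pointer */
        size_t len, el_size;  /* ring length in bytes, element size */
        uint64_t iommu_base;  /* device-visible base address */
    };

    /* Free elements, keeping one slot empty so a full ring differs from an empty one. */
    static size_t nr_avail(const struct ring *r)
    {
        if ((char *)r->wp < (char *)r->rp)
            return ((char *)r->rp - (char *)r->wp) / r->el_size - 1;
        return ((char *)r->rp - (char *)r->base) / r->el_size +
               ((char *)r->base + r->len - (char *)r->wp) / r->el_size - 1;
    }

    /* Translate a device-reported DMA address back into a host virtual pointer. */
    static void *to_virtual(const struct ring *r, uint64_t addr)
    {
        return (char *)r->base + (addr - r->iommu_base);
    }

    /* Advance a ring pointer by one element, wrapping at the end of the ring. */
    static void *advance(const struct ring *r, void *p)
    {
        p = (char *)p + r->el_size;
        if ((char *)p >= (char *)r->base + r->len)
            p = r->base;
        return p;
    }

    /* A device pointer is valid if it lies inside the ring and is element-aligned. */
    static bool is_valid_ptr(const struct ring *r, uint64_t addr)
    {
        return addr >= r->iommu_base && addr < r->iommu_base + r->len &&
               !(addr & (EL_SIZE - 1));
    }

    int main(void)
    {
        static char buf[32 * EL_SIZE];
        struct ring r = { buf, buf, buf, sizeof(buf), EL_SIZE, 0x1000 };

        r.wp = advance(&r, r.wp); /* queue one element */
        return (nr_avail(&r) == 30 && is_valid_ptr(&r, 0x1010) &&
                to_virtual(&r, 0x1010) == (void *)(buf + EL_SIZE)) ? 0 : 1;
    }
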
284 if (dev->bus != &mhi_bus_type) in mhi_destroy_device()
288 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
291 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_destroy_device()
294 ul_chan = mhi_dev->ul_chan; in mhi_destroy_device()
295 dl_chan = mhi_dev->dl_chan; in mhi_destroy_device()
313 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
316 put_device(&ul_chan->mhi_dev->dev); in mhi_destroy_device()
320 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
323 put_device(&dl_chan->mhi_dev->dev); in mhi_destroy_device()
326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
327 mhi_dev->name); in mhi_destroy_device()
339 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count()
341 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_get_free_desc_count()
342 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_get_free_desc_count()
352 if (!mhi_dev->dev.driver) in mhi_notify()
355 mhi_drv = to_mhi_driver(mhi_dev->dev.driver); in mhi_notify()
357 if (mhi_drv->status_cb) in mhi_notify()
358 mhi_drv->status_cb(mhi_dev, cb_reason); in mhi_notify()
367 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
370 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
371 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
372 if (!mhi_chan->configured || mhi_chan->mhi_dev || in mhi_create_devices()
373 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
379 mhi_dev->dev_type = MHI_DEVICE_XFER; in mhi_create_devices()
380 switch (mhi_chan->dir) { in mhi_create_devices()
382 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
383 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
387 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
388 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
392 put_device(&mhi_dev->dev); in mhi_create_devices()
396 get_device(&mhi_dev->dev); in mhi_create_devices()
397 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
400 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
401 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { in mhi_create_devices()
404 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_create_devices()
405 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
406 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
408 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
409 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
411 get_device(&mhi_dev->dev); in mhi_create_devices()
412 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
417 mhi_dev->name = mhi_chan->name; in mhi_create_devices()
418 dev_set_name(&mhi_dev->dev, "%s_%s", in mhi_create_devices()
419 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
420 mhi_dev->name); in mhi_create_devices()
423 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) in mhi_create_devices()
424 device_init_wakeup(&mhi_dev->dev, true); in mhi_create_devices()
426 ret = device_add(&mhi_dev->dev); in mhi_create_devices()
428 put_device(&mhi_dev->dev); in mhi_create_devices()
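
In the device-creation loop above, a configured channel whose immediate neighbour carries the same name is treated as the other direction of the same logical device, so both the uplink and downlink channels end up bound to a single mhi_dev. Only fragments of that logic are visible here, so the following is a simplified sketch of the pairing rule with illustrative types and names:

    #include <string.h>

    enum xfer_dir { TO_DEVICE, FROM_DEVICE };

    struct chan {
        const char *name;
        enum xfer_dir dir;
        int configured;
    };

    struct dev {
        const struct chan *ul, *dl; /* uplink/downlink channels bound to this device */
    };

    /* Bind chan[i] to a new device; if chan[i + 1] is configured with the same name,
     * bind it to the same device according to its own direction. Returns the number
     * of channels consumed. */
    static int bind_channels(const struct chan *c, int i, int max, struct dev *d)
    {
        int used = 1;

        if (c[i].dir == TO_DEVICE)
            d->ul = &c[i];
        else
            d->dl = &c[i];

        if (i + 1 < max && c[i + 1].configured && !strcmp(c[i + 1].name, c[i].name)) {
            if (c[i + 1].dir == TO_DEVICE)
                d->ul = &c[i + 1];
            else
                d->dl = &c[i + 1];
            used = 2;
        }
        return used;
    }

    int main(void)
    {
        const struct chan chans[] = {
            { "IPCR", TO_DEVICE, 1 },
            { "IPCR", FROM_DEVICE, 1 },
        };
        struct dev d = { 0, 0 };

        return (bind_channels(chans, 0, 2, &d) == 2 && d.ul && d.dl) ? 0 : 1;
    }
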
435 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler()
437 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_irq_handler()
446 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
447 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
452 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
453 ptr = le64_to_cpu(er_ctxt->rp); in mhi_irq_handler()
456 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
464 if (ev_ring->rp == dev_rp) in mhi_irq_handler()
468 if (mhi_event->cl_manage) { in mhi_irq_handler()
469 struct mhi_chan *mhi_chan = mhi_event->mhi_chan; in mhi_irq_handler()
470 struct mhi_device *mhi_dev = mhi_chan->mhi_dev; in mhi_irq_handler()
475 tasklet_schedule(&mhi_event->task); in mhi_irq_handler()
484 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
489 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
490 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
491 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
504 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
514 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
515 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
522 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
523 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
527 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
542 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
551 ring->wp += ring->el_size; in mhi_recycle_ev_ring_element()
553 if (ring->wp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
554 ring->wp = ring->base; in mhi_recycle_ev_ring_element()
556 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); in mhi_recycle_ev_ring_element()
559 ring->rp += ring->el_size; in mhi_recycle_ev_ring_element()
560 if (ring->rp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
561 ring->rp = ring->base; in mhi_recycle_ev_ring_element()
572 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
578 buf_ring = &mhi_chan->buf_ring; in parse_xfer_event()
579 tre_ring = &mhi_chan->tre_ring; in parse_xfer_event()
582 -EOVERFLOW : 0; in parse_xfer_event()
585 * If it's a DB Event then we need to grab the lock in parse_xfer_event()
587 * have to update db register and there are chances that in parse_xfer_event()
591 write_lock_irqsave(&mhi_chan->lock, flags); in parse_xfer_event()
593 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
595 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_xfer_event()
610 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
618 if (dev_rp >= (tre_ring->base + tre_ring->len)) in parse_xfer_event()
619 dev_rp = tre_ring->base; in parse_xfer_event()
621 result.dir = mhi_chan->dir; in parse_xfer_event()
623 local_rp = tre_ring->rp; in parse_xfer_event()
625 buf_info = buf_ring->rp; in parse_xfer_event()
630 xfer_len = buf_info->len; in parse_xfer_event()
632 /* Unmap if it's not pre-mapped by client */ in parse_xfer_event()
633 if (likely(!buf_info->pre_mapped)) in parse_xfer_event()
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
636 result.buf_addr = buf_info->cb_buf; in parse_xfer_event()
640 min_t(u16, xfer_len, buf_info->len); in parse_xfer_event()
643 local_rp = tre_ring->rp; in parse_xfer_event()
645 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
648 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_xfer_event()
650 if (mhi_chan->dir == DMA_TO_DEVICE) { in parse_xfer_event()
651 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
653 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
657 * Recycle the buffer if buffer is pre-allocated, in parse_xfer_event()
661 if (mhi_chan->pre_alloc) { in parse_xfer_event()
662 if (mhi_queue_buf(mhi_chan->mhi_dev, in parse_xfer_event()
663 mhi_chan->dir, in parse_xfer_event()
664 buf_info->cb_buf, in parse_xfer_event()
665 buf_info->len, MHI_EOT)) { in parse_xfer_event()
668 mhi_chan->chan); in parse_xfer_event()
669 kfree(buf_info->cb_buf); in parse_xfer_event()
673 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
682 mhi_chan->db_cfg.db_mode = 1; in parse_xfer_event()
683 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
684 if (tre_ring->wp != tre_ring->rp && in parse_xfer_event()
688 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
699 write_unlock_irqrestore(&mhi_chan->lock, flags); in parse_xfer_event()
701 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
717 buf_ring = &mhi_chan->buf_ring; in parse_rsc_event()
718 tre_ring = &mhi_chan->tre_ring; in parse_rsc_event()
725 WARN_ON(cookie >= buf_ring->len); in parse_rsc_event()
727 buf_info = buf_ring->base + cookie; in parse_rsc_event()
730 -EOVERFLOW : 0; in parse_rsc_event()
733 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); in parse_rsc_event()
734 result.buf_addr = buf_info->cb_buf; in parse_rsc_event()
735 result.dir = mhi_chan->dir; in parse_rsc_event()
737 read_lock_bh(&mhi_chan->lock); in parse_rsc_event()
739 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_rsc_event()
742 WARN_ON(!buf_info->used); in parse_rsc_event()
745 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_rsc_event()
751 * receive, so even though completion event is different we can re-use in parse_rsc_event()
762 buf_info->used = false; in parse_rsc_event()
765 read_unlock_bh(&mhi_chan->lock); in parse_rsc_event()
774 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
775 struct mhi_ring *mhi_ring = &cmd_ring->ring; in mhi_process_cmd_completion()
781 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
782 "Event element points outside of the cmd ring\n"); in mhi_process_cmd_completion()
790 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
791 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
792 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
793 write_lock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
794 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); in mhi_process_cmd_completion()
795 complete(&mhi_chan->completion); in mhi_process_cmd_completion()
796 write_unlock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
798 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
810 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_ctrl_ev_ring()
812 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
814 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
817 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
824 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
825 return -EIO; in mhi_process_ctrl_ev_ring()
828 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
830 return -EIO; in mhi_process_ctrl_ev_ring()
834 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
846 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
847 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
848 link_info->target_link_speed = in mhi_process_ctrl_ev_ring()
850 link_info->target_link_width = in mhi_process_ctrl_ev_ring()
852 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
881 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
884 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
919 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
920 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
921 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
922 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
936 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
942 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
943 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
944 if (!mhi_chan->configured) in mhi_process_ctrl_ev_ring()
955 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
957 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
959 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
961 return -EIO; in mhi_process_ctrl_ev_ring()
968 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
970 /* Ring EV DB only if there is any pending element to process */ in mhi_process_ctrl_ev_ring()
973 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
983 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_data_event_ring()
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
989 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
992 return -EIO; in mhi_process_data_event_ring()
995 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
997 return -EIO; in mhi_process_data_event_ring()
1001 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1010 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1016 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1017 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1018 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1022 event_quota--; in mhi_process_data_event_ring()
1025 event_quota--; in mhi_process_data_event_ring()
1030 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1032 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
1034 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1036 return -EIO; in mhi_process_data_event_ring()
1042 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1044 /* Ring EV DB only if there is any pending element to process */ in mhi_process_data_event_ring()
1047 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task()
1058 spin_lock_bh(&mhi_event->lock); in mhi_ev_task()
1059 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1060 spin_unlock_bh(&mhi_event->lock); in mhi_ev_task()
1066 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task()
1067 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1077 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1089 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1096 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1103 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1112 void *tmp = ring->wp + ring->el_size; in mhi_is_ring_full()
1114 if (tmp >= (ring->base + ring->len)) in mhi_is_ring_full()
1115 tmp = ring->base; in mhi_is_ring_full()
1117 return (tmp == ring->rp); in mhi_is_ring_full()
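
mhi_is_ring_full() above reports the ring as full when advancing the write pointer by one element, with wrap-around, would make it land on the read pointer. A standalone sketch of that check (types illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct ring {
        char *base, *rp, *wp; /* host virtual base, read pointer, write pointer */
        size_t len, el_size;
    };

    /* Full when the next write-pointer position (after wrap-around) equals the read pointer. */
    static bool ring_is_full(const struct ring *r)
    {
        char *next = r->wp + r->el_size;

        if (next >= r->base + r->len)
            next = r->base;
        return next == r->rp;
    }

    int main(void)
    {
        static char buf[4 * 16];
        struct ring r = { buf, buf, buf + 3 * 16, sizeof(buf), 16 };

        return ring_is_full(&r) ? 0 : 1; /* wp wraps onto rp, so the ring is full */
    }
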
1123 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue()
1124 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue()
1125 mhi_dev->dl_chan; in mhi_queue()
1126 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue()
1130 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1131 return -EIO; in mhi_queue()
1135 return -EAGAIN; in mhi_queue()
1141 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1144 * for host->device buffer, balanced put is done on buffer completion in mhi_queue()
1145 * for device->host buffer, balanced put is after ringing the DB in mhi_queue()
1147 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1150 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1152 if (mhi_chan->dir == DMA_TO_DEVICE) in mhi_queue()
1153 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1159 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1161 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1169 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_skb()
1170 mhi_dev->dl_chan; in mhi_queue_skb()
1173 buf_info.v_addr = skb->data; in mhi_queue_skb()
1177 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_skb()
1178 return -EINVAL; in mhi_queue_skb()
1194 write_lock_bh(&mhi_chan->lock); in mhi_gen_tre()
1196 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { in mhi_gen_tre()
1197 ret = -ENODEV; in mhi_gen_tre()
1201 buf_ring = &mhi_chan->buf_ring; in mhi_gen_tre()
1202 tre_ring = &mhi_chan->tre_ring; in mhi_gen_tre()
1204 buf_info = buf_ring->wp; in mhi_gen_tre()
1205 WARN_ON(buf_info->used); in mhi_gen_tre()
1206 buf_info->pre_mapped = info->pre_mapped; in mhi_gen_tre()
1207 if (info->pre_mapped) in mhi_gen_tre()
1208 buf_info->p_addr = info->p_addr; in mhi_gen_tre()
1210 buf_info->v_addr = info->v_addr; in mhi_gen_tre()
1211 buf_info->cb_buf = info->cb_buf; in mhi_gen_tre()
1212 buf_info->wp = tre_ring->wp; in mhi_gen_tre()
1213 buf_info->dir = mhi_chan->dir; in mhi_gen_tre()
1214 buf_info->len = info->len; in mhi_gen_tre()
1216 if (!info->pre_mapped) { in mhi_gen_tre()
1217 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1225 bei = !!(mhi_chan->intmod); in mhi_gen_tre()
1227 mhi_tre = tre_ring->wp; in mhi_gen_tre()
1228 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); in mhi_gen_tre()
1229 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); in mhi_gen_tre()
1230 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); in mhi_gen_tre()
1238 write_unlock_bh(&mhi_chan->lock); in mhi_gen_tre()
1258 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full()
1260 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_queue_is_full()
1261 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue_is_full()
1269 enum mhi_cmd_type cmd) in mhi_send_cmd() argument
1272 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1273 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_send_cmd()
1274 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1278 chan = mhi_chan->chan; in mhi_send_cmd()
1280 spin_lock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1282 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1283 return -ENOMEM; in mhi_send_cmd()
1286 /* prepare the cmd tre */ in mhi_send_cmd()
1287 cmd_tre = ring->wp; in mhi_send_cmd()
1288 switch (cmd) { in mhi_send_cmd()
1290 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; in mhi_send_cmd()
1291 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; in mhi_send_cmd()
1292 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); in mhi_send_cmd()
1295 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; in mhi_send_cmd()
1296 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; in mhi_send_cmd()
1297 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); in mhi_send_cmd()
1300 cmd_tre->ptr = MHI_TRE_CMD_START_PTR; in mhi_send_cmd()
1301 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; in mhi_send_cmd()
1302 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); in mhi_send_cmd()
1311 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1314 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1315 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1324 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_update_channel_state()
1325 enum mhi_cmd_type cmd = MHI_CMD_NOP; in mhi_update_channel_state() local
1331 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1332 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1333 mhi_chan->ch_state != MHI_CH_STATE_ENABLED && in mhi_update_channel_state()
1334 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { in mhi_update_channel_state()
1335 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1336 return -EINVAL; in mhi_update_channel_state()
1338 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_update_channel_state()
1339 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1341 cmd = MHI_CMD_RESET_CHAN; in mhi_update_channel_state()
1344 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in mhi_update_channel_state()
1345 return -EINVAL; in mhi_update_channel_state()
1347 cmd = MHI_CMD_STOP_CHAN; in mhi_update_channel_state()
1350 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1351 mhi_chan->ch_state != MHI_CH_STATE_DISABLED) in mhi_update_channel_state()
1352 return -EINVAL; in mhi_update_channel_state()
1354 cmd = MHI_CMD_START_CHAN; in mhi_update_channel_state()
1358 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1359 return -EINVAL; in mhi_update_channel_state()
1363 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1366 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1368 reinit_completion(&mhi_chan->completion); in mhi_update_channel_state()
1369 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); in mhi_update_channel_state()
1372 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1376 ret = wait_for_completion_timeout(&mhi_chan->completion, in mhi_update_channel_state()
1377 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1378 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { in mhi_update_channel_state()
1381 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1382 ret = -EIO; in mhi_update_channel_state()
1389 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1390 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? in mhi_update_channel_state()
1392 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1397 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1398 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
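
mhi_update_channel_state() above validates the channel's current state before choosing which command to send (RESET, STOP or START), and only commits the new state once the command completion arrives with MHI_EV_CC_SUCCESS. A minimal sketch of that state-to-command mapping, with illustrative enums rather than the kernel's definitions:

    #include <stdio.h>

    enum ch_state  { CH_DISABLED, CH_ENABLED, CH_STOP, CH_SUSPENDED };
    enum ch_target { TARGET_RESET, TARGET_STOP, TARGET_START };
    enum ch_cmd    { CMD_NOP, CMD_RESET_CHAN, CMD_STOP_CHAN, CMD_START_CHAN };

    /* Pick the command for a requested transition; CMD_NOP means the transition is
     * rejected because the current state does not allow it. */
    static enum ch_cmd pick_cmd(enum ch_state cur, enum ch_target target)
    {
        switch (target) {
        case TARGET_RESET:
            if (cur != CH_STOP && cur != CH_ENABLED && cur != CH_SUSPENDED)
                return CMD_NOP;
            return CMD_RESET_CHAN;
        case TARGET_STOP:
            if (cur != CH_ENABLED)
                return CMD_NOP;
            return CMD_STOP_CHAN;
        case TARGET_START:
            if (cur != CH_STOP && cur != CH_DISABLED)
                return CMD_NOP;
            return CMD_START_CHAN;
        }
        return CMD_NOP;
    }

    int main(void)
    {
        /* Starting a channel is only legal from the STOP or DISABLED states. */
        printf("start from DISABLED -> START_CHAN: %d\n",
               pick_cmd(CH_DISABLED, TARGET_START) == CMD_START_CHAN);
        printf("stop from DISABLED  -> rejected:   %d\n",
               pick_cmd(CH_DISABLED, TARGET_STOP) == CMD_NOP);
        return 0;
    }
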
1407 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_unprepare_channel()
1409 mutex_lock(&mhi_chan->mutex); in mhi_unprepare_channel()
1411 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1413 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1422 mhi_chan->chan); in mhi_unprepare_channel()
1425 write_lock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1426 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_unprepare_channel()
1427 write_unlock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1429 if (!mhi_chan->offload_ch) { in mhi_unprepare_channel()
1433 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); in mhi_unprepare_channel()
1435 mutex_unlock(&mhi_chan->mutex); in mhi_unprepare_channel()
1442 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_prepare_channel()
1444 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1446 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1447 return -ENOTCONN; in mhi_prepare_channel()
1450 mutex_lock(&mhi_chan->mutex); in mhi_prepare_channel()
1453 if (!mhi_chan->offload_ch) { in mhi_prepare_channel()
1464 if (mhi_chan->dir == DMA_FROM_DEVICE) in mhi_prepare_channel()
1465 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); in mhi_prepare_channel()
1467 /* Pre-allocate buffer for xfer ring */ in mhi_prepare_channel()
1468 if (mhi_chan->pre_alloc) { in mhi_prepare_channel()
1470 &mhi_chan->tre_ring); in mhi_prepare_channel()
1471 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1473 while (nr_el--) { in mhi_prepare_channel()
1479 ret = -ENOMEM; in mhi_prepare_channel()
1494 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1496 read_lock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1498 read_unlock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1500 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1503 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1508 if (!mhi_chan->offload_ch) in mhi_prepare_channel()
1512 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1517 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1531 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1537 ev_ring = &mhi_event->ring; in mhi_mark_stale_events()
1540 spin_lock_irqsave(&mhi_event->lock, flags); in mhi_mark_stale_events()
1542 ptr = le64_to_cpu(er_ctxt->rp); in mhi_mark_stale_events()
1544 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1546 dev_rp = ev_ring->rp; in mhi_mark_stale_events()
1551 local_rp = ev_ring->rp; in mhi_mark_stale_events()
1555 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, in mhi_mark_stale_events()
1558 if (local_rp == (ev_ring->base + ev_ring->len)) in mhi_mark_stale_events()
1559 local_rp = ev_ring->base; in mhi_mark_stale_events()
1563 spin_unlock_irqrestore(&mhi_event->lock, flags); in mhi_mark_stale_events()
1573 buf_ring = &mhi_chan->buf_ring; in mhi_reset_data_chan()
1574 tre_ring = &mhi_chan->tre_ring; in mhi_reset_data_chan()
1575 result.transaction_status = -ENOTCONN; in mhi_reset_data_chan()
1577 while (tre_ring->rp != tre_ring->wp) { in mhi_reset_data_chan()
1578 struct mhi_buf_info *buf_info = buf_ring->rp; in mhi_reset_data_chan()
1580 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_reset_data_chan()
1581 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1583 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1586 if (!buf_info->pre_mapped) in mhi_reset_data_chan()
1587 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1592 if (mhi_chan->pre_alloc) { in mhi_reset_data_chan()
1593 kfree(buf_info->cb_buf); in mhi_reset_data_chan()
1595 result.buf_addr = buf_info->cb_buf; in mhi_reset_data_chan()
1596 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_reset_data_chan()
1605 int chan = mhi_chan->chan; in mhi_reset_chan()
1608 if (mhi_chan->offload_ch) in mhi_reset_chan()
1611 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1612 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1613 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1619 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1625 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer()
1629 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1641 for (--dir; dir >= 0; dir--) { in __mhi_prepare_for_transfer()
1642 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1666 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer()
1671 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_unprepare_from_transfer()
1682 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_get_channel_doorbell_offset()
1683 void __iomem *base = mhi_cntrl->regs; in mhi_get_channel_doorbell_offset()
1689 return -EIO; in mhi_get_channel_doorbell_offset()