Lines Matching +full:wp +full:- +full:controller

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
31 * L0: DISABLE <--> POR
32 *     POR <--> POR
33 *     POR -> M0 -> M2 --> M0
34 *     POR -> FW_DL_ERR
35 *     FW_DL_ERR <--> FW_DL_ERR
36 *     M0 <--> M0
37 *     M0 -> FW_DL_ERR
38 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
39 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
40 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
41 *     SHUTDOWN_PROCESS -> DISABLE
42 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
43 *     LD_ERR_FATAL_DETECT -> DISABLE
114 unsigned long cur_state = mhi_cntrl->pm_state; in mhi_tryset_pm_state()
126 mhi_cntrl->pm_state = state; in mhi_tryset_pm_state()
127 return mhi_cntrl->pm_state; in mhi_tryset_pm_state()
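
These hits come from the MHI host power-management code (drivers/bus/mhi/host/pm.c). mhi_tryset_pm_state() validates a requested PM state against the L0-L3 hierarchy in the comment above before committing it under pm_lock, and returns the resulting pm_state so the caller can tell whether the move was accepted. A minimal standalone sketch of that table-driven check follows; the state set and transition masks here are illustrative, not the driver's actual dev_state_transitions table.

/* Table-driven PM state check, modeled on mhi_tryset_pm_state().
 * States and masks are illustrative, not the kernel's full set. */
#include <stdio.h>

enum pm_state {
	PM_DISABLE	= 1 << 0,
	PM_POR		= 1 << 1,
	PM_M0		= 1 << 2,
	PM_M3_ENTER	= 1 << 3,
	PM_M3		= 1 << 4,
};

struct pm_transition {
	enum pm_state from;
	unsigned int to_mask;	/* OR of states reachable from 'from' */
};

static const struct pm_transition transitions[] = {
	{ PM_DISABLE,  PM_POR },
	{ PM_POR,      PM_POR | PM_M0 | PM_DISABLE },
	{ PM_M0,       PM_M0 | PM_M3_ENTER },
	{ PM_M3_ENTER, PM_M3 },
	{ PM_M3,       PM_M0 },	/* really M3 -> M3_EXIT -> M0 in the driver */
};

/* Return the new state if the move is legal, else the current one,
 * mirroring how mhi_tryset_pm_state() reports success. */
static enum pm_state tryset_pm_state(enum pm_state cur, enum pm_state next)
{
	for (size_t i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++)
		if (transitions[i].from == cur && (transitions[i].to_mask & next))
			return next;
	return cur;
}

int main(void)
{
	enum pm_state s = PM_POR;

	s = tryset_pm_state(s, PM_M0);		/* legal: POR -> M0 */
	s = tryset_pm_state(s, PM_DISABLE);	/* rejected: stays M0 */
	printf("state mask: %#x\n", s);
	return 0;
}

Encoding each state as a bit lets one table row describe every reachable target with a single mask test, which is why the driver can validate any transition with one loop.
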
132 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_set_mhi_state()
136 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_set_mhi_state()
139 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_set_mhi_state()
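
mhi_set_mhi_state() pokes either the RESET bit or the MHISTATE field of the MHICTRL register through mhi_write_reg_field(), a masked read-modify-write. A userspace model of that helper, with invented mask values:

/* Masked read-modify-write of a register field, in the style of
 * mhi_write_reg_field(). Field layout here is illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define CTRL_RESET_MASK	0x2U		/* single bit */
#define CTRL_STATE_MASK	0x0000FF00U	/* 8-bit state field */

/* Shift 'val' into the field selected by 'mask' without disturbing
 * the other bits of the register. */
static void write_reg_field(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t shift = __builtin_ctz(mask);	/* lowest set bit of mask */

	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t mhictrl = 0;

	write_reg_field(&mhictrl, CTRL_STATE_MASK, 0x4);	/* e.g. M3 */
	write_reg_field(&mhictrl, CTRL_RESET_MASK, 1);
	printf("MHICTRL = %#010x\n", mhictrl);
	return 0;
}

The driver then polls the same fields back (the mhi_poll_reg_field() hits below) to confirm the device actually acted on the write.
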
155 mhi_cntrl->wake_get(mhi_cntrl, false); in mhi_toggle_dev_wake()
156 mhi_cntrl->wake_put(mhi_cntrl, true); in mhi_toggle_dev_wake()
164 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ready_state_transition()
170 if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { in mhi_ready_state_transition()
172 return -EIO; in mhi_ready_state_transition()
176 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_ready_state_transition()
178 mhi_cntrl->timeout_ms); in mhi_ready_state_transition()
184 timeout_ms = mhi_cntrl->ready_timeout_ms ? in mhi_ready_state_transition()
185 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; in mhi_ready_state_transition()
186 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_ready_state_transition()
195 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
197 mhi_cntrl->dev_state = MHI_STATE_READY; in mhi_ready_state_transition()
198 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
204 return -EIO; in mhi_ready_state_transition()
207 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
208 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ready_state_transition()
221 mhi_event = mhi_cntrl->mhi_event; in mhi_ready_state_transition()
222 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_ready_state_transition()
223 struct mhi_ring *ring = &mhi_event->ring; in mhi_ready_state_transition()
226 if (mhi_event->offload_ev || mhi_event->hw_ring) in mhi_ready_state_transition()
229 ring->wp = ring->base + ring->len - ring->el_size; in mhi_ready_state_transition()
230 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); in mhi_ready_state_transition()
235 spin_lock_irq(&mhi_event->lock); in mhi_ready_state_transition()
237 spin_unlock_irq(&mhi_event->lock); in mhi_ready_state_transition()
242 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
247 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
249 return -EIO; in mhi_ready_state_transition()
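
The event-ring setup in mhi_ready_state_transition() parks the write pointer on the last element, so from the device's point of view the whole ring is free for incoming events, and mirrors the same offset into the device-visible context as a little-endian bus address. A sketch of that pointer arithmetic, with illustrative sizes:

/* Event-ring pointer setup, modeled on mhi_ready_state_transition(). */
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint8_t *base;		/* CPU virtual address of ring memory */
	uint64_t iommu_base;	/* bus address the device uses */
	size_t len;		/* total ring size in bytes */
	size_t el_size;		/* one element, e.g. a 16-byte TRE */
	uint8_t *wp;		/* host write pointer */
};

/* Point wp at the last element: every slot before it is writable by
 * the device, which is what the READY-state init wants for event rings. */
static uint64_t init_event_ring_wp(struct ring *r)
{
	r->wp = r->base + r->len - r->el_size;
	return r->iommu_base + r->len - r->el_size;	/* value for *ctxt_wp */
}

int main(void)
{
	static uint8_t buf[16 * 128];
	struct ring r = { buf, 0x80000000ULL, sizeof(buf), 16, NULL };

	printf("ctxt_wp = %#llx (offset %zu)\n",
	       (unsigned long long)init_event_ring_wp(&r),
	       (size_t)(r.wp - r.base));
	return 0;
}
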
256 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m0_transition()
259 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
260 mhi_cntrl->dev_state = MHI_STATE_M0; in mhi_pm_m0_transition()
262 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
265 return -EIO; in mhi_pm_m0_transition()
267 mhi_cntrl->M0++; in mhi_pm_m0_transition()
270 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
271 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_pm_m0_transition()
274 if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { in mhi_pm_m0_transition()
275 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_pm_m0_transition()
277 &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_pm_m0_transition()
279 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_m0_transition()
280 if (mhi_event->offload_ev) in mhi_pm_m0_transition()
283 spin_lock_irq(&mhi_event->lock); in mhi_pm_m0_transition()
285 spin_unlock_irq(&mhi_event->lock); in mhi_pm_m0_transition()
289 spin_lock_irq(&mhi_cmd->lock); in mhi_pm_m0_transition()
290 if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) in mhi_pm_m0_transition()
292 spin_unlock_irq(&mhi_cmd->lock); in mhi_pm_m0_transition()
296 mhi_chan = mhi_cntrl->mhi_chan; in mhi_pm_m0_transition()
297 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_pm_m0_transition()
298 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_pm_m0_transition()
300 if (mhi_chan->db_cfg.reset_req) { in mhi_pm_m0_transition()
301 write_lock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
302 mhi_chan->db_cfg.db_mode = true; in mhi_pm_m0_transition()
303 write_unlock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
306 read_lock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
309 if (tre_ring->base && tre_ring->wp != tre_ring->rp && in mhi_pm_m0_transition()
310 mhi_chan->ch_state == MHI_CH_STATE_ENABLED) in mhi_pm_m0_transition()
312 read_unlock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
315 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_m0_transition()
316 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
317 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m0_transition()
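
On entry to M0, mhi_pm_m0_transition() walks every command and transfer ring and re-rings the doorbell wherever rp != wp, since doorbell writes made while the device slept in M2/M3 may have been lost; channels flagged db_cfg.reset_req additionally get db_mode set so the next queue operation rings again. A reduced sketch of that per-channel scan, with illustrative types:

/* Channel resume scan from mhi_pm_m0_transition(), reduced to the
 * db_mode reset and the rp/wp "pending work" test. */
#include <stdbool.h>
#include <stdio.h>

struct ring { char *base, *rp, *wp; };

struct chan {
	struct ring tre_ring;
	bool enabled;
	bool db_reset_req;	/* doorbell context lost across low power */
	bool db_mode;		/* next write must ring the doorbell */
};

static void resume_chan(struct chan *c)
{
	if (c->db_reset_req)
		c->db_mode = true;	/* force a doorbell on next queue */

	/* Ring now only if there is queued work the device never saw */
	if (c->tre_ring.base && c->tre_ring.rp != c->tre_ring.wp && c->enabled)
		printf("ring channel doorbell\n");
}

int main(void)
{
	char buf[64];
	struct chan c = { { buf, buf, buf + 16 }, true, true, false };

	resume_chan(&c);
	return 0;
}
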
330 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m1_transition()
332 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
336 mhi_cntrl->dev_state = MHI_STATE_M2; in mhi_pm_m1_transition()
338 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
340 mhi_cntrl->M2++; in mhi_pm_m1_transition()
341 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m1_transition()
344 if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || in mhi_pm_m1_transition()
345 atomic_read(&mhi_cntrl->dev_wake))) { in mhi_pm_m1_transition()
348 atomic_read(&mhi_cntrl->pending_pkts), in mhi_pm_m1_transition()
349 atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_m1_transition()
350 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
351 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_pm_m1_transition()
352 mhi_cntrl->wake_put(mhi_cntrl, true); in mhi_pm_m1_transition()
353 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
355 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); in mhi_pm_m1_transition()
358 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
366 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m3_transition()
368 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m3_transition()
369 mhi_cntrl->dev_state = MHI_STATE_M3; in mhi_pm_m3_transition()
371 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m3_transition()
374 return -EIO; in mhi_pm_m3_transition()
377 mhi_cntrl->M3++; in mhi_pm_m3_transition()
378 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m3_transition()
387 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_mission_mode_transition()
388 enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; in mhi_pm_mission_mode_transition()
393 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
394 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) in mhi_pm_mission_mode_transition()
398 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; in mhi_pm_mission_mode_transition()
399 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
400 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_mission_mode_transition()
401 return -EIO; in mhi_pm_mission_mode_transition()
403 mhi_cntrl->ee = ee; in mhi_pm_mission_mode_transition()
404 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
406 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_mission_mode_transition()
408 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee, in mhi_pm_mission_mode_transition()
410 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); in mhi_pm_mission_mode_transition()
417 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
419 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_mission_mode_transition()
420 ret = -EIO; in mhi_pm_mission_mode_transition()
425 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_mission_mode_transition()
426 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_mission_mode_transition()
427 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_mission_mode_transition()
429 if (mhi_event->offload_ev || !mhi_event->hw_ring) in mhi_pm_mission_mode_transition()
432 ring->wp = ring->base + ring->len - ring->el_size; in mhi_pm_mission_mode_transition()
433 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); in mhi_pm_mission_mode_transition()
437 spin_lock_irq(&mhi_event->lock); in mhi_pm_mission_mode_transition()
440 spin_unlock_irq(&mhi_event->lock); in mhi_pm_mission_mode_transition()
443 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
451 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
454 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_mission_mode_transition()
455 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
468 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_disable_transition()
472 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_disable_transition()
474 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
477 if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { in mhi_pm_disable_transition()
479 if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) in mhi_pm_disable_transition()
486 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_pm_disable_transition()
487 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); in mhi_pm_disable_transition()
493 * hence re-program it in mhi_pm_disable_transition()
495 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_pm_disable_transition()
499 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, in mhi_pm_disable_transition()
501 1, 25000, mhi_cntrl->timeout_ms); in mhi_pm_disable_transition()
510 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_disable_transition()
511 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_disable_transition()
512 if (mhi_event->offload_ev) in mhi_pm_disable_transition()
514 disable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_pm_disable_transition()
515 tasklet_kill(&mhi_event->task); in mhi_pm_disable_transition()
519 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
521 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_disable_transition()
524 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); in mhi_pm_disable_transition()
526 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
528 WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_disable_transition()
529 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); in mhi_pm_disable_transition()
533 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_pm_disable_transition()
534 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; in mhi_pm_disable_transition()
536 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_pm_disable_transition()
538 ring->rp = ring->base; in mhi_pm_disable_transition()
539 ring->wp = ring->base; in mhi_pm_disable_transition()
540 cmd_ctxt->rp = cmd_ctxt->rbase; in mhi_pm_disable_transition()
541 cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_pm_disable_transition()
544 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_disable_transition()
545 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; in mhi_pm_disable_transition()
546 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_pm_disable_transition()
548 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_disable_transition()
551 if (mhi_event->offload_ev) in mhi_pm_disable_transition()
554 ring->rp = ring->base; in mhi_pm_disable_transition()
555 ring->wp = ring->base; in mhi_pm_disable_transition()
556 er_ctxt->rp = er_ctxt->rbase; in mhi_pm_disable_transition()
557 er_ctxt->wp = er_ctxt->rbase; in mhi_pm_disable_transition()
561 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_disable_transition()
563 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_disable_transition()
570 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_disable_transition()
571 mhi_state_str(mhi_cntrl->dev_state)); in mhi_pm_disable_transition()
573 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
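
Both mhi_pm_disable_transition() and the SYS_ERROR path below finish by rewinding every command and event ring: the host-side rp/wp go back to ring->base and the device-visible context rp/wp back to rbase. A sketch of that reset (in the driver the context fields are little-endian __le64):

/* Ring reset as done at the end of mhi_pm_disable_transition(): both the
 * host view and the device-visible context return to the base. */
#include <stdint.h>
#include <stdio.h>

struct ring { char *base, *rp, *wp; };
struct ctxt { uint64_t rbase, rp, wp; };	/* shared with the device */

static void reset_ring(struct ring *r, struct ctxt *c)
{
	r->rp = r->base;
	r->wp = r->base;
	c->rp = c->rbase;
	c->wp = c->rbase;
}

int main(void)
{
	char buf[64];
	struct ring r = { buf, buf + 32, buf + 48 };
	struct ctxt c = { 0x80000000ULL, 0x80000020ULL, 0x80000030ULL };

	reset_ring(&r, &c);
	printf("rp offset %td, ctxt rp %#llx\n",
	       r.rp - r.base, (unsigned long long)c.rp);
	return 0;
}
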
585 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_sys_error_transition()
589 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_sys_error_transition()
593 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); in mhi_pm_sys_error_transition()
595 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
596 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
597 prev_state = mhi_cntrl->pm_state; in mhi_pm_sys_error_transition()
599 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
608 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; in mhi_pm_sys_error_transition()
609 mhi_cntrl->dev_state = MHI_STATE_RESET; in mhi_pm_sys_error_transition()
612 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_sys_error_transition()
616 u32 in_reset = -1; in mhi_pm_sys_error_transition()
617 unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); in mhi_pm_sys_error_transition()
623 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_sys_error_transition()
625 mhi_cntrl->regs, in mhi_pm_sys_error_transition()
637 * hence re-program it in mhi_pm_sys_error_transition()
639 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_pm_sys_error_transition()
644 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_sys_error_transition()
645 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_sys_error_transition()
646 if (mhi_event->offload_ev) in mhi_pm_sys_error_transition()
648 tasklet_kill(&mhi_event->task); in mhi_pm_sys_error_transition()
652 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
654 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_sys_error_transition()
657 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); in mhi_pm_sys_error_transition()
659 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
661 WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_sys_error_transition()
662 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); in mhi_pm_sys_error_transition()
666 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_pm_sys_error_transition()
667 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; in mhi_pm_sys_error_transition()
669 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_pm_sys_error_transition()
671 ring->rp = ring->base; in mhi_pm_sys_error_transition()
672 ring->wp = ring->base; in mhi_pm_sys_error_transition()
673 cmd_ctxt->rp = cmd_ctxt->rbase; in mhi_pm_sys_error_transition()
674 cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_pm_sys_error_transition()
677 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_sys_error_transition()
678 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; in mhi_pm_sys_error_transition()
679 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_pm_sys_error_transition()
681 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_sys_error_transition()
684 if (mhi_event->offload_ev) in mhi_pm_sys_error_transition()
687 ring->rp = ring->base; in mhi_pm_sys_error_transition()
688 ring->wp = ring->base; in mhi_pm_sys_error_transition()
689 er_ctxt->rp = er_ctxt->rbase; in mhi_pm_sys_error_transition()
690 er_ctxt->wp = er_ctxt->rbase; in mhi_pm_sys_error_transition()
695 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
697 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
713 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_sys_error_transition()
714 mhi_state_str(mhi_cntrl->dev_state)); in mhi_pm_sys_error_transition()
716 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
727 return -ENOMEM; in mhi_queue_state_transition()
729 item->state = state; in mhi_queue_state_transition()
730 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); in mhi_queue_state_transition()
731 list_add_tail(&item->node, &mhi_cntrl->transition_list); in mhi_queue_state_transition()
732 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); in mhi_queue_state_transition()
734 queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); in mhi_queue_state_transition()
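
mhi_queue_state_transition() is the producer half of a locked-list/workqueue handoff; mhi_pm_st_worker() (hits further below) is the consumer, draining everything queued so far with one splice. A kernel-style sketch of the pattern; all demo_* names are hypothetical:

/* Locked-list handoff to a worker, as in mhi_queue_state_transition()
 * and mhi_pm_st_worker(). */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
	struct list_head node;
	int state;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);
static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

/* Consumer: take everything queued so far in one splice, then walk it */
static void demo_worker(struct work_struct *work)
{
	struct demo_item *itr, *tmp;
	LIST_HEAD(head);

	spin_lock_irq(&demo_lock);
	list_splice_tail_init(&demo_list, &head);
	spin_unlock_irq(&demo_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		/* ... act on itr->state ... */
		kfree(itr);
	}
}

/* Producer: callable from atomic context, hence GFP_ATOMIC + irqsave */
static int demo_queue(int state)
{
	struct demo_item *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(&item->node, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags);

	queue_work(demo_wq, &demo_work);
	return 0;
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", WQ_HIGHPRI, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_worker);
	return demo_queue(1);
}

The splice-then-process shape keeps the lock hold time constant regardless of how many transitions piled up, which matters because producers run from interrupt paths.
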
742 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_sys_err_handler()
744 /* skip if controller supports RDDM */ in mhi_pm_sys_err_handler()
745 if (mhi_cntrl->rddm_image) { in mhi_pm_sys_err_handler()
746 dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); in mhi_pm_sys_err_handler()
761 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_st_worker()
763 spin_lock_irq(&mhi_cntrl->transition_lock); in mhi_pm_st_worker()
764 list_splice_tail_init(&mhi_cntrl->transition_list, &head); in mhi_pm_st_worker()
765 spin_unlock_irq(&mhi_cntrl->transition_lock); in mhi_pm_st_worker()
768 list_del(&itr->node); in mhi_pm_st_worker()
770 TO_DEV_STATE_TRANS_STR(itr->state)); in mhi_pm_st_worker()
772 switch (itr->state) { in mhi_pm_st_worker()
774 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
775 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) in mhi_pm_st_worker()
776 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); in mhi_pm_st_worker()
777 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
781 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
782 mhi_cntrl->ee = MHI_EE_SBL; in mhi_pm_st_worker()
783 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
790 if (mhi_cntrl->fbc_download) in mhi_pm_st_worker()
797 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
798 mhi_cntrl->ee = MHI_EE_FP; in mhi_pm_st_worker()
799 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
821 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_suspend()
825 if (mhi_cntrl->pm_state == MHI_PM_DISABLE) in mhi_pm_suspend()
826 return -EINVAL; in mhi_pm_suspend()
828 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) in mhi_pm_suspend()
829 return -EIO; in mhi_pm_suspend()
832 if (atomic_read(&mhi_cntrl->dev_wake) || in mhi_pm_suspend()
833 atomic_read(&mhi_cntrl->pending_pkts)) in mhi_pm_suspend()
834 return -EBUSY; in mhi_pm_suspend()
837 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
838 mhi_cntrl->wake_get(mhi_cntrl, false); in mhi_pm_suspend()
839 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
841 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_suspend()
842 mhi_cntrl->dev_state == MHI_STATE_M0 || in mhi_pm_suspend()
843 mhi_cntrl->dev_state == MHI_STATE_M1 || in mhi_pm_suspend()
844 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_pm_suspend()
845 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_pm_suspend()
847 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
848 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_suspend()
849 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
851 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_suspend()
854 return -EIO; in mhi_pm_suspend()
857 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
859 if (atomic_read(&mhi_cntrl->dev_wake) || in mhi_pm_suspend()
860 atomic_read(&mhi_cntrl->pending_pkts)) { in mhi_pm_suspend()
861 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
862 return -EBUSY; in mhi_pm_suspend()
868 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
872 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_suspend()
873 return -EIO; in mhi_pm_suspend()
878 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
881 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_suspend()
882 mhi_cntrl->dev_state == MHI_STATE_M3 || in mhi_pm_suspend()
883 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_pm_suspend()
884 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_pm_suspend()
886 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_suspend()
889 mhi_state_str(mhi_cntrl->dev_state), in mhi_pm_suspend()
890 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_suspend()
891 return -EIO; in mhi_pm_suspend()
895 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { in mhi_pm_suspend()
896 mutex_lock(&itr->mutex); in mhi_pm_suspend()
897 if (itr->mhi_dev) in mhi_pm_suspend()
898 mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); in mhi_pm_suspend()
899 mutex_unlock(&itr->mutex); in mhi_pm_suspend()
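
mhi_pm_suspend() uses the same wait shape twice: first for M0/M1 after taking a device-wake vote, then for M3 after writing M3_ENTER, each time bailing out if the PM state turns fatal. A condensed sketch of that wait, with hypothetical demo_* types; the waiters rely on wake_up_all(&mhi_cntrl->state_event) from the transition paths, as the hits above show:

/* The wait shape used throughout pm.c: block until the device reaches a
 * wanted state or the PM state goes fatal. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

struct demo_ctrl {
	wait_queue_head_t state_event;	/* init_waitqueue_head() at setup */
	int dev_state;
	bool error;
	unsigned int timeout_ms;
};

/* 0 on success, -EIO on error or timeout, mirroring mhi_pm_suspend() */
static int demo_wait_for_state(struct demo_ctrl *c, int wanted)
{
	long ret;

	ret = wait_event_timeout(c->state_event,
				 c->dev_state == wanted || c->error,
				 msecs_to_jiffies(c->timeout_ms));

	return (!ret || c->error) ? -EIO : 0;
}
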
909 struct device *dev = &mhi_cntrl->mhi_dev->dev; in __mhi_pm_resume()
914 to_mhi_pm_state_str(mhi_cntrl->pm_state), in __mhi_pm_resume()
915 mhi_state_str(mhi_cntrl->dev_state)); in __mhi_pm_resume()
917 if (mhi_cntrl->pm_state == MHI_PM_DISABLE) in __mhi_pm_resume()
920 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) in __mhi_pm_resume()
921 return -EIO; in __mhi_pm_resume()
927 return -EINVAL; in __mhi_pm_resume()
931 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { in __mhi_pm_resume()
932 mutex_lock(&itr->mutex); in __mhi_pm_resume()
933 if (itr->mhi_dev) in __mhi_pm_resume()
934 mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); in __mhi_pm_resume()
935 mutex_unlock(&itr->mutex); in __mhi_pm_resume()
938 write_lock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
941 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
945 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in __mhi_pm_resume()
946 return -EIO; in __mhi_pm_resume()
951 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
953 ret = wait_event_timeout(mhi_cntrl->state_event, in __mhi_pm_resume()
954 mhi_cntrl->dev_state == MHI_STATE_M0 || in __mhi_pm_resume()
955 mhi_cntrl->dev_state == MHI_STATE_M2 || in __mhi_pm_resume()
956 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in __mhi_pm_resume()
957 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in __mhi_pm_resume()
959 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_pm_resume()
962 mhi_state_str(mhi_cntrl->dev_state), in __mhi_pm_resume()
963 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in __mhi_pm_resume()
964 return -EIO; in __mhi_pm_resume()
987 read_lock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
988 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_device_get_sync()
989 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
990 return -EIO; in __mhi_device_get_sync()
992 mhi_cntrl->wake_get(mhi_cntrl, true); in __mhi_device_get_sync()
993 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in __mhi_device_get_sync()
995 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
997 ret = wait_event_timeout(mhi_cntrl->state_event, in __mhi_device_get_sync()
998 mhi_cntrl->pm_state == MHI_PM_M0 || in __mhi_device_get_sync()
999 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in __mhi_device_get_sync()
1000 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in __mhi_device_get_sync()
1002 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_device_get_sync()
1003 read_lock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1004 mhi_cntrl->wake_put(mhi_cntrl, false); in __mhi_device_get_sync()
1005 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1006 return -EIO; in __mhi_device_get_sync()
1022 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1023 atomic_inc(&mhi_cntrl->dev_wake); in mhi_assert_dev_wake()
1024 if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && in mhi_assert_dev_wake()
1025 !mhi_cntrl->wake_set) { in mhi_assert_dev_wake()
1026 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); in mhi_assert_dev_wake()
1027 mhi_cntrl->wake_set = true; in mhi_assert_dev_wake()
1029 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1035 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) in mhi_assert_dev_wake()
1038 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1039 if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && in mhi_assert_dev_wake()
1040 MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && in mhi_assert_dev_wake()
1041 !mhi_cntrl->wake_set) { in mhi_assert_dev_wake()
1042 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); in mhi_assert_dev_wake()
1043 mhi_cntrl->wake_set = true; in mhi_assert_dev_wake()
1045 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1049 /* De-assert device wake db */
1059 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) in mhi_deassert_dev_wake()
1062 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_deassert_dev_wake()
1063 if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && in mhi_deassert_dev_wake()
1064 MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && in mhi_deassert_dev_wake()
1065 mhi_cntrl->wake_set) { in mhi_deassert_dev_wake()
1066 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); in mhi_deassert_dev_wake()
1067 mhi_cntrl->wake_set = false; in mhi_deassert_dev_wake()
1069 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_deassert_dev_wake()
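
The wake get/put pair keeps a device-wake refcount with a lock-free fast path: atomic_add_unless() skips the spinlock entirely unless the counter is crossing the 0<->1 edge, and only that edge touches wake_db under wlock. A sketch of the same scheme with hypothetical demo_* names:

/* Device-wake refcount with a lock-free fast path, modeled on
 * mhi_assert_dev_wake()/mhi_deassert_dev_wake(). */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static atomic_t demo_wake = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(demo_wlock);
static bool demo_wake_set;

static void demo_wake_get(void)
{
	unsigned long flags;

	/* Fast path: already non-zero, so the doorbell is already set */
	if (atomic_add_unless(&demo_wake, 1, 0))
		return;

	/* Slow path: only the 0 -> 1 edge asserts the wake doorbell */
	spin_lock_irqsave(&demo_wlock, flags);
	if (atomic_inc_return(&demo_wake) == 1 && !demo_wake_set) {
		/* mhi_write_db(mhi_cntrl, wake_db, 1) in the driver */
		demo_wake_set = true;
	}
	spin_unlock_irqrestore(&demo_wlock, flags);
}

static void demo_wake_put(void)
{
	unsigned long flags;

	/* Fast path: not dropping the last reference */
	if (atomic_add_unless(&demo_wake, -1, 1))
		return;

	/* Slow path: only the 1 -> 0 edge clears the doorbell */
	spin_lock_irqsave(&demo_wlock, flags);
	if (atomic_dec_return(&demo_wake) == 0 && demo_wake_set) {
		/* mhi_write_db(mhi_cntrl, wake_db, 0) in the driver */
		demo_wake_set = false;
	}
	spin_unlock_irqrestore(&demo_wlock, flags);
}
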
1074 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_async_power_up()
1078 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_async_power_up()
1084 /* Supply default wake routines if not provided by controller driver */ in mhi_async_power_up()
1085 if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || in mhi_async_power_up()
1086 !mhi_cntrl->wake_toggle) { in mhi_async_power_up()
1087 mhi_cntrl->wake_get = mhi_assert_dev_wake; in mhi_async_power_up()
1088 mhi_cntrl->wake_put = mhi_deassert_dev_wake; in mhi_async_power_up()
1089 mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? in mhi_async_power_up()
1093 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
1094 mhi_cntrl->pm_state = MHI_PM_DISABLE; in mhi_async_power_up()
1097 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_async_power_up()
1098 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_async_power_up()
1099 mhi_cntrl->pm_state = MHI_PM_POR; in mhi_async_power_up()
1100 mhi_cntrl->ee = MHI_EE_MAX; in mhi_async_power_up()
1102 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_async_power_up()
1108 ret = -EIO; in mhi_async_power_up()
1118 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_async_power_up()
1120 mhi_cntrl->timeout_ms); in mhi_async_power_up()
1128 * re-program it in mhi_async_power_up()
1130 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_async_power_up()
1134 enable_irq(mhi_cntrl->irq[0]); in mhi_async_power_up()
1136 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_async_power_up()
1137 if (mhi_event->offload_ev) in mhi_async_power_up()
1140 enable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_async_power_up()
1149 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
1156 mhi_cntrl->pm_state = MHI_PM_DISABLE; in mhi_async_power_up()
1157 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
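
mhi_async_power_up() installs the default wake routines only when the controller driver supplied none, and picks a no-op toggle when the device keeps doorbell registers accessible in M2 (the db_access & MHI_PM_M2 test above). A sketch of that all-or-nothing defaulting, with hypothetical names:

/* All-or-nothing defaulting of controller wake callbacks, as in
 * mhi_async_power_up(). */
#include <stdbool.h>

struct demo_ops {
	void (*wake_get)(void *ctrl, bool force);
	void (*wake_put)(void *ctrl, bool override);
	void (*wake_toggle)(void *ctrl);
};

static void default_wake_get(void *ctrl, bool force) { /* assert wake db */ }
static void default_wake_put(void *ctrl, bool override) { /* release wake db */ }
static void default_wake_toggle(void *ctrl) { /* get then put */ }
static void nop_wake_toggle(void *ctrl) { /* doorbells usable in M2 */ }

static void demo_fill_wake_ops(struct demo_ops *ops, bool db_access_in_m2)
{
	/* Respect a complete set supplied by the controller driver */
	if (ops->wake_get && ops->wake_put && ops->wake_toggle)
		return;

	ops->wake_get = default_wake_get;
	ops->wake_put = default_wake_put;
	ops->wake_toggle = db_access_in_m2 ? nop_wake_toggle
					   : default_wake_toggle;
}
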
1166 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_power_down()
1168 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_power_down()
1169 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_power_down()
1170 cur_state = mhi_cntrl->pm_state; in mhi_power_down()
1172 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_power_down()
1173 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_power_down()
1185 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_power_down()
1187 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; in mhi_power_down()
1191 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; in mhi_power_down()
1192 mhi_cntrl->dev_state = MHI_STATE_RESET; in mhi_power_down()
1194 wake_up_all(&mhi_cntrl->state_event); in mhi_power_down()
1196 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_power_down()
1197 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_power_down()
1202 flush_work(&mhi_cntrl->st_worker); in mhi_power_down()
1204 disable_irq(mhi_cntrl->irq[0]); in mhi_power_down()
1217 timeout_ms = mhi_cntrl->ready_timeout_ms ? in mhi_sync_power_up()
1218 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; in mhi_sync_power_up()
1219 wait_event_timeout(mhi_cntrl->state_event, in mhi_sync_power_up()
1220 MHI_IN_MISSION_MODE(mhi_cntrl->ee) || in mhi_sync_power_up()
1221 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_sync_power_up()
1224 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; in mhi_sync_power_up()
1234 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_force_rddm_mode()
1238 if (mhi_cntrl->ee == MHI_EE_RDDM) in mhi_force_rddm_mode()
1245 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_force_rddm_mode()
1246 mhi_cntrl->ee == MHI_EE_RDDM, in mhi_force_rddm_mode()
1247 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_force_rddm_mode()
1248 ret = ret ? 0 : -EIO; in mhi_force_rddm_mode()
1256 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_get()
1258 mhi_dev->dev_wake++; in mhi_device_get()
1259 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_device_get()
1260 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in mhi_device_get()
1263 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_device_get()
1264 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_device_get()
1270 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_get_sync()
1275 mhi_dev->dev_wake++; in mhi_device_get_sync()
1283 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_put()
1285 mhi_dev->dev_wake--; in mhi_device_put()
1286 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_device_put()
1287 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in mhi_device_put()
1290 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_device_put()
1291 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_device_put()