Lines matching "skip-power-up"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
32 * L0: DISABLE <--> POR
33 *     POR <--> POR
34 *     POR -> M0 -> M2 --> M0
35 *     POR -> FW_DL_ERR
36 *     FW_DL_ERR <--> FW_DL_ERR
37 *     M0 <--> M0
38 *     M0 -> FW_DL_ERR
39 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
40 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
41 *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
42 *     SYS_ERR_FAIL -> SYS_ERR_DETECT
43 *     SYS_ERR_PROCESS --> POR
44 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
45 *     SHUTDOWN_PROCESS -> DISABLE
46 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
47 *     LD_ERR_FATAL_DETECT -> DISABLE
123 unsigned long cur_state = mhi_cntrl->pm_state; in mhi_tryset_pm_state()
136 mhi_cntrl->pm_state = state; in mhi_tryset_pm_state()
137 return mhi_cntrl->pm_state; in mhi_tryset_pm_state()
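The hierarchy in the comment block above is what mhi_tryset_pm_state() enforces: the current PM state selects a table entry whose mask lists every state legally reachable from it. A minimal standalone sketch of that mask-table idiom follows; the state names and masks here are simplified stand-ins, not the driver's actual transition table.

#include <stdbool.h>
#include <stdio.h>

enum pm_state {
	PM_DISABLE = 1 << 0,
	PM_POR     = 1 << 1,
	PM_M0      = 1 << 2,
	PM_M2      = 1 << 3,
	PM_SYS_ERR = 1 << 4,
};

struct pm_transition {
	enum pm_state from;
	unsigned int to_mask;	/* OR of all states reachable from 'from' */
};

static const struct pm_transition transitions[] = {
	{ PM_DISABLE, PM_POR },
	{ PM_POR,     PM_POR | PM_M0 | PM_SYS_ERR },
	{ PM_M0,      PM_M0 | PM_M2 | PM_SYS_ERR },
	{ PM_M2,      PM_M0 | PM_SYS_ERR },
	{ PM_SYS_ERR, PM_POR | PM_DISABLE },
};

static bool pm_transition_valid(enum pm_state cur, enum pm_state next)
{
	for (unsigned int i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++)
		if (transitions[i].from == cur)
			return transitions[i].to_mask & next;
	return false;
}

int main(void)
{
	/* M0 -> M2 is legal at L0; M2 -> DISABLE is not */
	printf("M0 -> M2: %d\n", pm_transition_valid(PM_M0, PM_M2));
	printf("M2 -> DISABLE: %d\n", pm_transition_valid(PM_M2, PM_DISABLE));
	return 0;
}

The driver's version performs this check under pm_lock and only assigns mhi_cntrl->pm_state when the mask allows it, which is why callers compare the returned state against the one they requested.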
142 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_set_mhi_state()
146 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_set_mhi_state()
149 ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_set_mhi_state()
165 mhi_cntrl->wake_get(mhi_cntrl, false); in mhi_toggle_dev_wake()
166 mhi_cntrl->wake_put(mhi_cntrl, true); in mhi_toggle_dev_wake()
174 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ready_state_transition()
180 if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { in mhi_ready_state_transition()
182 return -EIO; in mhi_ready_state_transition()
186 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_ready_state_transition()
188 mhi_cntrl->timeout_ms); in mhi_ready_state_transition()
194 timeout_ms = mhi_cntrl->ready_timeout_ms ? in mhi_ready_state_transition()
195 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; in mhi_ready_state_transition()
196 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_ready_state_transition()
205 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
207 mhi_cntrl->dev_state = MHI_STATE_READY; in mhi_ready_state_transition()
208 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
214 return -EIO; in mhi_ready_state_transition()
217 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
218 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ready_state_transition()
231 mhi_event = mhi_cntrl->mhi_event; in mhi_ready_state_transition()
232 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_ready_state_transition()
233 struct mhi_ring *ring = &mhi_event->ring; in mhi_ready_state_transition()
235 /* Skip if this is an offload or HW event */ in mhi_ready_state_transition()
236 if (mhi_event->offload_ev || mhi_event->hw_ring) in mhi_ready_state_transition()
239 ring->wp = ring->base + ring->len - ring->el_size; in mhi_ready_state_transition()
240 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); in mhi_ready_state_transition()
245 spin_lock_irq(&mhi_event->lock); in mhi_ready_state_transition()
247 spin_unlock_irq(&mhi_event->lock); in mhi_ready_state_transition()
252 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
257 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_ready_state_transition()
259 return -EIO; in mhi_ready_state_transition()
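mhi_ready_state_transition() leans twice on the same primitive: poll a register field (first MHICTRL.RESET, then MHISTATUS.READY) until it reaches the wanted value or a timeout lapses. A hedged userspace sketch of that loop is below; the real mhi_poll_reg_field() reads through the controller's register-access hooks rather than a bare pointer.

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Poll *reg until (value & mask) == want, checking every interval_us
 * microseconds and giving up after timeout_ms. The volatile pointer
 * stands in for memory-mapped I/O. */
static bool poll_reg_field(volatile uint32_t *reg, uint32_t mask,
			   uint32_t want, unsigned int interval_us,
			   unsigned int timeout_ms)
{
	uint64_t waited_us = 0;

	while ((*reg & mask) != want) {
		if (waited_us >= (uint64_t)timeout_ms * 1000)
			return false;	/* maps to -ETIMEDOUT in the driver */
		usleep(interval_us);
		waited_us += interval_us;
	}
	return true;
}

Note the longer READY wait: as the ternary on ready_timeout_ms above shows, the driver prefers a controller-provided ready timeout and falls back to the ordinary timeout_ms otherwise.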
266 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m0_transition()
269 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
270 mhi_cntrl->dev_state = MHI_STATE_M0; in mhi_pm_m0_transition()
272 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
275 return -EIO; in mhi_pm_m0_transition()
277 mhi_cntrl->M0++; in mhi_pm_m0_transition()
279 /* Wake up the device */ in mhi_pm_m0_transition()
280 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
281 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_pm_m0_transition()
284 if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { in mhi_pm_m0_transition()
285 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_pm_m0_transition()
287 &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_pm_m0_transition()
289 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_m0_transition()
290 if (mhi_event->offload_ev) in mhi_pm_m0_transition()
293 spin_lock_irq(&mhi_event->lock); in mhi_pm_m0_transition()
295 spin_unlock_irq(&mhi_event->lock); in mhi_pm_m0_transition()
299 spin_lock_irq(&mhi_cmd->lock); in mhi_pm_m0_transition()
300 if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) in mhi_pm_m0_transition()
302 spin_unlock_irq(&mhi_cmd->lock); in mhi_pm_m0_transition()
306 mhi_chan = mhi_cntrl->mhi_chan; in mhi_pm_m0_transition()
307 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_pm_m0_transition()
308 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_pm_m0_transition()
310 if (mhi_chan->db_cfg.reset_req) { in mhi_pm_m0_transition()
311 write_lock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
312 mhi_chan->db_cfg.db_mode = true; in mhi_pm_m0_transition()
313 write_unlock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
316 read_lock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
319 if (tre_ring->base && tre_ring->wp != tre_ring->rp && in mhi_pm_m0_transition()
320 mhi_chan->ch_state == MHI_CH_STATE_ENABLED) in mhi_pm_m0_transition()
322 read_unlock_irq(&mhi_chan->lock); in mhi_pm_m0_transition()
325 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_m0_transition()
326 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m0_transition()
327 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m0_transition()
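On entry to M0 the driver re-rings a doorbell for every ring that still has work queued, because doorbells written while the device was suspended may have been lost. The "has work" test is just a pointer comparison; a small sketch, with field names borrowed from the fragments above:

#include <stdbool.h>

struct mhi_ring_sketch {
	void *base;	/* NULL until the ring is allocated */
	void *rp;	/* read pointer: device-side progress */
	void *wp;	/* write pointer: host-side progress */
};

/* A ring needs its doorbell re-rung when it exists and the host has
 * written elements the device has not yet consumed. */
static bool ring_has_pending(const struct mhi_ring_sketch *ring)
{
	return ring->base && ring->rp != ring->wp;
}

That is exactly the condition guarding the channel doorbell above: tre_ring->base && tre_ring->wp != tre_ring->rp, plus a check that the channel is in the ENABLED state.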
335 * for keeping it in low power state.
340 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m1_transition()
342 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
346 mhi_cntrl->dev_state = MHI_STATE_M2; in mhi_pm_m1_transition()
348 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
350 mhi_cntrl->M2++; in mhi_pm_m1_transition()
351 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m1_transition()
354 if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || in mhi_pm_m1_transition()
355 atomic_read(&mhi_cntrl->dev_wake))) { in mhi_pm_m1_transition()
358 atomic_read(&mhi_cntrl->pending_pkts), in mhi_pm_m1_transition()
359 atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_m1_transition()
360 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
361 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_pm_m1_transition()
362 mhi_cntrl->wake_put(mhi_cntrl, true); in mhi_pm_m1_transition()
363 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
365 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); in mhi_pm_m1_transition()
368 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m1_transition()
376 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_m3_transition()
378 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m3_transition()
379 mhi_cntrl->dev_state = MHI_STATE_M3; in mhi_pm_m3_transition()
381 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_m3_transition()
384 return -EIO; in mhi_pm_m3_transition()
387 mhi_cntrl->M3++; in mhi_pm_m3_transition()
388 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_m3_transition()
397 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_mission_mode_transition()
398 enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; in mhi_pm_mission_mode_transition()
403 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
404 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) in mhi_pm_mission_mode_transition()
408 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; in mhi_pm_mission_mode_transition()
409 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
410 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_mission_mode_transition()
411 return -EIO; in mhi_pm_mission_mode_transition()
413 mhi_cntrl->ee = ee; in mhi_pm_mission_mode_transition()
414 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
416 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_mission_mode_transition()
418 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee, in mhi_pm_mission_mode_transition()
420 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); in mhi_pm_mission_mode_transition()
427 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
429 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_mission_mode_transition()
430 ret = -EIO; in mhi_pm_mission_mode_transition()
435 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_mission_mode_transition()
436 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_mission_mode_transition()
437 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_mission_mode_transition()
439 if (mhi_event->offload_ev || !mhi_event->hw_ring) in mhi_pm_mission_mode_transition()
442 ring->wp = ring->base + ring->len - ring->el_size; in mhi_pm_mission_mode_transition()
443 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); in mhi_pm_mission_mode_transition()
447 spin_lock_irq(&mhi_event->lock); in mhi_pm_mission_mode_transition()
450 spin_unlock_irq(&mhi_event->lock); in mhi_pm_mission_mode_transition()
453 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
461 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
464 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_mission_mode_transition()
465 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_mission_mode_transition()
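Both the READY path earlier and the mission-mode path above prime event rings the same way: the write pointer is parked on the last element so the whole ring is available to the device, and the device-visible context copy is published in little-endian byte order. A sketch of that initialization, with htole64 standing in for the kernel's cpu_to_le64 and the struct layout abbreviated:

#include <endian.h>
#include <stdint.h>

struct ev_ring_sketch {
	uint8_t *base;		/* host virtual address of the ring */
	uint8_t *wp;		/* host-side write pointer */
	uint64_t iommu_base;	/* device-visible (DMA) address */
	uint64_t len, el_size;	/* ring length and element size, bytes */
};

/* Park wp on the last element and publish the matching DMA address to
 * the shared context so the device sees an all-free ring. */
static void ev_ring_prime(struct ev_ring_sketch *ring, uint64_t *ctxt_wp)
{
	ring->wp = ring->base + ring->len - ring->el_size;
	*ctxt_wp = htole64(ring->iommu_base + ring->len - ring->el_size);
}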
479 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_disable_transition()
483 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_disable_transition()
485 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
488 if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { in mhi_pm_disable_transition()
489 /* Skip MHI RESET if in RDDM state */ in mhi_pm_disable_transition()
490 if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) in mhi_pm_disable_transition()
497 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_pm_disable_transition()
498 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); in mhi_pm_disable_transition()
504 * hence re-program it in mhi_pm_disable_transition()
506 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_pm_disable_transition()
510 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, in mhi_pm_disable_transition()
512 1, 25000, mhi_cntrl->timeout_ms); in mhi_pm_disable_transition()
521 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_disable_transition()
522 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_disable_transition()
523 if (mhi_event->offload_ev) in mhi_pm_disable_transition()
525 disable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_pm_disable_transition()
526 tasklet_kill(&mhi_event->task); in mhi_pm_disable_transition()
530 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
532 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_disable_transition()
542 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); in mhi_pm_disable_transition()
545 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
547 WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_disable_transition()
548 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); in mhi_pm_disable_transition()
552 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_pm_disable_transition()
553 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; in mhi_pm_disable_transition()
555 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_pm_disable_transition()
557 ring->rp = ring->base; in mhi_pm_disable_transition()
558 ring->wp = ring->base; in mhi_pm_disable_transition()
559 cmd_ctxt->rp = cmd_ctxt->rbase; in mhi_pm_disable_transition()
560 cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_pm_disable_transition()
563 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_disable_transition()
564 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; in mhi_pm_disable_transition()
565 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_pm_disable_transition()
567 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_disable_transition()
569 /* Skip offload events */ in mhi_pm_disable_transition()
570 if (mhi_event->offload_ev) in mhi_pm_disable_transition()
573 ring->rp = ring->base; in mhi_pm_disable_transition()
574 ring->wp = ring->base; in mhi_pm_disable_transition()
575 er_ctxt->rp = er_ctxt->rbase; in mhi_pm_disable_transition()
576 er_ctxt->wp = er_ctxt->rbase; in mhi_pm_disable_transition()
580 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_disable_transition()
582 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_disable_transition()
589 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_disable_transition()
590 mhi_state_str(mhi_cntrl->dev_state)); in mhi_pm_disable_transition()
592 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_disable_transition()
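Both the clean shutdown above and the SYS_ERR path below rewind every command and event ring the same way: software pointers back to the ring base, and the shared context structure's pointers back to rbase, so the device also sees an empty ring on the next power-up. A compact sketch of that rewind (struct layouts abbreviated):

#include <stdint.h>

struct ring_sketch { uint64_t base, rp, wp; };	/* host-side bookkeeping */
struct ctxt_sketch { uint64_t rbase, rp, wp; };	/* shared with the device */

static void ring_rewind(struct ring_sketch *ring, struct ctxt_sketch *ctxt)
{
	ring->rp = ring->base;
	ring->wp = ring->base;
	ctxt->rp = ctxt->rbase;	/* device-visible pointers reset too */
	ctxt->wp = ctxt->rbase;
}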
604 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_sys_error_transition()
608 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_sys_error_transition()
611 /* We must notify MHI control driver so it can clean up first */ in mhi_pm_sys_error_transition()
612 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); in mhi_pm_sys_error_transition()
614 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
615 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
616 prev_state = mhi_cntrl->pm_state; in mhi_pm_sys_error_transition()
618 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
627 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; in mhi_pm_sys_error_transition()
628 mhi_cntrl->dev_state = MHI_STATE_RESET; in mhi_pm_sys_error_transition()
630 /* Wake up threads waiting for state transition */ in mhi_pm_sys_error_transition()
631 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_sys_error_transition()
635 u32 in_reset = -1; in mhi_pm_sys_error_transition()
636 unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); in mhi_pm_sys_error_transition()
642 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_sys_error_transition()
644 mhi_cntrl->regs, in mhi_pm_sys_error_transition()
651 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
654 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
662 * hence re-program it in mhi_pm_sys_error_transition()
664 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_pm_sys_error_transition()
669 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_sys_error_transition()
670 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_pm_sys_error_transition()
671 if (mhi_event->offload_ev) in mhi_pm_sys_error_transition()
673 tasklet_kill(&mhi_event->task); in mhi_pm_sys_error_transition()
677 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
679 wake_up_all(&mhi_cntrl->state_event); in mhi_pm_sys_error_transition()
682 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); in mhi_pm_sys_error_transition()
684 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
686 WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); in mhi_pm_sys_error_transition()
687 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); in mhi_pm_sys_error_transition()
691 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_pm_sys_error_transition()
692 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; in mhi_pm_sys_error_transition()
694 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_pm_sys_error_transition()
696 ring->rp = ring->base; in mhi_pm_sys_error_transition()
697 ring->wp = ring->base; in mhi_pm_sys_error_transition()
698 cmd_ctxt->rp = cmd_ctxt->rbase; in mhi_pm_sys_error_transition()
699 cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_pm_sys_error_transition()
702 mhi_event = mhi_cntrl->mhi_event; in mhi_pm_sys_error_transition()
703 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; in mhi_pm_sys_error_transition()
704 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_pm_sys_error_transition()
706 struct mhi_ring *ring = &mhi_event->ring; in mhi_pm_sys_error_transition()
708 /* Skip offload events */ in mhi_pm_sys_error_transition()
709 if (mhi_event->offload_ev) in mhi_pm_sys_error_transition()
712 ring->rp = ring->base; in mhi_pm_sys_error_transition()
713 ring->wp = ring->base; in mhi_pm_sys_error_transition()
714 er_ctxt->rp = er_ctxt->rbase; in mhi_pm_sys_error_transition()
715 er_ctxt->wp = er_ctxt->rbase; in mhi_pm_sys_error_transition()
720 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
722 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_sys_error_transition()
738 to_mhi_pm_state_str(mhi_cntrl->pm_state), in mhi_pm_sys_error_transition()
739 mhi_state_str(mhi_cntrl->dev_state)); in mhi_pm_sys_error_transition()
741 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_pm_sys_error_transition()
752 return -ENOMEM; in mhi_queue_state_transition()
754 item->state = state; in mhi_queue_state_transition()
755 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); in mhi_queue_state_transition()
756 list_add_tail(&item->node, &mhi_cntrl->transition_list); in mhi_queue_state_transition()
757 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); in mhi_queue_state_transition()
759 queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); in mhi_queue_state_transition()
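mhi_queue_state_transition() is a textbook producer for a single-worker queue: allocate a node, append it under a lock, kick the worker. A userspace analogue, with pthreads standing in for the spinlock and the hiprio workqueue (all names here are hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct st_item {
	int state;
	struct st_item *next;
};

static pthread_mutex_t transition_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t worker_kick = PTHREAD_COND_INITIALIZER;
static struct st_item *transition_list;

static int queue_state_transition(int state)
{
	struct st_item *item = malloc(sizeof(*item));

	if (!item)
		return -1;	/* -ENOMEM in the kernel version */
	item->state = state;

	pthread_mutex_lock(&transition_lock);
	item->next = transition_list;	/* LIFO here; the driver adds at the tail */
	transition_list = item;
	pthread_cond_signal(&worker_kick);	/* stands in for queue_work() */
	pthread_mutex_unlock(&transition_lock);
	return 0;
}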
767 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_sys_err_handler()
769 /* skip if controller supports RDDM */ in mhi_pm_sys_err_handler()
770 if (mhi_cntrl->rddm_image) { in mhi_pm_sys_err_handler()
771 dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); in mhi_pm_sys_err_handler()
787 spin_lock_irq(&mhi_cntrl->transition_lock); in mhi_pm_st_worker()
788 list_splice_tail_init(&mhi_cntrl->transition_list, &head); in mhi_pm_st_worker()
789 spin_unlock_irq(&mhi_cntrl->transition_lock); in mhi_pm_st_worker()
792 list_del(&itr->node); in mhi_pm_st_worker()
794 trace_mhi_pm_st_transition(mhi_cntrl, itr->state); in mhi_pm_st_worker()
796 switch (itr->state) { in mhi_pm_st_worker()
798 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
799 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) in mhi_pm_st_worker()
800 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); in mhi_pm_st_worker()
801 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
805 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
806 mhi_cntrl->ee = MHI_EE_SBL; in mhi_pm_st_worker()
807 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
814 if (mhi_cntrl->fbc_download) in mhi_pm_st_worker()
821 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
822 mhi_cntrl->ee = MHI_EE_FP; in mhi_pm_st_worker()
823 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_st_worker()
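The state worker is the matching consumer: it splices the whole pending list off under the lock (list_splice_tail_init above), then dispatches each item lock-free through the switch on itr->state. Continuing the producer sketch after mhi_queue_state_transition(), and meant to be compiled together with it:

/* Detach everything under the lock, process outside it, so producers
 * are never blocked behind a slow transition handler. */
static void st_worker(void)
{
	struct st_item *head, *next;

	pthread_mutex_lock(&transition_lock);
	head = transition_list;
	transition_list = NULL;
	pthread_mutex_unlock(&transition_lock);

	for (; head; head = next) {
		next = head->next;
		/* the switch (head->state) dispatch goes here */
		free(head);
	}
}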
848 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_pm_suspend()
852 if (mhi_cntrl->pm_state == MHI_PM_DISABLE) in mhi_pm_suspend()
853 return -EINVAL; in mhi_pm_suspend()
855 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) in mhi_pm_suspend()
856 return -EIO; in mhi_pm_suspend()
859 if (atomic_read(&mhi_cntrl->dev_wake) || in mhi_pm_suspend()
860 atomic_read(&mhi_cntrl->pending_pkts)) in mhi_pm_suspend()
861 return -EBUSY; in mhi_pm_suspend()
864 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
865 mhi_cntrl->wake_get(mhi_cntrl, false); in mhi_pm_suspend()
866 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
868 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_suspend()
869 mhi_cntrl->dev_state == MHI_STATE_M0 || in mhi_pm_suspend()
870 mhi_cntrl->dev_state == MHI_STATE_M1 || in mhi_pm_suspend()
871 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_pm_suspend()
872 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_pm_suspend()
874 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
875 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_pm_suspend()
876 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
878 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_suspend()
881 return -EIO; in mhi_pm_suspend()
884 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
886 if (atomic_read(&mhi_cntrl->dev_wake) || in mhi_pm_suspend()
887 atomic_read(&mhi_cntrl->pending_pkts)) { in mhi_pm_suspend()
888 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
889 return -EBUSY; in mhi_pm_suspend()
895 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
899 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_suspend()
900 return -EIO; in mhi_pm_suspend()
905 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_pm_suspend()
908 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_pm_suspend()
909 mhi_cntrl->dev_state == MHI_STATE_M3 || in mhi_pm_suspend()
910 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_pm_suspend()
911 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_pm_suspend()
913 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in mhi_pm_suspend()
916 mhi_state_str(mhi_cntrl->dev_state), in mhi_pm_suspend()
917 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in mhi_pm_suspend()
918 return -EIO; in mhi_pm_suspend()
922 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { in mhi_pm_suspend()
923 mutex_lock(&itr->mutex); in mhi_pm_suspend()
924 if (itr->mhi_dev) in mhi_pm_suspend()
925 mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); in mhi_pm_suspend()
926 mutex_unlock(&itr->mutex); in mhi_pm_suspend()
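mhi_pm_suspend() checks the busy counters twice: once locklessly to fail fast, and again under the pm_lock write side, so a wake reference racing in between cannot be missed before the M3_ENTER state is committed. A minimal sketch of that check/lock/recheck idiom, with a pthreads rwlock standing in for pm_lock:

#include <pthread.h>
#include <stdatomic.h>

static atomic_int dev_wake, pending_pkts;
static pthread_rwlock_t pm_lock = PTHREAD_RWLOCK_INITIALIZER;
static int dev_state;

static int try_enter_m3(void)
{
	/* lockless fast path: don't bother suspending an obviously busy device */
	if (atomic_load(&dev_wake) || atomic_load(&pending_pkts))
		return -1;	/* -EBUSY */

	pthread_rwlock_wrlock(&pm_lock);
	/* recheck under the lock: a wake_get() may have raced in */
	if (atomic_load(&dev_wake) || atomic_load(&pending_pkts)) {
		pthread_rwlock_unlock(&pm_lock);
		return -1;
	}
	dev_state = 3;	/* commit M3 entry while holding the write lock */
	pthread_rwlock_unlock(&pm_lock);
	return 0;
}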
936 struct device *dev = &mhi_cntrl->mhi_dev->dev; in __mhi_pm_resume()
941 to_mhi_pm_state_str(mhi_cntrl->pm_state), in __mhi_pm_resume()
942 mhi_state_str(mhi_cntrl->dev_state)); in __mhi_pm_resume()
944 if (mhi_cntrl->pm_state == MHI_PM_DISABLE) in __mhi_pm_resume()
947 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) in __mhi_pm_resume()
948 return -EIO; in __mhi_pm_resume()
954 return -EINVAL; in __mhi_pm_resume()
958 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { in __mhi_pm_resume()
959 mutex_lock(&itr->mutex); in __mhi_pm_resume()
960 if (itr->mhi_dev) in __mhi_pm_resume()
961 mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); in __mhi_pm_resume()
962 mutex_unlock(&itr->mutex); in __mhi_pm_resume()
965 write_lock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
968 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
972 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in __mhi_pm_resume()
973 return -EIO; in __mhi_pm_resume()
978 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_pm_resume()
980 ret = wait_event_timeout(mhi_cntrl->state_event, in __mhi_pm_resume()
981 mhi_cntrl->dev_state == MHI_STATE_M0 || in __mhi_pm_resume()
982 mhi_cntrl->dev_state == MHI_STATE_M2 || in __mhi_pm_resume()
983 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in __mhi_pm_resume()
984 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in __mhi_pm_resume()
986 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_pm_resume()
989 mhi_state_str(mhi_cntrl->dev_state), in __mhi_pm_resume()
990 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in __mhi_pm_resume()
991 return -EIO; in __mhi_pm_resume()
1013 /* Wake up the device */ in __mhi_device_get_sync()
1014 read_lock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1015 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_device_get_sync()
1016 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1017 return -EIO; in __mhi_device_get_sync()
1019 mhi_cntrl->wake_get(mhi_cntrl, true); in __mhi_device_get_sync()
1020 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in __mhi_device_get_sync()
1022 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1024 ret = wait_event_timeout(mhi_cntrl->state_event, in __mhi_device_get_sync()
1025 mhi_cntrl->pm_state == MHI_PM_M0 || in __mhi_device_get_sync()
1026 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in __mhi_device_get_sync()
1027 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in __mhi_device_get_sync()
1029 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { in __mhi_device_get_sync()
1030 read_lock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1031 mhi_cntrl->wake_put(mhi_cntrl, false); in __mhi_device_get_sync()
1032 read_unlock_bh(&mhi_cntrl->pm_lock); in __mhi_device_get_sync()
1033 return -EIO; in __mhi_device_get_sync()
1049 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1050 atomic_inc(&mhi_cntrl->dev_wake); in mhi_assert_dev_wake()
1051 if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && in mhi_assert_dev_wake()
1052 !mhi_cntrl->wake_set) { in mhi_assert_dev_wake()
1053 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); in mhi_assert_dev_wake()
1054 mhi_cntrl->wake_set = true; in mhi_assert_dev_wake()
1056 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1062 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) in mhi_assert_dev_wake()
1065 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1066 if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && in mhi_assert_dev_wake()
1067 MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && in mhi_assert_dev_wake()
1068 !mhi_cntrl->wake_set) { in mhi_assert_dev_wake()
1069 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); in mhi_assert_dev_wake()
1070 mhi_cntrl->wake_set = true; in mhi_assert_dev_wake()
1072 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_assert_dev_wake()
1076 /* De-assert device wake db */
1086 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) in mhi_deassert_dev_wake()
1089 spin_lock_irqsave(&mhi_cntrl->wlock, flags); in mhi_deassert_dev_wake()
1090 if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && in mhi_deassert_dev_wake()
1091 MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && in mhi_deassert_dev_wake()
1092 mhi_cntrl->wake_set) { in mhi_deassert_dev_wake()
1093 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); in mhi_deassert_dev_wake()
1094 mhi_cntrl->wake_set = false; in mhi_deassert_dev_wake()
1096 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); in mhi_deassert_dev_wake()
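The wake get/put pair above is edge-triggered: only the 0 -> 1 transition sets the wake doorbell and only the 1 -> 0 transition clears it, with atomic_add_unless() giving every other call a lock-free fast path. A sketch of the get side, where a CAS loop plays the role of atomic_add_unless and a mutex the role of the wlock spinlock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dev_wake;
static pthread_mutex_t wlock = PTHREAD_MUTEX_INITIALIZER;
static bool wake_set;

/* add 1 unless the counter is 0; returns true if we added */
static bool add_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	return false;
}

static void wake_get(void)
{
	if (add_unless_zero(&dev_wake))
		return;	/* fast path: already awake, just a refcount bump */

	pthread_mutex_lock(&wlock);	/* slow path: possible 0 -> 1 edge */
	if (atomic_fetch_add(&dev_wake, 1) == 0 && !wake_set) {
		/* the driver rings the wake doorbell here via mhi_write_db() */
		wake_set = true;
	}
	pthread_mutex_unlock(&wlock);
}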
1101 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_async_power_up()
1105 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_async_power_up()
1109 dev_info(dev, "Requested to power ON\n"); in mhi_async_power_up()
1112 if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || in mhi_async_power_up()
1113 !mhi_cntrl->wake_toggle) { in mhi_async_power_up()
1114 mhi_cntrl->wake_get = mhi_assert_dev_wake; in mhi_async_power_up()
1115 mhi_cntrl->wake_put = mhi_deassert_dev_wake; in mhi_async_power_up()
1116 mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? in mhi_async_power_up()
1120 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
1121 mhi_cntrl->pm_state = MHI_PM_DISABLE; in mhi_async_power_up()
1124 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_async_power_up()
1125 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_async_power_up()
1126 mhi_cntrl->pm_state = MHI_PM_POR; in mhi_async_power_up()
1127 mhi_cntrl->ee = MHI_EE_MAX; in mhi_async_power_up()
1129 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_async_power_up()
1133 dev_err(dev, "%s is not a valid EE for power on\n", in mhi_async_power_up()
1135 ret = -EIO; in mhi_async_power_up()
1140 dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", in mhi_async_power_up()
1145 ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, in mhi_async_power_up()
1147 mhi_cntrl->timeout_ms); in mhi_async_power_up()
1155 * re-program it in mhi_async_power_up()
1157 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); in mhi_async_power_up()
1161 enable_irq(mhi_cntrl->irq[0]); in mhi_async_power_up()
1163 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_async_power_up()
1164 if (mhi_event->offload_ev) in mhi_async_power_up()
1167 enable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_async_power_up()
1176 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
1178 dev_info(dev, "Power on setup success\n"); in mhi_async_power_up()
1183 mhi_cntrl->pm_state = MHI_PM_DISABLE; in mhi_async_power_up()
1184 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_async_power_up()
1194 struct device *dev = &mhi_cntrl->mhi_dev->dev; in __mhi_power_down()
1196 mutex_lock(&mhi_cntrl->pm_mutex); in __mhi_power_down()
1197 write_lock_irq(&mhi_cntrl->pm_lock); in __mhi_power_down()
1198 cur_state = mhi_cntrl->pm_state; in __mhi_power_down()
1200 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_power_down()
1201 mutex_unlock(&mhi_cntrl->pm_mutex); in __mhi_power_down()
1213 to_mhi_pm_state_str(mhi_cntrl->pm_state)); in __mhi_power_down()
1215 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; in __mhi_power_down()
1219 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; in __mhi_power_down()
1220 mhi_cntrl->dev_state = MHI_STATE_RESET; in __mhi_power_down()
1222 wake_up_all(&mhi_cntrl->state_event); in __mhi_power_down()
1224 write_unlock_irq(&mhi_cntrl->pm_lock); in __mhi_power_down()
1225 mutex_unlock(&mhi_cntrl->pm_mutex); in __mhi_power_down()
1235 flush_work(&mhi_cntrl->st_worker); in __mhi_power_down()
1237 disable_irq(mhi_cntrl->irq[0]); in __mhi_power_down()
1261 /* Some devices need more time to set ready during power up */ in mhi_sync_power_up()
1262 timeout_ms = mhi_cntrl->ready_timeout_ms ? in mhi_sync_power_up()
1263 mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; in mhi_sync_power_up()
1264 wait_event_timeout(mhi_cntrl->state_event, in mhi_sync_power_up()
1265 MHI_IN_MISSION_MODE(mhi_cntrl->ee) || in mhi_sync_power_up()
1266 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), in mhi_sync_power_up()
1269 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; in mhi_sync_power_up()
1279 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_force_rddm_mode()
1283 if (mhi_cntrl->ee == MHI_EE_RDDM) in mhi_force_rddm_mode()
1290 ret = wait_event_timeout(mhi_cntrl->state_event, in mhi_force_rddm_mode()
1291 mhi_cntrl->ee == MHI_EE_RDDM, in mhi_force_rddm_mode()
1292 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_force_rddm_mode()
1293 ret = ret ? 0 : -EIO; in mhi_force_rddm_mode()
1301 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_get()
1303 mhi_dev->dev_wake++; in mhi_device_get()
1304 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_device_get()
1305 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in mhi_device_get()
1308 mhi_cntrl->wake_get(mhi_cntrl, true); in mhi_device_get()
1309 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_device_get()
1315 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_get_sync()
1320 mhi_dev->dev_wake++; in mhi_device_get_sync()
1328 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_device_put()
1330 mhi_dev->dev_wake--; in mhi_device_put()
1331 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_device_put()
1332 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) in mhi_device_put()
1335 mhi_cntrl->wake_put(mhi_cntrl, false); in mhi_device_put()
1336 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_device_put()
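Taken together, the exported get/put calls give client drivers a refcounted wake API: bracket any burst of traffic with a balanced pair, and the controller only lets the device doze once nobody holds a reference. A hypothetical client helper showing typical usage:

#include <linux/mhi.h>

/* hold the device in M0 for the duration of one transfer burst */
static int do_burst(struct mhi_device *mhi_dev)
{
	int ret = mhi_device_get_sync(mhi_dev);

	if (ret)
		return ret;	/* device could not be brought to M0 */
	/* ... queue and complete transfers here ... */
	mhi_device_put(mhi_dev);
	return 0;
}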