Lines matching refs: mhi_cntrl

Cross-reference listing: every source line that references the mhi_cntrl controller handle, apparently from the Linux MHI host core (drivers/bus/mhi/host/main.c). The leading number on each entry is the source line number; the trailing "in fn() argument/local" names the enclosing function and records whether mhi_cntrl is a parameter or a local variable there.

20 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,  in mhi_read_reg()  argument
23 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
26 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, in mhi_read_reg_field() argument
33 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); in mhi_read_reg_field()
42 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, in mhi_poll_reg_field() argument
51 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); in mhi_poll_reg_field()
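
The two field helpers above (source lines 26-51) wrap mhi_read_reg(): mhi_read_reg_field() masks and shifts one bit field out of a 32-bit register, and mhi_poll_reg_field() repeats that read until the field holds an expected value or the retry budget runs out. A minimal standalone sketch of the same mask-and-shift logic, assuming a nonzero mask — names and the sleep call are illustrative, not the kernel API:

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Extract a bit field: 'mask' selects the bits and must be nonzero;
     * __builtin_ctz(mask) is the shift of the field's lowest set bit. */
    static uint32_t reg_field_get(uint32_t reg, uint32_t mask)
    {
        return (reg & mask) >> __builtin_ctz(mask);
    }

    /* Re-read until the field equals 'want' or retries run out; the
     * driver likewise sleeps between reads. */
    static int poll_reg_field(const volatile uint32_t *reg, uint32_t mask,
                              uint32_t want, int retries, unsigned int delay_us)
    {
        while (retries--) {
            if (reg_field_get(*reg, mask) == want)
                return 0;
            usleep(delay_us);
        }
        return -ETIMEDOUT;
    }

The kernel helpers additionally propagate failures from the controller's read_reg callback, which this sketch omits.
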
64 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, in mhi_write_reg() argument
67 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
70 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, in mhi_write_reg_field() argument
77 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); in mhi_write_reg_field()
83 mhi_write_reg(mhi_cntrl, base, offset, tmp); in mhi_write_reg_field()
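
mhi_write_reg_field() (source lines 70-83) is the matching read-modify-write: fetch the register, clear the field, insert the new value, write the result back. Sketched standalone under the same nonzero-mask assumption (the real helper can also fail on the read and returns that error):

    #include <stdint.h>

    /* Read-modify-write one field of a 32-bit register, mirroring the
     * mhi_read_reg()/mhi_write_reg() sequence in the driver. */
    static void reg_field_set(volatile uint32_t *reg, uint32_t mask,
                              uint32_t val)
    {
        uint32_t tmp = *reg;

        tmp &= ~mask;                                /* clear the field  */
        tmp |= (val << __builtin_ctz(mask)) & mask;  /* insert new value */
        *reg = tmp;
    }
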
88 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, in mhi_write_db() argument
91 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); in mhi_write_db()
92 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); in mhi_write_db()
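
mhi_write_db() (source lines 88-92) publishes a 64-bit doorbell value as two 32-bit MMIO writes: the upper half at offset 4 first, then the lower half at offset 0 — presumably so a device that latches the doorbell on the low-dword write always sees a coherent 64-bit value. The split, as a standalone sketch:

    #include <stdint.h>

    /* Write a 64-bit doorbell as two 32-bit stores: high dword first
     * (offset 4), low dword last (offset 0). */
    static void write_db(volatile uint32_t *db, uint64_t val)
    {
        db[1] = (uint32_t)(val >> 32);  /* upper_32_bits() */
        db[0] = (uint32_t)val;          /* lower_32_bits() */
    }

In the driver both stores go through mhi_write_reg(), i.e. the controller's write_reg callback, which supplies the MMIO ordering a plain volatile store does not guarantee across architectures.
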
95 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, in mhi_db_brstmode() argument
102 mhi_write_db(mhi_cntrl, db_addr, db_val); in mhi_db_brstmode()
107 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, in mhi_db_brstmode_disable() argument
113 mhi_write_db(mhi_cntrl, db_addr, db_val); in mhi_db_brstmode_disable()
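
mhi_db_brstmode() and mhi_db_brstmode_disable() (source lines 95-113) are the two process_db strategies a ring can be configured with: with burst mode enabled the doorbell is written only while db_mode is set — afterwards the device is expected to pick up ring updates on its own until the flag is re-armed elsewhere in the driver — while the disabled variant rings on every update. A sketch of that callback pair, reusing write_db() from the previous sketch; struct and field names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    struct db_cfg {
        bool db_mode;   /* true when the device must be re-notified */
    };

    /* Burst mode: ring once, then stay quiet until db_mode is re-armed. */
    static void db_brstmode(struct db_cfg *cfg, volatile uint32_t *db,
                            uint64_t val)
    {
        if (cfg->db_mode) {
            write_db(db, val);
            cfg->db_mode = false;
        }
    }

    /* Burst mode disabled: every write-pointer update rings the bell. */
    static void db_brstmode_disable(struct db_cfg *cfg,
                                    volatile uint32_t *db, uint64_t val)
    {
        write_db(db, val);
    }
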
120 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
124 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) in mhi_ring_cmd_db() argument
131 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
134 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, in mhi_ring_chan_db() argument
149 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
153 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) in mhi_get_exec_env() argument
156 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
162 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) in mhi_get_mhi_state() argument
165 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
171 void mhi_soc_reset(struct mhi_controller *mhi_cntrl) in mhi_soc_reset() argument
173 if (mhi_cntrl->reset) { in mhi_soc_reset()
174 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
179 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
184 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, in mhi_map_single_no_bb() argument
187 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
190 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
196 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, in mhi_map_single_use_bb() argument
199 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
213 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, in mhi_unmap_single_no_bb() argument
216 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
220 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, in mhi_unmap_single_use_bb() argument
226 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
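
Source lines 184-228 show the two buffer-mapping strategies: mhi_map_single_no_bb() hands the client buffer straight to dma_map_single(), while mhi_map_single_use_bb() allocates a coherent bounce buffer and copies through it, for controllers that cannot DMA to arbitrary memory; the unmap counterparts at lines 213-228 undo each path. The DMA-API calls below are the real kernel interfaces; the struct, function names, and GFP flag are illustrative:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/string.h>

    struct demo_buf {
        void *v_addr;           /* client's virtual buffer      */
        void *bb_addr;          /* bounce buffer, if used       */
        dma_addr_t p_addr;      /* address handed to the device */
        size_t len;
        enum dma_data_direction dir;
    };

    /* Direct mapping: the device DMAs into the client buffer itself. */
    static int demo_map_no_bb(struct device *dev, struct demo_buf *b)
    {
        b->p_addr = dma_map_single(dev, b->v_addr, b->len, b->dir);
        return dma_mapping_error(dev, b->p_addr) ? -ENOMEM : 0;
    }

    /* Bounce buffer: coherent allocation plus a copy for outbound data. */
    static int demo_map_use_bb(struct device *dev, struct demo_buf *b)
    {
        b->bb_addr = dma_alloc_coherent(dev, b->len, &b->p_addr, GFP_ATOMIC);
        if (!b->bb_addr)
            return -ENOMEM;
        if (b->dir == DMA_TO_DEVICE)
            memcpy(b->bb_addr, b->v_addr, b->len);
        return 0;
    }
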
230 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, in get_nr_avail_ring_elements() argument
251 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, in mhi_add_ring_element() argument
261 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, in mhi_del_ring_element() argument
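
get_nr_avail_ring_elements(), mhi_add_ring_element(), and mhi_del_ring_element() (source lines 230-279) implement a classic single-producer/single-consumer ring: the write pointer advances as descriptors are queued, the read pointer as they retire, both with wraparound, and one slot stays empty so a full ring is distinguishable from an empty one. An index-based standalone sketch — the driver works on byte pointers and an element size, but the arithmetic is the same:

    #include <stddef.h>

    struct demo_ring {
        size_t rp, wp;  /* read / write indices */
        size_t nr_el;   /* ring capacity        */
    };

    /* Free slots between producer and consumer, keeping one slot empty
     * to disambiguate full from empty. */
    static size_t ring_avail(const struct demo_ring *r)
    {
        return (r->rp + r->nr_el - r->wp - 1) % r->nr_el;
    }

    /* Producer side, cf. mhi_add_ring_element(). */
    static void ring_add(struct demo_ring *r)
    {
        r->wp = (r->wp + 1) % r->nr_el;
    }

    /* Consumer side, cf. mhi_del_ring_element(). */
    static void ring_del(struct demo_ring *r)
    {
        r->rp = (r->rp + 1) % r->nr_el;
    }
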
281 struct mhi_controller *mhi_cntrl; in mhi_destroy_device() local
288 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
339 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count() local
344 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); in mhi_get_free_desc_count()
363 void mhi_create_devices(struct mhi_controller *mhi_cntrl) in mhi_create_devices() argument
367 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
370 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
371 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
373 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
375 mhi_dev = mhi_alloc_device(mhi_cntrl); in mhi_create_devices()
400 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
419 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
435 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler() local
446 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
447 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
452 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
456 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
483 struct mhi_controller *mhi_cntrl = priv; in mhi_intvec_threaded_handler() local
484 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
489 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
490 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
491 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
495 state = mhi_get_mhi_state(mhi_cntrl); in mhi_intvec_threaded_handler()
496 ee = mhi_get_exec_env(mhi_cntrl); in mhi_intvec_threaded_handler()
498 trace_mhi_intvec_states(mhi_cntrl, ee, state); in mhi_intvec_threaded_handler()
501 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_intvec_threaded_handler()
504 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
514 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
515 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
522 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
523 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
524 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_intvec_threaded_handler()
527 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
528 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_intvec_threaded_handler()
539 struct mhi_controller *mhi_cntrl = dev; in mhi_intvec_handler() local
542 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
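
mhi_intvec_handler() (source lines 539-542) is the hard-IRQ half: it only wakes state_event waiters and defers everything else. mhi_intvec_threaded_handler() (lines 483-528) does the slow work in thread context: take pm_lock, bail out if register access is no longer legal, sample the MHI state and execution environment, and fan a SYS_ERR detection out to RDDM (ramdump collection) or to the fatal-error/system-error paths. The request_threaded_irq() split below is the real kernel mechanism; the handler bodies are reduced to placeholders:

    #include <linux/interrupt.h>

    /* Hard-IRQ half: cheap, just hand off to the thread. */
    static irqreturn_t demo_intvec(int irq, void *data)
    {
        return IRQ_WAKE_THREAD;
    }

    /* Threaded half: may take locks and sleep; this is where the driver
     * samples MHI state/EE and dispatches error handling. */
    static irqreturn_t demo_intvec_thread(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static int demo_setup_irq(int irq, void *ctx)
    {
        return request_threaded_irq(irq, demo_intvec, demo_intvec_thread,
                                    IRQF_SHARED, "demo-mhi", ctx);
    }
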
547 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, in mhi_recycle_ev_ring_element() argument
567 static int parse_xfer_event(struct mhi_controller *mhi_cntrl, in parse_xfer_event() argument
572 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
610 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
641 mhi_del_ring_element(mhi_cntrl, buf_ring); in parse_xfer_event()
642 mhi_del_ring_element(mhi_cntrl, tre_ring); in parse_xfer_event()
651 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
653 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
683 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
685 MHI_DB_ACCESS_VALID(mhi_cntrl)) { in parse_xfer_event()
686 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in parse_xfer_event()
688 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
706 static int parse_rsc_event(struct mhi_controller *mhi_cntrl, in parse_rsc_event() argument
761 mhi_del_ring_element(mhi_cntrl, tre_ring); in parse_rsc_event()
770 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, in mhi_process_cmd_completion() argument
774 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
781 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
790 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
791 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
792 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
798 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
802 mhi_del_ring_element(mhi_cntrl, mhi_ring); in mhi_process_cmd_completion()
805 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, in mhi_process_ctrl_ev_ring() argument
812 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
814 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
824 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
828 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
839 trace_mhi_ctrl_event(mhi_cntrl, local_rp); in mhi_process_ctrl_ev_ring()
846 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
847 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
852 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
854 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
868 mhi_pm_m0_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
871 mhi_pm_m1_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
874 mhi_pm_m3_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
881 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
882 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_process_ctrl_ev_ring()
884 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
886 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_process_ctrl_ev_ring()
897 mhi_process_cmd_completion(mhi_cntrl, local_rp); in mhi_process_ctrl_ev_ring()
918 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
919 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
920 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
921 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
922 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
929 mhi_queue_state_transition(mhi_cntrl, st); in mhi_process_ctrl_ev_ring()
936 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
942 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
943 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
946 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_ctrl_ev_ring()
954 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); in mhi_process_ctrl_ev_ring()
959 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
968 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
971 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) in mhi_process_ctrl_ev_ring()
973 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
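
mhi_process_ctrl_ev_ring() (source lines 805-976) is the control-path consumer loop: while the device's published read pointer in the shared event-ring context is ahead of the local cursor, classify each event — MHI state change, command completion, EE change, or a transfer event routed to its channel — recycle the element, and finally re-ring the event-ring doorbell under pm_lock if doorbell access is still valid (lines 968-973). A standalone, index-based skeleton of that loop; note the driver also validates the device-supplied pointer before dereferencing it (the "invalid rp" errors at lines 828 and 959):

    #include <stddef.h>
    #include <stdint.h>

    struct demo_ev_ring {
        uint32_t *base;
        size_t nr_el;
        size_t local_rp;                /* host-side cursor        */
        const volatile size_t *dev_rp;  /* published by the device */
    };

    /* Drain up to 'quota' events; the driver's callers pass U32_MAX. */
    static uint32_t demo_process_events(struct demo_ev_ring *er,
                                        void (*handle)(const uint32_t *ev),
                                        uint32_t quota)
    {
        uint32_t count = 0;

        while (count < quota && er->local_rp != *er->dev_rp) {
            handle(&er->base[er->local_rp]);
            /* cf. mhi_recycle_ev_ring_element(): advance with wrap */
            er->local_rp = (er->local_rp + 1) % er->nr_el;
            count++;
        }
        /* the caller then re-rings the event-ring doorbell under
         * pm_lock, only if MHI_DB_ACCESS_VALID() still holds */
        return count;
    }

mhi_process_data_event_ring() (lines 978-1047) follows the same skeleton for data channels, routing each event through parse_xfer_event() or parse_rsc_event().
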
978 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, in mhi_process_data_event_ring() argument
985 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
991 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
995 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1006 trace_mhi_data_event(mhi_cntrl, local_rp); in mhi_process_data_event_ring()
1010 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1016 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1017 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1018 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1021 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_data_event_ring()
1024 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_data_event_ring()
1029 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); in mhi_process_data_event_ring()
1034 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1042 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1045 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) in mhi_process_data_event_ring()
1047 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task() local
1059 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1066 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task() local
1067 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1077 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1083 mhi_trigger_resume(mhi_cntrl); in mhi_ctrl_ev_task()
1089 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1096 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1097 state = mhi_get_mhi_state(mhi_cntrl); in mhi_ctrl_ev_task()
1100 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_ctrl_ev_task()
1103 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1105 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_ctrl_ev_task()
1109 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, in mhi_is_ring_full() argument
1123 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue() local
1130 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1133 ret = mhi_is_ring_full(mhi_cntrl, tre_ring); in mhi_queue()
1137 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); in mhi_queue()
1141 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1147 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1150 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1153 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1155 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_queue()
1156 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in mhi_queue()
1159 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1161 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
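
mhi_queue() (source lines 1109-1182) is the producer path: refuse when the PM state is in error, refuse when the transfer ring is full, generate the TRE, then, under the pm_lock read lock, take a runtime-PM reference, account the packet, and ring the channel doorbell only if doorbell access is currently valid. A kernel-style sketch with the locking and atomic primitives real and the MHI internals abstracted as callbacks; it deliberately simplifies the direction-dependent runtime-PM and pending_pkts accounting visible at lines 1147-1159:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_ctrl {
        rwlock_t pm_lock;
        atomic_t pending_pkts;
        bool pm_error;          /* cf. MHI_PM_IN_ERROR_STATE() */
        bool db_ok;             /* cf. MHI_DB_ACCESS_VALID()   */
    };

    static int demo_queue(struct demo_ctrl *c, bool (*ring_full)(void),
                          int (*gen_tre)(void), void (*ring_db)(void))
    {
        unsigned long flags;
        int ret;

        if (unlikely(c->pm_error))
            return -EIO;
        if (ring_full())
            return -EAGAIN;

        ret = gen_tre();        /* build the descriptor, advance wp */
        if (ret)
            return ret;

        read_lock_irqsave(&c->pm_lock, flags);
        atomic_inc(&c->pending_pkts);   /* holds off suspend    */
        if (likely(c->db_ok))
            ring_db();                  /* publish wp to device */
        read_unlock_irqrestore(&c->pm_lock, flags);

        return 0;
    }

The balancing atomic_dec()/runtime_put() happens on the completion side, in parse_xfer_event() (lines 651-653) and in mhi_reset_data_chan() (lines 1596-1598).
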
1203 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, in mhi_gen_tre() argument
1231 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1248 trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre); in mhi_gen_tre()
1250 mhi_add_ring_element(mhi_cntrl, tre_ring); in mhi_gen_tre()
1251 mhi_add_ring_element(mhi_cntrl, buf_ring); in mhi_gen_tre()
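
mhi_gen_tre() (source lines 1203-1251) maps the buffer through the controller's map_single strategy (line 1231), fills the transfer ring element — a 64-bit buffer pointer plus two dwords carrying the length and the chain/IEOB/IEOT/BEI flags — and then advances both the transfer ring and its shadow buffer ring (lines 1250-1251) so the two stay in lockstep. The element shape below matches the driver's generic ring element; the flag bit position is a placeholder, not the real device ABI:

    #include <asm/byteorder.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* MHI ring element: 64-bit pointer + two dwords (length, flags). */
    struct demo_tre {
        __le64 ptr;
        __le32 dword[2];
    };

    static void demo_fill_data_tre(struct demo_tre *tre, u64 dma_addr,
                                   u32 len, bool ieot)
    {
        tre->ptr = cpu_to_le64(dma_addr);
        tre->dword[0] = cpu_to_le32(len);   /* transfer length */
        /* interrupt-on-end-of-transfer flag; BIT(9) is illustrative,
         * the real encoding lives in the driver's TRE macros */
        tre->dword[1] = cpu_to_le32(ieot ? BIT(9) : 0);
    }
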
1273 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full() local
1278 return mhi_is_ring_full(mhi_cntrl, tre_ring); in mhi_queue_is_full()
1282 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, in mhi_send_cmd() argument
1287 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1289 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1296 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { in mhi_send_cmd()
1325 mhi_add_ring_element(mhi_cntrl, ring); in mhi_send_cmd()
1326 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1327 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_send_cmd()
1328 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); in mhi_send_cmd()
1329 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1335 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, in mhi_update_channel_state() argument
1343 trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating")); in mhi_update_channel_state()
1378 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1381 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1384 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); in mhi_update_channel_state()
1392 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1410 trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated")); in mhi_update_channel_state()
1412 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1413 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
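
mhi_update_channel_state() (source lines 1335-1415) wraps a channel command in the usual synchronous pattern: take a device-wake and runtime-PM reference (lines 1378-1381), send the command via mhi_send_cmd(), block on a completion bounded by mhi_cntrl->timeout_ms (line 1392), and drop the references on the way out (lines 1412-1413). The completion APIs below are real; send_cmd is an illustrative stand-in:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Send a command, then wait for the completion that the event-ring
     * handler fires when the command-completion event arrives. */
    static int demo_change_state(struct completion *done,
                                 int (*send_cmd)(void),
                                 unsigned int timeout_ms)
    {
        unsigned long left;
        int ret;

        reinit_completion(done);
        ret = send_cmd();       /* rings the command doorbell */
        if (ret)
            return ret;

        left = wait_for_completion_timeout(done,
                                           msecs_to_jiffies(timeout_ms));
        return left ? 0 : -ETIMEDOUT;
    }
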
1418 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, in mhi_unprepare_channel() argument
1426 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1428 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1433 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, in mhi_unprepare_channel()
1445 mhi_reset_chan(mhi_cntrl, mhi_chan); in mhi_unprepare_channel()
1446 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_unprepare_channel()
1453 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, in mhi_prepare_channel() argument
1459 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1461 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1469 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1474 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, in mhi_prepare_channel()
1484 int nr_el = get_nr_avail_ring_elements(mhi_cntrl, in mhi_prepare_channel()
1486 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1502 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); in mhi_prepare_channel()
1509 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1510 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { in mhi_prepare_channel()
1512 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1515 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1524 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1533 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
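
mhi_prepare_channel() (source lines 1453-1535) initializes the channel context, moves the channel to the running state, and, for an inbound channel that pre-allocates, stocks every free ring element with a buffer of mhi_cntrl->buffer_len via mhi_gen_tre(), then rings the doorbell once for the whole batch (lines 1484-1515). A sketch of that batch pre-queue; allocation flags and helper names are illustrative:

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int demo_prequeue_rx(size_t free_slots, size_t buf_len,
                                int (*queue_one)(void *buf, size_t len),
                                void (*ring_db)(void))
    {
        size_t i;

        for (i = 0; i < free_slots; i++) {
            void *buf = kmalloc(buf_len, GFP_KERNEL);
            int ret;

            if (!buf)
                return -ENOMEM;
            ret = queue_one(buf, buf_len);  /* one TRE per buffer */
            if (ret) {
                kfree(buf);
                return ret;
            }
        }
        ring_db();      /* a single doorbell publishes the batch */
        return 0;
    }
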
1538 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, in mhi_mark_stale_events() argument
1546 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1559 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1581 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, in mhi_reset_data_chan() argument
1596 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1598 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1602 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1604 mhi_del_ring_element(mhi_cntrl, buf_ring); in mhi_reset_data_chan()
1605 mhi_del_ring_element(mhi_cntrl, tre_ring); in mhi_reset_data_chan()
1616 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) in mhi_reset_chan() argument
1626 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1627 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1628 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1630 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); in mhi_reset_chan()
1632 mhi_reset_data_chan(mhi_cntrl, mhi_chan); in mhi_reset_chan()
1634 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1640 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer() local
1648 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); in __mhi_prepare_for_transfer()
1661 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in __mhi_prepare_for_transfer()
1681 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer() local
1690 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in mhi_unprepare_from_transfer()
1695 int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset) in mhi_get_channel_doorbell_offset() argument
1697 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_get_channel_doorbell_offset()
1698 void __iomem *base = mhi_cntrl->regs; in mhi_get_channel_doorbell_offset()
1701 ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset); in mhi_get_channel_doorbell_offset()
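
mhi_get_channel_doorbell_offset() (source lines 1695-1702) reads the CHDBOFF register to learn where the channel-doorbell array starts relative to the MMIO base. Per-channel doorbell addresses are derived from that offset; assuming the 8-byte per-channel stride used by the upstream init code (each doorbell is a low/high dword pair), the lookup is just:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Channel N's doorbell: array base from CHDBOFF, 8-byte stride
     * per channel (assumed low dword + high dword per doorbell). */
    static void __iomem *demo_chan_db(void __iomem *regs, u32 chdb_offset,
                                      unsigned int chan)
    {
        return regs + chdb_offset + chan * 8;
    }
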