Lines matching refs: mhi_cntrl

Cross-reference listing of every line referencing mhi_cntrl in the MHI endpoint bus driver (drivers/bus/mhi/ep/main.c in the Linux kernel). Each entry gives the file's own line number, the matching source fragment, and the enclosing function; the trailing "argument" and "local" tags mark lines where mhi_cntrl enters as a function parameter or as a local variable.

25 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
28 static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, in mhi_ep_send_event() argument
31 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_send_event()
36 mutex_lock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
37 ring = &mhi_cntrl->mhi_event[ring_idx].ring; in mhi_ep_send_event()
38 ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; in mhi_ep_send_event()
40 ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); in mhi_ep_send_event()
54 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
73 mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); in mhi_ep_send_event()
82 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
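The fragments above (lines 28-82) outline mhi_ep_send_event(): ring lookup, lazy ring start and element insertion all happen under event_lock, and the host MSI is raised only after the lock is dropped. A minimal sketch of that flow; the started flag and the mhi_ep_ring_add_element() helper are assumptions based on the surrounding driver, and el, bei and ring_idx are the function's parameters:

    struct mhi_ep_ring *ring;
    union mhi_ep_ring_ctx *ctx;
    int ret;

    mutex_lock(&mhi_cntrl->event_lock);
    ring = &mhi_cntrl->mhi_event[ring_idx].ring;
    ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];

    /* Start the ring lazily on first use, then queue the element */
    ret = ring->started ? 0 : mhi_ep_ring_start(mhi_cntrl, ring, ctx);
    if (!ret)
        ret = mhi_ep_ring_add_element(ring, el);
    mutex_unlock(&mhi_cntrl->event_lock);

    /*
     * BEI (Block Event Interrupt) set by the host requests interrupt
     * moderation, so the MSI is skipped in that case.
     */
    if (!ret && !bei)
        mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);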
87 static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, in mhi_ep_send_completion_event() argument
93 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); in mhi_ep_send_completion_event()
101 ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); in mhi_ep_send_completion_event()
102 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); in mhi_ep_send_completion_event()
107 int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) in mhi_ep_send_state_change_event() argument
112 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); in mhi_ep_send_state_change_event()
119 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); in mhi_ep_send_state_change_event()
120 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); in mhi_ep_send_state_change_event()
125 int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) in mhi_ep_send_ee_event() argument
130 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); in mhi_ep_send_ee_event()
137 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); in mhi_ep_send_ee_event()
138 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); in mhi_ep_send_ee_event()
143 static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) in mhi_ep_send_cmd_comp_event() argument
145 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; in mhi_ep_send_cmd_comp_event()
149 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); in mhi_ep_send_cmd_comp_event()
157 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); in mhi_ep_send_cmd_comp_event()
158 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); in mhi_ep_send_cmd_comp_event()
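Lines 87-158 show four thin wrappers (transfer completion, state change, EE change, command completion) sharing one shape: allocate a scratch event element from ev_ring_el_cache, fill it, send it on event ring 0 (the primary event ring) or the channel's er_index, and free it regardless of outcome. A sketch of the state-change variant, assuming the MHI_SC_EV_DWORD* encoding macros from the driver's common header:

    struct mhi_ring_element *event;
    int ret;

    event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
    if (!event)
        return -ENOMEM;

    /* Encode the state-change event payload (macro names assumed) */
    event->dword[0] = MHI_SC_EV_DWORD0(state);
    event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

    ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);    /* ring 0: primary event ring */
    kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

    return ret;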
165 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; in mhi_ep_process_cmd_ring() local
166 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_process_cmd_ring()
176 if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) { in mhi_ep_process_cmd_ring()
181 mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_process_cmd_ring()
182 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_process_cmd_ring()
191 ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, in mhi_ep_process_cmd_ring()
192 (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); in mhi_ep_process_cmd_ring()
195 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, in mhi_ep_process_cmd_ring()
208 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
211 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
213 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); in mhi_ep_process_cmd_ring()
232 ret = mhi_ep_create_device(mhi_cntrl, ch_id); in mhi_ep_process_cmd_ring()
235 mhi_ep_handle_syserr(mhi_cntrl); in mhi_ep_process_cmd_ring()
241 mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); in mhi_ep_process_cmd_ring()
253 mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); in mhi_ep_process_cmd_ring()
264 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
267 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
269 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); in mhi_ep_process_cmd_ring()
287 mhi_ep_ring_reset(mhi_cntrl, ch_ring); in mhi_ep_process_cmd_ring()
298 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
301 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
303 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); in mhi_ep_process_cmd_ring()
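mhi_ep_process_cmd_ring() (lines 165-303) dispatches START/STOP/RESET channel commands: validate ch_id against max_chan, start or reset the channel ring, update the channel state in the cached host context, and acknowledge with a command completion event. The chcfg update is a little-endian read-modify-write on a bitfield; a sketch for the START case, assuming the CHAN_CTX_CHSTATE_MASK and MHI_CH_STATE_* definitions from the driver's common header:

    u32 tmp;

    /* Flip the channel-state field inside the cached host channel context */
    tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
    tmp &= ~CHAN_CTX_CHSTATE_MASK;
    tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
    mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

    /* Tell the host the command succeeded */
    ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);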
330 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_queue_is_empty() local
331 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_is_empty()
340 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_read_completion() local
342 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_read_completion()
367 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, in mhi_ep_read_completion()
383 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, in mhi_ep_read_completion()
398 kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); in mhi_ep_read_completion()
401 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, in mhi_ep_read_channel() argument
404 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_read_channel()
405 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_read_channel()
441 buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); in mhi_ep_read_channel()
456 ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); in mhi_ep_read_channel()
476 kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); in mhi_ep_read_channel()
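mhi_ep_read_channel() (lines 401-476) pulls data host-to-device: a bounce buffer comes from tre_buf_cache, a descriptor points the controller's read_async() callback at the host-side TRE buffer, and mhi_ep_read_completion() later sends the completion event and frees the buffer (lines 340-398). A sketch of the descriptor setup, with the mhi_ep_buf_info field names assumed and tre_src_addr/tr_len illustrative locals; mhi_ep_queue_skb() (lines 551-606) mirrors this with write_async() in the device-to-host direction:

    struct mhi_ep_buf_info buf_info = {};

    buf_info.host_addr = tre_src_addr;       /* host buffer address taken from the TRE */
    buf_info.dev_addr = buf_addr;            /* bounce buffer from tre_buf_cache */
    buf_info.size = tr_len;
    buf_info.cb = mhi_ep_read_completion;    /* completion path at lines 340-398 */
    buf_info.cb_buf = buf_addr;
    buf_info.mhi_dev = mhi_chan->mhi_dev;

    ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
    if (ret)
        kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);    /* line 476 */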
483 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; in mhi_ep_process_ch_ring() local
488 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_process_ch_ring()
506 ret = mhi_ep_read_channel(mhi_cntrl, ring); in mhi_ep_process_ch_ring()
522 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_skb_completion() local
524 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_skb_completion()
538 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, in mhi_ep_skb_completion()
551 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_queue_skb() local
563 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_skb()
606 ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); in mhi_ep_queue_skb()
632 static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_cache_host_cfg() argument
635 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_cache_host_cfg()
639 mhi_ep_mmio_update_ner(mhi_cntrl); in mhi_ep_cache_host_cfg()
642 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); in mhi_ep_cache_host_cfg()
644 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; in mhi_ep_cache_host_cfg()
645 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; in mhi_ep_cache_host_cfg()
649 mhi_ep_mmio_get_chc_base(mhi_cntrl); in mhi_ep_cache_host_cfg()
652 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, in mhi_ep_cache_host_cfg()
653 &mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_cache_host_cfg()
654 (void __iomem **) &mhi_cntrl->ch_ctx_cache, in mhi_ep_cache_host_cfg()
662 mhi_ep_mmio_get_erc_base(mhi_cntrl); in mhi_ep_cache_host_cfg()
665 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, in mhi_ep_cache_host_cfg()
666 &mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_cache_host_cfg()
667 (void __iomem **) &mhi_cntrl->ev_ctx_cache, in mhi_ep_cache_host_cfg()
675 mhi_ep_mmio_get_crc_base(mhi_cntrl); in mhi_ep_cache_host_cfg()
678 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, in mhi_ep_cache_host_cfg()
679 &mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_cache_host_cfg()
680 (void __iomem **) &mhi_cntrl->cmd_ctx_cache, in mhi_ep_cache_host_cfg()
688 ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, in mhi_ep_cache_host_cfg()
689 (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); in mhi_ep_cache_host_cfg()
698 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_cache_host_cfg()
699 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); in mhi_ep_cache_host_cfg()
702 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_cache_host_cfg()
703 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); in mhi_ep_cache_host_cfg()
706 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_cache_host_cfg()
707 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); in mhi_ep_cache_host_cfg()
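mhi_ep_cache_host_cfg() (lines 632-707) maps and caches the three host context regions (channel, event, command) through the controller's alloc_map() callback, then starts the command ring from the cached command context. The fragments at lines 698-707 form a fall-through goto-unwind ladder: a jump to the first label frees all three mappings, newest first, while an earlier failure jumps further down to free only what was already mapped. Skeleton of that tail, with label names assumed:

    ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
                (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
    if (ret)
        goto err_cmd_ctx;       /* all three mappings exist at this point */

    return 0;

    err_cmd_ctx:
    mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
                (void __iomem *)mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
    err_ev_ctx:
    mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
                (void __iomem *)mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
    err_ch_ctx:
    mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
                (void __iomem *)mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

    return ret;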
712 static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_free_host_cfg() argument
716 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; in mhi_ep_free_host_cfg()
717 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; in mhi_ep_free_host_cfg()
720 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_free_host_cfg()
721 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); in mhi_ep_free_host_cfg()
723 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_free_host_cfg()
724 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); in mhi_ep_free_host_cfg()
726 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_free_host_cfg()
727 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); in mhi_ep_free_host_cfg()
730 static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_enable_int() argument
737 mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); in mhi_ep_enable_int()
738 mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); in mhi_ep_enable_int()
741 static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_enable() argument
743 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_enable()
752 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); in mhi_ep_enable()
755 mhi_ep_mmio_clear_reset(mhi_cntrl); in mhi_ep_enable()
766 ret = mhi_ep_cache_host_cfg(mhi_cntrl); in mhi_ep_enable()
772 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); in mhi_ep_enable()
775 mhi_ep_enable_int(mhi_cntrl); in mhi_ep_enable()
782 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); in mhi_ep_cmd_ring_worker() local
783 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; in mhi_ep_cmd_ring_worker()
784 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_cmd_ring_worker()
816 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); in mhi_ep_ch_ring_worker() local
817 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_ch_ring_worker()
825 spin_lock_irqsave(&mhi_cntrl->list_lock, flags); in mhi_ep_ch_ring_worker()
826 list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); in mhi_ep_ch_ring_worker()
827 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); in mhi_ep_ch_ring_worker()
834 chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_ch_ring_worker()
852 kmem_cache_free(mhi_cntrl->ring_item_cache, itr); in mhi_ep_ch_ring_worker()
859 kmem_cache_free(mhi_cntrl->ring_item_cache, itr); in mhi_ep_ch_ring_worker()
869 kmem_cache_free(mhi_cntrl->ring_item_cache, itr); in mhi_ep_ch_ring_worker()
874 kmem_cache_free(mhi_cntrl->ring_item_cache, itr); in mhi_ep_ch_ring_worker()
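mhi_ep_ch_ring_worker() (lines 816-874) drains the doorbell backlog: it splices ch_db_list into a private list under list_lock, then processes each entry without holding the lock, freeing every item back to ring_item_cache on all exit paths (lines 852-874). A minimal sketch, assuming struct mhi_ep_ring_item carries just a list node and a ring pointer:

    struct mhi_ep_ring_item *itr, *tmp;
    unsigned long flags;
    LIST_HEAD(head);

    /* Take the whole backlog in one shot; keep the lock hold time short */
    spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
    list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
    spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

    list_for_each_entry_safe(itr, tmp, &head, node) {
        struct mhi_ep_ring *ring = itr->ring;

        /* ... update the ring cache and process its elements ... */
        kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
    }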
880 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); in mhi_ep_state_worker() local
881 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_state_worker()
887 spin_lock_irqsave(&mhi_cntrl->list_lock, flags); in mhi_ep_state_worker()
888 list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); in mhi_ep_state_worker()
889 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); in mhi_ep_state_worker()
898 ret = mhi_ep_set_m0_state(mhi_cntrl); in mhi_ep_state_worker()
903 ret = mhi_ep_set_m3_state(mhi_cntrl); in mhi_ep_state_worker()
915 static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, in mhi_ep_queue_channel_db() argument
929 ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_queue_channel_db()
930 item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC); in mhi_ep_queue_channel_db()
940 spin_lock(&mhi_cntrl->list_lock); in mhi_ep_queue_channel_db()
941 list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); in mhi_ep_queue_channel_db()
942 spin_unlock(&mhi_cntrl->list_lock); in mhi_ep_queue_channel_db()
944 queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); in mhi_ep_queue_channel_db()
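mhi_ep_queue_channel_db() is the producer half, called from the channel-doorbell interrupt path, hence GFP_ATOMIC at line 930 and the plain (non-irqsave) spin_lock, which is safe because interrupts are already disabled in hardirq context while the worker takes the irqsave variant. It batches one list splice plus one queue_work() per doorbell register. Sketch, assuming ch_int is the 32-bit doorbell status slice and ch_idx its channel offset (0, 32, 64, 96):

    struct mhi_ep_ring_item *item;
    LIST_HEAD(head);
    u32 i;

    /* Collect one item per asserted doorbell bit into a local list first */
    for_each_set_bit(i, &ch_int, 32) {
        item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
        if (!item)
            return;

        item->ring = &mhi_cntrl->mhi_chan[ch_idx + i].ring;
        list_add_tail(&item->node, &head);
    }

    /* One lock acquisition and one work kick for the whole batch */
    spin_lock(&mhi_cntrl->list_lock);
    list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
    spin_unlock(&mhi_cntrl->list_lock);
    queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);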
953 static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_check_channel_interrupt() argument
958 if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) in mhi_ep_check_channel_interrupt()
965 ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; in mhi_ep_check_channel_interrupt()
967 mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); in mhi_ep_check_channel_interrupt()
968 mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), in mhi_ep_check_channel_interrupt()
969 mhi_cntrl->chdb[i].status); in mhi_ep_check_channel_interrupt()
974 static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, in mhi_ep_process_ctrl_interrupt() argument
984 spin_lock(&mhi_cntrl->list_lock); in mhi_ep_process_ctrl_interrupt()
985 list_add_tail(&item->node, &mhi_cntrl->st_transition_list); in mhi_ep_process_ctrl_interrupt()
986 spin_unlock(&mhi_cntrl->list_lock); in mhi_ep_process_ctrl_interrupt()
988 queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); in mhi_ep_process_ctrl_interrupt()
998 struct mhi_ep_cntrl *mhi_cntrl = data; in mhi_ep_irq() local
999 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_irq()
1005 int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); in mhi_ep_irq()
1006 mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); in mhi_ep_irq()
1011 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); in mhi_ep_irq()
1014 disable_irq_nosync(mhi_cntrl->irq); in mhi_ep_irq()
1015 schedule_work(&mhi_cntrl->reset_work); in mhi_ep_irq()
1019 mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); in mhi_ep_irq()
1025 queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); in mhi_ep_irq()
1029 mhi_ep_check_channel_interrupt(mhi_cntrl); in mhi_ep_irq()
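mhi_ep_irq() (lines 998-1029) is the top-level doorbell handler: read and immediately clear MHI_CTRL_INT_STATUS, then fan out. State changes go to the state worker, a host-triggered reset disables the IRQ and schedules reset_work (lines 1014-1015), command doorbells kick cmd_ring_work, and channel doorbells are scanned last. A condensed sketch; the status-bit mask names are assumptions:

    u32 int_value;
    bool mhi_reset;
    enum mhi_state state;

    /* Read and ack the control interrupt status in one pass */
    int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
    mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

    if (int_value & MHI_CTRL_INT_STATUS_MSK) {          /* mask name assumed */
        mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
        if (mhi_reset) {
            /* Host reset: quiesce the IRQ until the reset worker re-arms it */
            disable_irq_nosync(mhi_cntrl->irq);
            schedule_work(&mhi_cntrl->reset_work);
            return IRQ_HANDLED;
        }
        mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
    }

    if (int_value & MHI_CTRL_INT_STATUS_CRDB_MSK)       /* mask name assumed */
        queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);

    mhi_ep_check_channel_interrupt(mhi_cntrl);
    return IRQ_HANDLED;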
1034 static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_abort_transfer() argument
1042 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_abort_transfer()
1043 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
1059 flush_workqueue(mhi_cntrl->wq); in mhi_ep_abort_transfer()
1062 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); in mhi_ep_abort_transfer()
1065 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_abort_transfer()
1066 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
1070 ch_ring = &mhi_cntrl->mhi_chan[i].ring; in mhi_ep_abort_transfer()
1072 mhi_ep_ring_reset(mhi_cntrl, ch_ring); in mhi_ep_abort_transfer()
1077 for (i = 0; i < mhi_cntrl->event_rings; i++) { in mhi_ep_abort_transfer()
1078 ev_ring = &mhi_cntrl->mhi_event[i].ring; in mhi_ep_abort_transfer()
1082 mutex_lock(&mhi_cntrl->event_lock); in mhi_ep_abort_transfer()
1083 mhi_ep_ring_reset(mhi_cntrl, ev_ring); in mhi_ep_abort_transfer()
1084 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_abort_transfer()
1088 mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); in mhi_ep_abort_transfer()
1090 mhi_ep_free_host_cfg(mhi_cntrl); in mhi_ep_abort_transfer()
1091 mhi_ep_mmio_mask_interrupts(mhi_cntrl); in mhi_ep_abort_transfer()
1093 mhi_cntrl->enabled = false; in mhi_ep_abort_transfer()
1098 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); in mhi_ep_reset_worker() local
1101 mhi_ep_power_down(mhi_cntrl); in mhi_ep_reset_worker()
1103 mutex_lock(&mhi_cntrl->state_lock); in mhi_ep_reset_worker()
1106 mhi_ep_mmio_reset(mhi_cntrl); in mhi_ep_reset_worker()
1107 cur_state = mhi_cntrl->mhi_state; in mhi_ep_reset_worker()
1115 mhi_ep_power_up(mhi_cntrl); in mhi_ep_reset_worker()
1117 mutex_unlock(&mhi_cntrl->state_lock); in mhi_ep_reset_worker()
1125 void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_handle_syserr() argument
1127 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_handle_syserr()
1130 ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); in mhi_ep_handle_syserr()
1135 ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); in mhi_ep_handle_syserr()
1140 int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_power_up() argument
1142 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_power_up()
1149 mhi_ep_mmio_mask_interrupts(mhi_cntrl); in mhi_ep_power_up()
1150 mhi_ep_mmio_init(mhi_cntrl); in mhi_ep_power_up()
1152 mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, in mhi_ep_power_up()
1153 sizeof(*mhi_cntrl->mhi_event), in mhi_ep_power_up()
1155 if (!mhi_cntrl->mhi_event) in mhi_ep_power_up()
1159 mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); in mhi_ep_power_up()
1160 for (i = 0; i < mhi_cntrl->max_chan; i++) in mhi_ep_power_up()
1161 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); in mhi_ep_power_up()
1162 for (i = 0; i < mhi_cntrl->event_rings; i++) in mhi_ep_power_up()
1163 mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); in mhi_ep_power_up()
1165 mhi_cntrl->mhi_state = MHI_STATE_RESET; in mhi_ep_power_up()
1168 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); in mhi_ep_power_up()
1171 ret = mhi_ep_set_ready_state(mhi_cntrl); in mhi_ep_power_up()
1177 ret = mhi_ep_enable(mhi_cntrl); in mhi_ep_power_up()
1183 enable_irq(mhi_cntrl->irq); in mhi_ep_power_up()
1184 mhi_cntrl->enabled = true; in mhi_ep_power_up()
1189 kfree(mhi_cntrl->mhi_event); in mhi_ep_power_up()
1195 void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_power_down() argument
1197 if (mhi_cntrl->enabled) { in mhi_ep_power_down()
1198 mhi_ep_abort_transfer(mhi_cntrl); in mhi_ep_power_down()
1199 kfree(mhi_cntrl->mhi_event); in mhi_ep_power_down()
1200 disable_irq(mhi_cntrl->irq); in mhi_ep_power_down()
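mhi_ep_power_up() (lines 1140-1189) is strictly ordered: mask interrupts, re-init MMIO, allocate mhi_event, init the command, channel and event rings, advertise RESET state plus the AMSS execution environment, move to READY, enable the controller, and only then enable the IRQ and set enabled. mhi_ep_power_down() (lines 1195-1200) undoes that in reverse, freeing mhi_event only after mhi_ep_abort_transfer() has quiesced everything. A reconstruction of the teardown, which the fragments give almost verbatim:

    void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
    {
        if (mhi_cntrl->enabled) {
            mhi_ep_abort_transfer(mhi_cntrl);   /* stops channels, workers, rings */
            kfree(mhi_cntrl->mhi_event);        /* allocated in mhi_ep_power_up() */
            disable_irq(mhi_cntrl->irq);        /* re-enabled on next power up */
        }
    }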
1205 void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_suspend_channels() argument
1211 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_suspend_channels()
1212 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_suspend_channels()
1219 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); in mhi_ep_suspend_channels()
1230 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); in mhi_ep_suspend_channels()
1235 void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_resume_channels() argument
1241 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_resume_channels()
1242 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_resume_channels()
1249 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); in mhi_ep_resume_channels()
1260 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); in mhi_ep_resume_channels()
1270 mhi_dev->mhi_cntrl->mhi_dev = NULL; in mhi_ep_release_device()
1286 static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, in mhi_ep_alloc_device() argument
1304 dev->parent = mhi_cntrl->cntrl_dev; in mhi_ep_alloc_device()
1307 dev->parent = &mhi_cntrl->mhi_dev->dev; in mhi_ep_alloc_device()
1309 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_ep_alloc_device()
1321 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) in mhi_ep_create_device() argument
1323 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_create_device()
1324 struct device *dev = mhi_cntrl->cntrl_dev; in mhi_ep_create_device()
1335 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); in mhi_ep_create_device()
1353 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_ep_create_device()
1370 struct mhi_ep_cntrl *mhi_cntrl; in mhi_ep_destroy_device() local
1377 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_destroy_device()
1392 dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", in mhi_ep_destroy_device()
1402 static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, in mhi_ep_chan_init() argument
1406 struct device *dev = mhi_cntrl->cntrl_dev; in mhi_ep_chan_init()
1410 mhi_cntrl->max_chan = config->max_channels; in mhi_ep_chan_init()
1416 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), in mhi_ep_chan_init()
1418 if (!mhi_cntrl->mhi_chan) in mhi_ep_chan_init()
1427 if (chan >= mhi_cntrl->max_chan) { in mhi_ep_chan_init()
1429 chan, mhi_cntrl->max_chan); in mhi_ep_chan_init()
1440 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_ep_chan_init()
1450 kfree(mhi_cntrl->mhi_chan); in mhi_ep_chan_init()
1459 int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, in mhi_ep_register_controller() argument
1465 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) in mhi_ep_register_controller()
1468 if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || in mhi_ep_register_controller()
1469 !mhi_cntrl->read_async || !mhi_cntrl->write_async) in mhi_ep_register_controller()
1472 ret = mhi_ep_chan_init(mhi_cntrl, config); in mhi_ep_register_controller()
1476 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); in mhi_ep_register_controller()
1477 if (!mhi_cntrl->mhi_cmd) { in mhi_ep_register_controller()
1482 mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", in mhi_ep_register_controller()
1485 if (!mhi_cntrl->ev_ring_el_cache) { in mhi_ep_register_controller()
1490 mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, in mhi_ep_register_controller()
1492 if (!mhi_cntrl->tre_buf_cache) { in mhi_ep_register_controller()
1497 mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item", in mhi_ep_register_controller()
1500 if (!mhi_cntrl->ring_item_cache) { in mhi_ep_register_controller()
1505 INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); in mhi_ep_register_controller()
1506 INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); in mhi_ep_register_controller()
1507 INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); in mhi_ep_register_controller()
1508 INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); in mhi_ep_register_controller()
1510 mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); in mhi_ep_register_controller()
1511 if (!mhi_cntrl->wq) { in mhi_ep_register_controller()
1516 INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); in mhi_ep_register_controller()
1517 INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); in mhi_ep_register_controller()
1518 spin_lock_init(&mhi_cntrl->list_lock); in mhi_ep_register_controller()
1519 mutex_init(&mhi_cntrl->state_lock); in mhi_ep_register_controller()
1520 mutex_init(&mhi_cntrl->event_lock); in mhi_ep_register_controller()
1523 mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); in mhi_ep_register_controller()
1524 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); in mhi_ep_register_controller()
1531 mhi_cntrl->index = ret; in mhi_ep_register_controller()
1533 irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); in mhi_ep_register_controller()
1534 ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, in mhi_ep_register_controller()
1535 "doorbell_irq", mhi_cntrl); in mhi_ep_register_controller()
1537 dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); in mhi_ep_register_controller()
1542 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); in mhi_ep_register_controller()
1544 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); in mhi_ep_register_controller()
1549 ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); in mhi_ep_register_controller()
1554 mhi_cntrl->mhi_dev = mhi_dev; in mhi_ep_register_controller()
1567 free_irq(mhi_cntrl->irq, mhi_cntrl); in mhi_ep_register_controller()
1569 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); in mhi_ep_register_controller()
1571 destroy_workqueue(mhi_cntrl->wq); in mhi_ep_register_controller()
1573 kmem_cache_destroy(mhi_cntrl->ring_item_cache); in mhi_ep_register_controller()
1575 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); in mhi_ep_register_controller()
1577 kmem_cache_destroy(mhi_cntrl->tre_buf_cache); in mhi_ep_register_controller()
1579 kfree(mhi_cntrl->mhi_cmd); in mhi_ep_register_controller()
1581 kfree(mhi_cntrl->mhi_chan); in mhi_ep_register_controller()
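mhi_ep_register_controller() refuses to proceed unless cntrl_dev, mmio, irq and all four read/write callbacks are populated (lines 1465-1469), then builds the kmem caches, work items, workqueue and the mhi_ep%u controller device, with a full error-unwind ladder (lines 1567-1581). A hypothetical caller from an endpoint-function driver might look like the following; the callback names, channel table and version value are illustrative, not from this file:

    static const struct mhi_ep_channel_config mhi_channels[] = {
        { .name = "LOOPBACK", .num = 0, .num_elements = 64, .dir = DMA_TO_DEVICE },
        { .name = "LOOPBACK", .num = 1, .num_elements = 64, .dir = DMA_FROM_DEVICE },
    };

    static const struct mhi_ep_cntrl_config mhi_config = {
        .mhi_version = 0x1000000,                /* MHI v1.0; value illustrative */
        .max_channels = 128,
        .num_channels = ARRAY_SIZE(mhi_channels),
        .ch_cfg = mhi_channels,
    };

    /* Everything validated at lines 1465-1469 must be filled in first */
    mhi_cntrl->cntrl_dev = dev;
    mhi_cntrl->mmio = mmio_base;
    mhi_cntrl->irq = irq;
    mhi_cntrl->read_sync = epf_mhi_read_sync;    /* hypothetical callbacks */
    mhi_cntrl->write_sync = epf_mhi_write_sync;
    mhi_cntrl->read_async = epf_mhi_read_async;
    mhi_cntrl->write_async = epf_mhi_write_async;
    mhi_cntrl->raise_irq = epf_mhi_raise_irq;
    mhi_cntrl->alloc_map = epf_mhi_alloc_map;
    mhi_cntrl->unmap_free = epf_mhi_unmap_free;

    ret = mhi_ep_register_controller(mhi_cntrl, &mhi_config);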
1591 void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) in mhi_ep_unregister_controller() argument
1593 struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; in mhi_ep_unregister_controller()
1595 destroy_workqueue(mhi_cntrl->wq); in mhi_ep_unregister_controller()
1597 free_irq(mhi_cntrl->irq, mhi_cntrl); in mhi_ep_unregister_controller()
1599 kmem_cache_destroy(mhi_cntrl->tre_buf_cache); in mhi_ep_unregister_controller()
1600 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); in mhi_ep_unregister_controller()
1601 kmem_cache_destroy(mhi_cntrl->ring_item_cache); in mhi_ep_unregister_controller()
1602 kfree(mhi_cntrl->mhi_cmd); in mhi_ep_unregister_controller()
1603 kfree(mhi_cntrl->mhi_chan); in mhi_ep_unregister_controller()
1608 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); in mhi_ep_unregister_controller()