Lines matching refs: mhi_chan
(format per entry: source line number, matching line, enclosing function; "local" marks a local variable declaration)

168 struct mhi_ep_chan *mhi_chan;  in mhi_ep_process_cmd_ring()  local
176 if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) { in mhi_ep_process_cmd_ring()
181 mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_process_cmd_ring()
182 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_process_cmd_ring()
188 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
203 mhi_chan->rd_offset = ch_ring->rd_offset; in mhi_ep_process_cmd_ring()
207 mhi_chan->state = MHI_CH_STATE_RUNNING; in mhi_ep_process_cmd_ring()
220 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
231 if (!(ch_id % 2) && !mhi_chan->mhi_dev) { in mhi_ep_process_cmd_ring()
251 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
256 if (mhi_chan->xfer_cb) { in mhi_ep_process_cmd_ring()
259 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_cmd_ring()
263 mhi_chan->state = MHI_CH_STATE_STOP; in mhi_ep_process_cmd_ring()
276 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
285 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
290 if (mhi_chan->xfer_cb) { in mhi_ep_process_cmd_ring()
293 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_cmd_ring()
297 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_process_cmd_ring()
310 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
321 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
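
The mhi_ep_process_cmd_ring() hits above all follow one pattern: validate the channel id against max_chan and the configured name, then perform every state transition and client notification under the per-channel mutex. A condensed sketch of the channel-start path, assuming the driver's internal struct definitions (ring setup, event generation and error handling are omitted; the helper name and error code are hypothetical):

/* Sketch only: mirrors the locking/state pattern visible in the
 * mhi_ep_process_cmd_ring() lines above, not the actual driver code.
 */
static int mhi_ep_start_channel_sketch(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;

	/* Reject out-of-range or unconfigured channel ids up front */
	if (ch_id >= mhi_cntrl->max_chan || !mhi_cntrl->mhi_chan[ch_id].name)
		return -ENODEV;	/* assumed error code */

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	/* State transitions happen only under the per-channel mutex */
	mutex_lock(&mhi_chan->lock);
	mhi_chan->rd_offset = ch_ring->rd_offset;	/* sync local read index */
	mhi_chan->state = MHI_CH_STATE_RUNNING;
	mutex_unlock(&mhi_chan->lock);

	return 0;
}

The stop and reset branches (source lines 251-321) reuse the same lock but additionally deliver a struct mhi_result through xfer_cb before moving the state to MHI_CH_STATE_STOP or MHI_CH_STATE_DISABLED; the "!(ch_id % 2)" check at source line 231 suggests a client device is created once per even/odd channel pair.
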
328 struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : in mhi_ep_queue_is_empty() local
331 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_is_empty()
333 return !!(mhi_chan->rd_offset == ring->wr_offset); in mhi_ep_queue_is_empty()
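
mhi_ep_queue_is_empty() compares the channel's local read offset against the host's write offset, and the updates at source lines 469 and 618 advance that read offset modulo the ring size. A minimal, self-contained model of that index arithmetic (plain C with stand-in types; the kernel structures are reduced to the three fields involved):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the ring bookkeeping visible in the listing: rd_offset is the
 * endpoint's consumer index, wr_offset the host's producer index.
 */
struct ring_model {
	unsigned int rd_offset;
	unsigned int wr_offset;
	unsigned int ring_size;
};

/* Mirrors mhi_ep_queue_is_empty(): empty once the reader catches up. */
static bool ring_is_empty(const struct ring_model *r)
{
	return r->rd_offset == r->wr_offset;
}

/* Mirrors the "(rd_offset + 1) % ring_size" advance at lines 469 and 618. */
static void ring_advance(struct ring_model *r)
{
	r->rd_offset = (r->rd_offset + 1) % r->ring_size;
}

int main(void)
{
	struct ring_model r = { .rd_offset = 6, .wr_offset = 2, .ring_size = 8 };

	/* Drain: 6 -> 7 -> 0 -> 1 -> 2, at which point the ring is empty. */
	while (!ring_is_empty(&r))
		ring_advance(&r);

	printf("drained, rd_offset = %u\n", r.rd_offset);
	return 0;
}

The do/while loop at source line 513 in mhi_ep_process_ch_ring() is exactly this drain pattern: keep reading until mhi_ep_queue_is_empty() reports that the read offset has caught up.
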
341 struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; in mhi_ep_read_completion() local
342 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_read_completion()
347 if (mhi_chan->xfer_cb) { in mhi_ep_read_completion()
349 result.dir = mhi_chan->dir; in mhi_ep_read_completion()
352 mhi_chan->xfer_cb(mhi_dev, &result); in mhi_ep_read_completion()
371 dev_err(&mhi_chan->mhi_dev->dev, in mhi_ep_read_completion()
387 dev_err(&mhi_chan->mhi_dev->dev, in mhi_ep_read_completion()
404 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_read_channel() local
419 if (mhi_chan->state != MHI_CH_STATE_RUNNING) { in mhi_ep_read_channel()
424 el = &ring->ring_cache[mhi_chan->rd_offset]; in mhi_ep_read_channel()
427 if (mhi_chan->tre_bytes_left) { in mhi_ep_read_channel()
428 dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); in mhi_ep_read_channel()
429 tr_len = min(buf_left, mhi_chan->tre_bytes_left); in mhi_ep_read_channel()
431 mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); in mhi_ep_read_channel()
432 mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); in mhi_ep_read_channel()
433 mhi_chan->tre_bytes_left = mhi_chan->tre_size; in mhi_ep_read_channel()
435 tr_len = min(buf_left, mhi_chan->tre_size); in mhi_ep_read_channel()
438 read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; in mhi_ep_read_channel()
445 buf_info.host_addr = mhi_chan->tre_loc + read_offset; in mhi_ep_read_channel()
450 buf_info.mhi_dev = mhi_chan->mhi_dev; in mhi_ep_read_channel()
452 if (mhi_chan->tre_bytes_left - tr_len) in mhi_ep_read_channel()
458 dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); in mhi_ep_read_channel()
463 mhi_chan->tre_bytes_left -= tr_len; in mhi_ep_read_channel()
465 if (!mhi_chan->tre_bytes_left) { in mhi_ep_read_channel()
469 mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; in mhi_ep_read_channel()
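
The mhi_ep_read_channel() lines carry the TRE byte accounting: a fresh transfer ring element is latched into tre_loc/tre_size, tre_bytes_left tracks how much of it is still unread so a short client buffer can resume mid-element, and the ring's read offset only advances once the element is fully consumed. A self-contained model of just that arithmetic (field names follow the listing; the host-memory read is replaced by a printf and the element decode macros by plain parameters):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the per-channel TRE bookkeeping fields in the listing. */
struct chan_model {
	uint64_t tre_loc;	 /* host address of the current TRE buffer */
	uint32_t tre_size;	 /* total length of the current TRE buffer */
	uint32_t tre_bytes_left; /* bytes of the current TRE not yet consumed */
};

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Consume up to buf_left bytes of a TRE of tre_len bytes at tre_ptr,
 * mirroring source lines 427-469: resume mid-TRE when tre_bytes_left is
 * non-zero, otherwise latch a fresh element. Returns the chunk length and
 * reports via *tre_done whether the element is finished (which is when the
 * real driver advances rd_offset).
 */
static uint32_t consume_tre(struct chan_model *c, uint64_t tre_ptr,
			    uint32_t tre_len, uint32_t buf_left, int *tre_done)
{
	uint32_t tr_len, read_offset;

	if (c->tre_bytes_left) {
		tr_len = MIN(buf_left, c->tre_bytes_left);
	} else {
		c->tre_loc = tre_ptr;
		c->tre_size = tre_len;
		c->tre_bytes_left = c->tre_size;
		tr_len = MIN(buf_left, c->tre_size);
	}

	read_offset = c->tre_size - c->tre_bytes_left;
	printf("read %u bytes from host address 0x%llx\n",
	       (unsigned int)tr_len, (unsigned long long)(c->tre_loc + read_offset));

	c->tre_bytes_left -= tr_len;
	*tre_done = (c->tre_bytes_left == 0);
	return tr_len;
}

int main(void)
{
	struct chan_model c = { 0 };
	int done;

	/* A 100-byte element consumed through a 64-byte client buffer. */
	consume_tre(&c, 0x1000, 100, 64, &done);	/* 64 bytes, element not done */
	consume_tre(&c, 0x1000, 100, 64, &done);	/* remaining 36 bytes, done */
	return 0;
}
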
485 struct mhi_ep_chan *mhi_chan; in mhi_ep_process_ch_ring() local
488 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_process_ch_ring()
494 if (!mhi_chan->xfer_cb) { in mhi_ep_process_ch_ring()
495 dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); in mhi_ep_process_ch_ring()
501 result.dir = mhi_chan->dir; in mhi_ep_process_ch_ring()
502 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_ch_ring()
508 dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); in mhi_ep_process_ch_ring()
513 } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); in mhi_ep_process_ch_ring()
523 struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; in mhi_ep_skb_completion() local
524 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_skb_completion()
530 if (mhi_chan->xfer_cb) { in mhi_ep_skb_completion()
532 result.dir = mhi_chan->dir; in mhi_ep_skb_completion()
535 mhi_chan->xfer_cb(mhi_dev, &result); in mhi_ep_skb_completion()
552 struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; in mhi_ep_queue_skb() local
553 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_ep_queue_skb()
563 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_skb()
565 mutex_lock(&mhi_chan->lock); in mhi_ep_queue_skb()
569 if (mhi_chan->state != MHI_CH_STATE_RUNNING) { in mhi_ep_queue_skb()
581 el = &ring->ring_cache[mhi_chan->rd_offset]; in mhi_ep_queue_skb()
618 mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; in mhi_ep_queue_skb()
621 mutex_unlock(&mhi_chan->lock); in mhi_ep_queue_skb()
626 mutex_unlock(&mhi_chan->lock); in mhi_ep_queue_skb()
834 chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_ch_ring_worker()
929 ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_queue_channel_db()
1038 struct mhi_ep_chan *mhi_chan; in mhi_ep_abort_transfer() local
1043 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
1044 if (!mhi_chan->ring.started) in mhi_ep_abort_transfer()
1047 mutex_lock(&mhi_chan->lock); in mhi_ep_abort_transfer()
1049 if (mhi_chan->xfer_cb) { in mhi_ep_abort_transfer()
1052 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_abort_transfer()
1055 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_abort_transfer()
1056 mutex_unlock(&mhi_chan->lock); in mhi_ep_abort_transfer()
1066 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
1067 if (!mhi_chan->ring.started) in mhi_ep_abort_transfer()
1070 ch_ring = &mhi_cntrl->mhi_chan[i].ring; in mhi_ep_abort_transfer()
1071 mutex_lock(&mhi_chan->lock); in mhi_ep_abort_transfer()
1073 mutex_unlock(&mhi_chan->lock); in mhi_ep_abort_transfer()
1161 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); in mhi_ep_power_up()
1207 struct mhi_ep_chan *mhi_chan; in mhi_ep_suspend_channels() local
1212 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_suspend_channels()
1214 if (!mhi_chan->mhi_dev) in mhi_ep_suspend_channels()
1217 mutex_lock(&mhi_chan->lock); in mhi_ep_suspend_channels()
1221 mutex_unlock(&mhi_chan->lock); in mhi_ep_suspend_channels()
1225 dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); in mhi_ep_suspend_channels()
1227 mhi_chan->state = MHI_CH_STATE_SUSPENDED; in mhi_ep_suspend_channels()
1231 mutex_unlock(&mhi_chan->lock); in mhi_ep_suspend_channels()
1237 struct mhi_ep_chan *mhi_chan; in mhi_ep_resume_channels() local
1242 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_resume_channels()
1244 if (!mhi_chan->mhi_dev) in mhi_ep_resume_channels()
1247 mutex_lock(&mhi_chan->lock); in mhi_ep_resume_channels()
1251 mutex_unlock(&mhi_chan->lock); in mhi_ep_resume_channels()
1255 dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); in mhi_ep_resume_channels()
1257 mhi_chan->state = MHI_CH_STATE_RUNNING; in mhi_ep_resume_channels()
1261 mutex_unlock(&mhi_chan->lock); in mhi_ep_resume_channels()
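
The suspend/resume hits differ from the other lock-protected state changes mainly in their guards: channels with no bound client device are skipped outright, and the early unlocks at source lines 1221 and 1251 suggest a state check that bails out while still holding the mutex. A compressed sketch of the suspend side, assuming the driver's internal types (the exact skip condition is an assumption):

/* Sketch of the walk in mhi_ep_suspend_channels(); only channels with a
 * client device and (assumed) in the RUNNING state are moved to SUSPENDED.
 */
static void mhi_ep_suspend_channels_sketch(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		/* No client bound to this channel: nothing to suspend */
		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Assumed guard: skip channels that are not running */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		mutex_unlock(&mhi_chan->lock);
	}
}

mhi_ep_resume_channels() is the mirror image: the same walk and guards, with the state moved back to MHI_CH_STATE_RUNNING at source line 1257.
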
1323 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_create_device() local
1329 if (strcmp(mhi_chan->name, mhi_chan[1].name)) { in mhi_ep_create_device()
1331 mhi_chan->name, mhi_chan[1].name); in mhi_ep_create_device()
1340 mhi_dev->ul_chan = mhi_chan; in mhi_ep_create_device()
1342 mhi_chan->mhi_dev = mhi_dev; in mhi_ep_create_device()
1345 mhi_chan++; in mhi_ep_create_device()
1346 mhi_dev->dl_chan = mhi_chan; in mhi_ep_create_device()
1348 mhi_chan->mhi_dev = mhi_dev; in mhi_ep_create_device()
1351 mhi_dev->name = mhi_chan->name; in mhi_ep_create_device()
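
The mhi_ep_create_device() hits show how a client device binds two adjacent channels: the entry at ch_id becomes ul_chan, the next entry becomes dl_chan, and the pair must carry the same configured name (which also becomes the device name). A sketch of that pairing, assuming the driver's internal types (the helper name and error code are hypothetical):

/* Sketch of the pairing in mhi_ep_create_device(); ch_id is expected to be
 * the even channel of a UL/DL pair, matching the "!(ch_id % 2)" gate at
 * source line 231.
 */
static int mhi_ep_pair_channels_sketch(struct mhi_ep_cntrl *mhi_cntrl,
				       struct mhi_ep_device *mhi_dev, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];

	/* Both halves of the pair must be configured with the same name */
	if (strcmp(mhi_chan->name, mhi_chan[1].name))
		return -EINVAL;	/* assumed error code */

	/* ch_id is the UL half of the client device ... */
	mhi_dev->ul_chan = mhi_chan;
	mhi_chan->mhi_dev = mhi_dev;

	/* ... and ch_id + 1 is the DL half */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	mhi_chan->mhi_dev = mhi_dev;

	/* The device takes its name from the channel pair */
	mhi_dev->name = mhi_chan->name;
	return 0;
}
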
1416 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), in mhi_ep_chan_init()
1418 if (!mhi_cntrl->mhi_chan) in mhi_ep_chan_init()
1422 struct mhi_ep_chan *mhi_chan; in mhi_ep_chan_init() local
1440 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_ep_chan_init()
1441 mhi_chan->name = ch_cfg->name; in mhi_ep_chan_init()
1442 mhi_chan->chan = chan; in mhi_ep_chan_init()
1443 mhi_chan->dir = ch_cfg->dir; in mhi_ep_chan_init()
1444 mutex_init(&mhi_chan->lock); in mhi_ep_chan_init()
1450 kfree(mhi_cntrl->mhi_chan); in mhi_ep_chan_init()
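
mhi_ep_chan_init() allocates one zeroed mhi_ep_chan slot per possible channel id with kcalloc(), fills only the configured entries (name, channel number, direction), and initialises each per-channel mutex; the kfree() at source line 1450 is the error-path cleanup of that table. A sketch of the setup, assuming the driver's internal types, a GFP_KERNEL allocation (the flags argument is cut off in the listing), and a ch_cfg->num field for the channel id:

/* Sketch of the channel-table setup in mhi_ep_chan_init(). */
static int mhi_ep_chan_init_sketch(struct mhi_ep_cntrl *mhi_cntrl,
				   const struct mhi_ep_channel_config *ch_cfg,
				   u32 num_channels)
{
	u32 i;

	/* One zeroed slot per possible channel id */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan,
				      sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < num_channels; i++, ch_cfg++) {
		struct mhi_ep_chan *mhi_chan;
		u32 chan = ch_cfg->num;	/* assumed field for the channel id */

		if (chan >= mhi_cntrl->max_chan) {
			kfree(mhi_cntrl->mhi_chan);	/* cf. line 1450 */
			return -EINVAL;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;
}

The kfree() calls at source lines 1581 and 1603 release the same table on the controller-register error path and at unregister time.
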
1581 kfree(mhi_cntrl->mhi_chan); in mhi_ep_register_controller()
1603 kfree(mhi_cntrl->mhi_chan); in mhi_ep_unregister_controller()
1630 struct mhi_ep_chan *mhi_chan; in mhi_ep_driver_remove() local
1639 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_ep_driver_remove()
1641 if (!mhi_chan) in mhi_ep_driver_remove()
1644 mutex_lock(&mhi_chan->lock); in mhi_ep_driver_remove()
1646 if (mhi_chan->xfer_cb) { in mhi_ep_driver_remove()
1649 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_driver_remove()
1652 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_driver_remove()
1653 mhi_chan->xfer_cb = NULL; in mhi_ep_driver_remove()
1654 mutex_unlock(&mhi_chan->lock); in mhi_ep_driver_remove()
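
mhi_ep_driver_remove() walks both directions of the client device, and for each bound channel notifies the client one last time, marks the channel disabled, and detaches the callback, all under the channel mutex. A sketch of that teardown, assuming the driver's internal types (the completion status code is an assumption):

/* Sketch of the per-direction teardown in mhi_ep_driver_remove(). */
static void mhi_ep_disable_device_sketch(struct mhi_ep_device *mhi_dev)
{
	struct mhi_ep_chan *mhi_chan;
	struct mhi_result result = {};
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;	/* assumed status */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;	/* client is gone */
		mutex_unlock(&mhi_chan->lock);
	}
}
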