Lines matching "wp" and "controller"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
10 #include <linux/dma-direction.h>
11 #include <linux/dma-mapping.h>
86 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in serial_number_show()
89 mhi_cntrl->serial_number); in serial_number_show()
98 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in oem_pk_hash_show()
103 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]); in oem_pk_hash_show()
123 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in soc_reset_store()
135 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in trigger_edl_store()
144 return -EINVAL; in trigger_edl_store()
146 ret = mhi_cntrl->edl_trigger(mhi_cntrl); in trigger_edl_store()
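The sysfs handlers matched above expose the device serial number and OEM PK hash (read from BHI registers), a SoC reset knob, and an EDL trigger. For the latter, writing a nonzero value, e.g. echo 1 > /sys/bus/mhi/devices/mhi0/trigger_edl (path illustrative, for the first controller instance), invokes the controller driver's edl_trigger callback to put the device into emergency download mode; a zero write is rejected with -EINVAL, as line 144 shows.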
167 ring->alloc_size = len + (len - 1); in mhi_alloc_aligned_ring()
168 ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_alloc_aligned_ring()
169 &ring->dma_handle, GFP_KERNEL); in mhi_alloc_aligned_ring()
170 if (!ring->pre_aligned) in mhi_alloc_aligned_ring()
171 return -ENOMEM; in mhi_alloc_aligned_ring()
173 ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); in mhi_alloc_aligned_ring()
174 ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); in mhi_alloc_aligned_ring()
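These four lines are the heart of mhi_alloc_aligned_ring(): dma_alloc_coherent() does not guarantee the alignment the ring needs, so the allocation is padded by len - 1 bytes, which guarantees that a len-aligned address falls somewhere inside the buffer; iommu_base is dma_handle rounded up to that boundary and base is the matching CPU-side pointer. A minimal sketch of the round-up, which (like the code above) assumes len is a power of two so the mask arithmetic is a true alignment:

        /* Round addr up to the next len boundary; len must be a power of
         * two for the mask to act as an alignment. */
        static inline u64 ring_align_up(u64 addr, u64 len)
        {
                return (addr + (len - 1)) & ~(len - 1);
        }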
182 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_deinit_free_irq()
184 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_deinit_free_irq()
185 if (mhi_event->offload_ev) in mhi_deinit_free_irq()
188 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); in mhi_deinit_free_irq()
191 free_irq(mhi_cntrl->irq[0], mhi_cntrl); in mhi_deinit_free_irq()
196 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_init_irq_setup()
197 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_init_irq_setup()
202 /* If the controller driver has set irq_flags, use them */ in mhi_init_irq_setup()
202 if (mhi_cntrl->irq_flags) in mhi_init_irq_setup()
203 irq_flags = mhi_cntrl->irq_flags; in mhi_init_irq_setup()
206 ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, in mhi_init_irq_setup()
217 disable_irq(mhi_cntrl->irq[0]); in mhi_init_irq_setup()
219 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_init_irq_setup()
220 if (mhi_event->offload_ev) in mhi_init_irq_setup()
223 if (mhi_event->irq >= mhi_cntrl->nr_irqs) { in mhi_init_irq_setup()
225 mhi_event->irq); in mhi_init_irq_setup()
226 ret = -EINVAL; in mhi_init_irq_setup()
230 ret = request_irq(mhi_cntrl->irq[mhi_event->irq], in mhi_init_irq_setup()
236 mhi_cntrl->irq[mhi_event->irq], i); in mhi_init_irq_setup()
240 disable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_init_irq_setup()
246 for (--i, --mhi_event; i >= 0; i--, mhi_event--) { in mhi_init_irq_setup()
247 if (mhi_event->offload_ev) in mhi_init_irq_setup()
250 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); in mhi_init_irq_setup()
252 free_irq(mhi_cntrl->irq[0], mhi_cntrl); in mhi_init_irq_setup()
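The error path at lines 246-252 is the usual partial-unwind idiom: when setup of event ring i fails, walk back over rings 0..i-1 in reverse order, skip offload rings that never had an IRQ requested, then release the base interrupt. A generic sketch of the pattern, with setup_one()/teardown_one() as hypothetical stand-ins:

        int setup_one(int idx);
        void teardown_one(int idx);

        static int init_all(int n)
        {
                int i, ret;

                for (i = 0; i < n; i++) {
                        ret = setup_one(i);
                        if (ret)
                                goto err_unwind;
                }
                return 0;

        err_unwind:
                /* entry i was never set up; undo 0..i-1 in reverse */
                for (--i; i >= 0; i--)
                        teardown_one(i);
                return ret;
        }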
260 struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; in mhi_deinit_dev_ctxt()
265 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_deinit_dev_ctxt()
267 ring = &mhi_cmd->ring; in mhi_deinit_dev_ctxt()
268 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_deinit_dev_ctxt()
269 ring->pre_aligned, ring->dma_handle); in mhi_deinit_dev_ctxt()
270 ring->base = NULL; in mhi_deinit_dev_ctxt()
271 ring->iommu_base = 0; in mhi_deinit_dev_ctxt()
274 dma_free_coherent(mhi_cntrl->cntrl_dev, in mhi_deinit_dev_ctxt()
275 sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, in mhi_deinit_dev_ctxt()
276 mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); in mhi_deinit_dev_ctxt()
278 mhi_event = mhi_cntrl->mhi_event; in mhi_deinit_dev_ctxt()
279 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_deinit_dev_ctxt()
280 if (mhi_event->offload_ev) in mhi_deinit_dev_ctxt()
283 ring = &mhi_event->ring; in mhi_deinit_dev_ctxt()
284 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_deinit_dev_ctxt()
285 ring->pre_aligned, ring->dma_handle); in mhi_deinit_dev_ctxt()
286 ring->base = NULL; in mhi_deinit_dev_ctxt()
287 ring->iommu_base = 0; in mhi_deinit_dev_ctxt()
290 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * in mhi_deinit_dev_ctxt()
291 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, in mhi_deinit_dev_ctxt()
292 mhi_ctxt->er_ctxt_addr); in mhi_deinit_dev_ctxt()
294 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * in mhi_deinit_dev_ctxt()
295 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, in mhi_deinit_dev_ctxt()
296 mhi_ctxt->chan_ctxt_addr); in mhi_deinit_dev_ctxt()
299 mhi_cntrl->mhi_ctxt = NULL; in mhi_deinit_dev_ctxt()
312 int ret = -ENOMEM, i; in mhi_init_dev_ctxt()
314 atomic_set(&mhi_cntrl->dev_wake, 0); in mhi_init_dev_ctxt()
315 atomic_set(&mhi_cntrl->pending_pkts, 0); in mhi_init_dev_ctxt()
319 return -ENOMEM; in mhi_init_dev_ctxt()
322 mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
323 sizeof(*mhi_ctxt->chan_ctxt) * in mhi_init_dev_ctxt()
324 mhi_cntrl->max_chan, in mhi_init_dev_ctxt()
325 &mhi_ctxt->chan_ctxt_addr, in mhi_init_dev_ctxt()
327 if (!mhi_ctxt->chan_ctxt) in mhi_init_dev_ctxt()
330 mhi_chan = mhi_cntrl->mhi_chan; in mhi_init_dev_ctxt()
331 chan_ctxt = mhi_ctxt->chan_ctxt; in mhi_init_dev_ctxt()
332 for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { in mhi_init_dev_ctxt()
334 if (mhi_chan->offload_ch) in mhi_init_dev_ctxt()
337 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_init_dev_ctxt()
341 tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode); in mhi_init_dev_ctxt()
343 tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg); in mhi_init_dev_ctxt()
344 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_init_dev_ctxt()
346 chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); in mhi_init_dev_ctxt()
347 chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); in mhi_init_dev_ctxt()
349 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_init_dev_ctxt()
350 mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; in mhi_init_dev_ctxt()
354 mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
355 sizeof(*mhi_ctxt->er_ctxt) * in mhi_init_dev_ctxt()
356 mhi_cntrl->total_ev_rings, in mhi_init_dev_ctxt()
357 &mhi_ctxt->er_ctxt_addr, in mhi_init_dev_ctxt()
359 if (!mhi_ctxt->er_ctxt) in mhi_init_dev_ctxt()
362 er_ctxt = mhi_ctxt->er_ctxt; in mhi_init_dev_ctxt()
363 mhi_event = mhi_cntrl->mhi_event; in mhi_init_dev_ctxt()
364 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_init_dev_ctxt()
366 struct mhi_ring *ring = &mhi_event->ring; in mhi_init_dev_ctxt()
369 if (mhi_event->offload_ev) in mhi_init_dev_ctxt()
372 tmp = le32_to_cpu(er_ctxt->intmod); in mhi_init_dev_ctxt()
375 tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod); in mhi_init_dev_ctxt()
376 er_ctxt->intmod = cpu_to_le32(tmp); in mhi_init_dev_ctxt()
378 er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); in mhi_init_dev_ctxt()
379 er_ctxt->msivec = cpu_to_le32(mhi_event->irq); in mhi_init_dev_ctxt()
380 mhi_event->db_cfg.db_mode = true; in mhi_init_dev_ctxt()
382 ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_dev_ctxt()
383 ring->len = ring->el_size * ring->elements; in mhi_init_dev_ctxt()
384 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); in mhi_init_dev_ctxt()
392 ring->rp = ring->wp = ring->base; in mhi_init_dev_ctxt()
393 er_ctxt->rbase = cpu_to_le64(ring->iommu_base); in mhi_init_dev_ctxt()
394 er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; in mhi_init_dev_ctxt()
395 er_ctxt->rlen = cpu_to_le64(ring->len); in mhi_init_dev_ctxt()
396 ring->ctxt_wp = &er_ctxt->wp; in mhi_init_dev_ctxt()
400 ret = -ENOMEM; in mhi_init_dev_ctxt()
401 mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
402 sizeof(*mhi_ctxt->cmd_ctxt) * in mhi_init_dev_ctxt()
404 &mhi_ctxt->cmd_ctxt_addr, in mhi_init_dev_ctxt()
406 if (!mhi_ctxt->cmd_ctxt) in mhi_init_dev_ctxt()
409 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_init_dev_ctxt()
410 cmd_ctxt = mhi_ctxt->cmd_ctxt; in mhi_init_dev_ctxt()
412 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_init_dev_ctxt()
414 ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_dev_ctxt()
415 ring->elements = CMD_EL_PER_RING; in mhi_init_dev_ctxt()
416 ring->len = ring->el_size * ring->elements; in mhi_init_dev_ctxt()
417 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); in mhi_init_dev_ctxt()
421 ring->rp = ring->wp = ring->base; in mhi_init_dev_ctxt()
422 cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); in mhi_init_dev_ctxt()
423 cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_init_dev_ctxt()
424 cmd_ctxt->rlen = cpu_to_le64(ring->len); in mhi_init_dev_ctxt()
425 ring->ctxt_wp = &cmd_ctxt->wp; in mhi_init_dev_ctxt()
428 mhi_cntrl->mhi_ctxt = mhi_ctxt; in mhi_init_dev_ctxt()
433 for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { in mhi_init_dev_ctxt()
434 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_init_dev_ctxt()
436 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_init_dev_ctxt()
437 ring->pre_aligned, ring->dma_handle); in mhi_init_dev_ctxt()
439 dma_free_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
440 sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, in mhi_init_dev_ctxt()
441 mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); in mhi_init_dev_ctxt()
442 i = mhi_cntrl->total_ev_rings; in mhi_init_dev_ctxt()
443 mhi_event = mhi_cntrl->mhi_event + i; in mhi_init_dev_ctxt()
446 for (--i, --mhi_event; i >= 0; i--, mhi_event--) { in mhi_init_dev_ctxt()
447 struct mhi_ring *ring = &mhi_event->ring; in mhi_init_dev_ctxt()
449 if (mhi_event->offload_ev) in mhi_init_dev_ctxt()
452 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_init_dev_ctxt()
453 ring->pre_aligned, ring->dma_handle); in mhi_init_dev_ctxt()
455 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * in mhi_init_dev_ctxt()
456 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, in mhi_init_dev_ctxt()
457 mhi_ctxt->er_ctxt_addr); in mhi_init_dev_ctxt()
460 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * in mhi_init_dev_ctxt()
461 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, in mhi_init_dev_ctxt()
462 mhi_ctxt->chan_ctxt_addr); in mhi_init_dev_ctxt()
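mhi_init_dev_ctxt() applies one pattern to event and command rings alike (lines 392-396 and 421-425): host-side rp/wp start at the ring base, the context shared with the device gets rbase/rp/wp set to the ring's IOMMU address and rlen to the byte length, and ring->ctxt_wp caches the address of the context wp field so doorbell updates can publish the host write pointer without re-deriving it. A hypothetical factoring of that common step (the real code open-codes it because mhi_event_ctxt and mhi_cmd_ctxt carry different leading fields, though both end in rbase/rlen/rp/wp):

        struct ring_ctxt_tail {
                __le64 rbase, rlen, rp, wp;
        };

        static void ring_ctxt_init(struct mhi_ring *ring,
                                   struct ring_ctxt_tail *ctxt)
        {
                ring->rp = ring->wp = ring->base;            /* host view */
                ctxt->rbase = cpu_to_le64(ring->iommu_base); /* device view */
                ctxt->rp = ctxt->wp = ctxt->rbase;
                ctxt->rlen = cpu_to_le64(ring->len);
                ring->ctxt_wp = &ctxt->wp;
        }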
476 void __iomem *base = mhi_cntrl->regs; in mhi_init_mmio()
477 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_init_mmio()
484 upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), in mhi_init_mmio()
488 lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), in mhi_init_mmio()
492 upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), in mhi_init_mmio()
496 lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), in mhi_init_mmio()
500 upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), in mhi_init_mmio()
504 lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), in mhi_init_mmio()
508 upper_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
512 lower_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
516 upper_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
520 lower_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
524 upper_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
528 lower_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
532 upper_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
536 lower_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
548 if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { in mhi_init_mmio()
550 val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); in mhi_init_mmio()
551 return -ERANGE; in mhi_init_mmio()
555 mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); in mhi_init_mmio()
556 mhi_cntrl->wake_set = false; in mhi_init_mmio()
559 mhi_chan = mhi_cntrl->mhi_chan; in mhi_init_mmio()
560 for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) in mhi_init_mmio()
561 mhi_chan->tre_ring.db_addr = base + val; in mhi_init_mmio()
567 return -EIO; in mhi_init_mmio()
570 if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { in mhi_init_mmio()
572 val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); in mhi_init_mmio()
573 return -ERANGE; in mhi_init_mmio()
577 mhi_event = mhi_cntrl->mhi_event; in mhi_init_mmio()
578 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { in mhi_init_mmio()
579 if (mhi_event->offload_ev) in mhi_init_mmio()
582 mhi_event->ring.db_addr = base + val; in mhi_init_mmio()
586 mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; in mhi_init_mmio()
594 mhi_cntrl->total_ev_rings); in mhi_init_mmio()
601 mhi_cntrl->hw_ev_rings); in mhi_init_mmio()
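The HIGHER/LOWER pairs at lines 484-536 program each 64-bit context array address and the MHI control/data address window as two 32-bit MMIO writes. In the kernel source this run is a table-driven loop over an offset/value array rather than a series of statements; a sketch of that shape inside mhi_init_mmio(), abbreviated to the first entries:

        struct {
                u32 offset;
                u32 val;
        } reg_info[] = {
                { CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
                { CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
                { ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
                { ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
                /* ... CRCBAP_*, MHICTRLBASE/LIMIT_*, MHIDATABASE/LIMIT_* ... */
                { 0, 0 },
        };

        for (i = 0; reg_info[i].offset; i++)
                mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
                              reg_info[i].val);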
618 buf_ring = &mhi_chan->buf_ring; in mhi_deinit_chan_ctxt()
619 tre_ring = &mhi_chan->tre_ring; in mhi_deinit_chan_ctxt()
620 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; in mhi_deinit_chan_ctxt()
622 if (!chan_ctxt->rbase) /* Already uninitialized */ in mhi_deinit_chan_ctxt()
625 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, in mhi_deinit_chan_ctxt()
626 tre_ring->pre_aligned, tre_ring->dma_handle); in mhi_deinit_chan_ctxt()
627 vfree(buf_ring->base); in mhi_deinit_chan_ctxt()
629 buf_ring->base = tre_ring->base = NULL; in mhi_deinit_chan_ctxt()
630 tre_ring->ctxt_wp = NULL; in mhi_deinit_chan_ctxt()
631 chan_ctxt->rbase = 0; in mhi_deinit_chan_ctxt()
632 chan_ctxt->rlen = 0; in mhi_deinit_chan_ctxt()
633 chan_ctxt->rp = 0; in mhi_deinit_chan_ctxt()
634 chan_ctxt->wp = 0; in mhi_deinit_chan_ctxt()
636 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_deinit_chan_ctxt()
639 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_deinit_chan_ctxt()
654 buf_ring = &mhi_chan->buf_ring; in mhi_init_chan_ctxt()
655 tre_ring = &mhi_chan->tre_ring; in mhi_init_chan_ctxt()
656 tre_ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_chan_ctxt()
657 tre_ring->len = tre_ring->el_size * tre_ring->elements; in mhi_init_chan_ctxt()
658 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; in mhi_init_chan_ctxt()
659 ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); in mhi_init_chan_ctxt()
661 return -ENOMEM; in mhi_init_chan_ctxt()
663 buf_ring->el_size = sizeof(struct mhi_buf_info); in mhi_init_chan_ctxt()
664 buf_ring->len = buf_ring->el_size * buf_ring->elements; in mhi_init_chan_ctxt()
665 buf_ring->base = vzalloc(buf_ring->len); in mhi_init_chan_ctxt()
667 if (!buf_ring->base) { in mhi_init_chan_ctxt()
668 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, in mhi_init_chan_ctxt()
669 tre_ring->pre_aligned, tre_ring->dma_handle); in mhi_init_chan_ctxt()
670 return -ENOMEM; in mhi_init_chan_ctxt()
673 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_init_chan_ctxt()
676 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_init_chan_ctxt()
678 chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); in mhi_init_chan_ctxt()
679 chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; in mhi_init_chan_ctxt()
680 chan_ctxt->rlen = cpu_to_le64(tre_ring->len); in mhi_init_chan_ctxt()
681 tre_ring->ctxt_wp = &chan_ctxt->wp; in mhi_init_chan_ctxt()
683 tre_ring->rp = tre_ring->wp = tre_ring->base; in mhi_init_chan_ctxt()
684 buf_ring->rp = buf_ring->wp = buf_ring->base; in mhi_init_chan_ctxt()
685 mhi_chan->db_cfg.db_mode = 1; in mhi_init_chan_ctxt()
698 struct device *dev = mhi_cntrl->cntrl_dev; in parse_ev_cfg()
701 num = config->num_events; in parse_ev_cfg()
702 mhi_cntrl->total_ev_rings = num; in parse_ev_cfg()
703 mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), in parse_ev_cfg()
705 if (!mhi_cntrl->mhi_event) in parse_ev_cfg()
706 return -ENOMEM; in parse_ev_cfg()
709 mhi_event = mhi_cntrl->mhi_event; in parse_ev_cfg()
711 event_cfg = &config->event_cfg[i]; in parse_ev_cfg()
713 mhi_event->er_index = i; in parse_ev_cfg()
714 mhi_event->ring.elements = event_cfg->num_elements; in parse_ev_cfg()
715 mhi_event->intmod = event_cfg->irq_moderation_ms; in parse_ev_cfg()
716 mhi_event->irq = event_cfg->irq; in parse_ev_cfg()
718 if (event_cfg->channel != U32_MAX) { in parse_ev_cfg()
720 mhi_event->chan = event_cfg->channel; in parse_ev_cfg()
721 if (mhi_event->chan >= mhi_cntrl->max_chan) { in parse_ev_cfg()
727 mhi_event->mhi_chan = in parse_ev_cfg()
728 &mhi_cntrl->mhi_chan[mhi_event->chan]; in parse_ev_cfg()
732 mhi_event->priority = 1; in parse_ev_cfg()
734 mhi_event->db_cfg.brstmode = event_cfg->mode; in parse_ev_cfg()
735 if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) in parse_ev_cfg()
738 if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) in parse_ev_cfg()
739 mhi_event->db_cfg.process_db = mhi_db_brstmode; in parse_ev_cfg()
741 mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; in parse_ev_cfg()
743 mhi_event->data_type = event_cfg->data_type; in parse_ev_cfg()
745 switch (mhi_event->data_type) { in parse_ev_cfg()
747 mhi_event->process_event = mhi_process_data_event_ring; in parse_ev_cfg()
750 mhi_event->process_event = mhi_process_ctrl_ev_ring; in parse_ev_cfg()
757 mhi_event->hw_ring = event_cfg->hardware_event; in parse_ev_cfg()
758 if (mhi_event->hw_ring) in parse_ev_cfg()
759 mhi_cntrl->hw_ev_rings++; in parse_ev_cfg()
761 mhi_cntrl->sw_ev_rings++; in parse_ev_cfg()
763 mhi_event->cl_manage = event_cfg->client_managed; in parse_ev_cfg()
764 mhi_event->offload_ev = event_cfg->offload_channel; in parse_ev_cfg()
772 kfree(mhi_cntrl->mhi_event); in parse_ev_cfg()
773 return -EINVAL; in parse_ev_cfg()
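parse_ev_cfg() copies each struct mhi_event_config entry into the runtime mhi_event array, bounds-checks any bound channel, rejects invalid burst modes, and selects the ring handler from data_type; line 718 shows that U32_MAX marks an event ring with no associated channel. An illustrative table such as a controller driver might supply (values are examples, not taken from any real platform):

        static struct mhi_event_config example_events[] = {
                {
                        /* ring 0: control events, not bound to a channel */
                        .num_elements = 32,
                        .irq_moderation_ms = 0,
                        .irq = 1,
                        .channel = U32_MAX,
                        .mode = MHI_DB_BRST_DISABLE,
                        .data_type = MHI_ER_CTRL,
                },
                {
                        /* ring 1: hardware data events for channel 100 */
                        .num_elements = 256,
                        .irq_moderation_ms = 5,
                        .irq = 2,
                        .channel = 100,
                        .mode = MHI_DB_BRST_DISABLE,
                        .data_type = MHI_ER_DATA,
                        .hardware_event = true,
                },
        };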
780 struct device *dev = mhi_cntrl->cntrl_dev; in parse_ch_cfg()
784 mhi_cntrl->max_chan = config->max_channels; in parse_ch_cfg()
791 mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan, in parse_ch_cfg()
792 sizeof(*mhi_cntrl->mhi_chan)); in parse_ch_cfg()
793 if (!mhi_cntrl->mhi_chan) in parse_ch_cfg()
794 return -ENOMEM; in parse_ch_cfg()
796 INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); in parse_ch_cfg()
799 for (i = 0; i < config->num_channels; i++) { in parse_ch_cfg()
802 ch_cfg = &config->ch_cfg[i]; in parse_ch_cfg()
804 chan = ch_cfg->num; in parse_ch_cfg()
805 if (chan >= mhi_cntrl->max_chan) { in parse_ch_cfg()
810 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in parse_ch_cfg()
811 mhi_chan->name = ch_cfg->name; in parse_ch_cfg()
812 mhi_chan->chan = chan; in parse_ch_cfg()
814 mhi_chan->tre_ring.elements = ch_cfg->num_elements; in parse_ch_cfg()
815 if (!mhi_chan->tre_ring.elements) in parse_ch_cfg()
825 mhi_chan->buf_ring.elements = ch_cfg->local_elements; in parse_ch_cfg()
826 if (!mhi_chan->buf_ring.elements) in parse_ch_cfg()
827 mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; in parse_ch_cfg()
828 mhi_chan->er_index = ch_cfg->event_ring; in parse_ch_cfg()
829 mhi_chan->dir = ch_cfg->dir; in parse_ch_cfg()
836 mhi_chan->type = ch_cfg->type; in parse_ch_cfg()
837 if (!mhi_chan->type) in parse_ch_cfg()
838 mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; in parse_ch_cfg()
840 mhi_chan->ee_mask = ch_cfg->ee_mask; in parse_ch_cfg()
841 mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; in parse_ch_cfg()
842 mhi_chan->lpm_notify = ch_cfg->lpm_notify; in parse_ch_cfg()
843 mhi_chan->offload_ch = ch_cfg->offload_channel; in parse_ch_cfg()
844 mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; in parse_ch_cfg()
845 mhi_chan->pre_alloc = ch_cfg->auto_queue; in parse_ch_cfg()
846 mhi_chan->wake_capable = ch_cfg->wake_capable; in parse_ch_cfg()
852 if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { in parse_ch_cfg()
858 * Bi-directional and directionless channel must be an in parse_ch_cfg()
861 if ((mhi_chan->dir == DMA_BIDIRECTIONAL || in parse_ch_cfg()
862 mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { in parse_ch_cfg()
867 if (!mhi_chan->offload_ch) { in parse_ch_cfg()
868 mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; in parse_ch_cfg()
869 if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { in parse_ch_cfg()
875 if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) in parse_ch_cfg()
876 mhi_chan->db_cfg.process_db = mhi_db_brstmode; in parse_ch_cfg()
878 mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; in parse_ch_cfg()
880 mhi_chan->configured = true; in parse_ch_cfg()
882 if (mhi_chan->lpm_notify) in parse_ch_cfg()
883 list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); in parse_ch_cfg()
889 vfree(mhi_cntrl->mhi_chan); in parse_ch_cfg()
891 return -EINVAL; in parse_ch_cfg()
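parse_ch_cfg() walks the channel table the same way: every entry is bounds-checked against max_channels, buf_ring.elements falls back to tre_ring.elements when local_elements is zero (lines 825-827), auto_queue is only legal for DMA_FROM_DEVICE channels (line 852), and bidirectional or directionless channels must be offloaded (lines 861-862). An illustrative uplink/downlink pair (names and channel numbers are examples only):

        static const struct mhi_channel_config example_channels[] = {
                {
                        .num = 20,
                        .name = "EXAMPLE",
                        .num_elements = 64,
                        .event_ring = 1,
                        .dir = DMA_TO_DEVICE,
                        .ee_mask = BIT(MHI_EE_AMSS),
                        .doorbell = MHI_DB_BRST_DISABLE,
                },
                {
                        .num = 21,
                        .name = "EXAMPLE",
                        .num_elements = 64,
                        .event_ring = 1,
                        .dir = DMA_FROM_DEVICE,
                        .ee_mask = BIT(MHI_EE_AMSS),
                        .doorbell = MHI_DB_BRST_DISABLE,
                        .auto_queue = true, /* pre_alloc, DMA_FROM_DEVICE only */
                },
        };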
909 mhi_cntrl->timeout_ms = config->timeout_ms; in parse_config()
910 if (!mhi_cntrl->timeout_ms) in parse_config()
911 mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; in parse_config()
913 mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; in parse_config()
914 mhi_cntrl->bounce_buf = config->use_bounce_buf; in parse_config()
915 mhi_cntrl->buffer_len = config->buf_len; in parse_config()
916 if (!mhi_cntrl->buffer_len) in parse_config()
917 mhi_cntrl->buffer_len = MHI_MAX_MTU; in parse_config()
920 mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; in parse_config()
921 if (config->m2_no_db) in parse_config()
922 mhi_cntrl->db_access &= ~MHI_PM_M2; in parse_config()
927 vfree(mhi_cntrl->mhi_chan); in parse_config()
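parse_config() stitches those tables into the controller and applies defaults for zeroed fields: timeout_ms falls back to MHI_TIMEOUT_MS, buffer_len to MHI_MAX_MTU, and m2_no_db clears the M2 doorbell-access bit. Tying the illustrative tables above together (again, example values):

        static const struct mhi_controller_config example_config = {
                .max_channels = 128,
                .timeout_ms = 8000,
                .num_channels = ARRAY_SIZE(example_channels),
                .ch_cfg = example_channels,
                .num_events = ARRAY_SIZE(example_events),
                .event_cfg = example_events,
        };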
941 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || in mhi_register_controller()
942 !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || in mhi_register_controller()
943 !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || in mhi_register_controller()
944 !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || in mhi_register_controller()
945 !mhi_cntrl->irq || !mhi_cntrl->reg_len) in mhi_register_controller()
946 return -EINVAL; in mhi_register_controller()
950 return -EINVAL; in mhi_register_controller()
952 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, in mhi_register_controller()
953 sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); in mhi_register_controller()
954 if (!mhi_cntrl->mhi_cmd) { in mhi_register_controller()
955 ret = -ENOMEM; in mhi_register_controller()
959 INIT_LIST_HEAD(&mhi_cntrl->transition_list); in mhi_register_controller()
960 mutex_init(&mhi_cntrl->pm_mutex); in mhi_register_controller()
961 rwlock_init(&mhi_cntrl->pm_lock); in mhi_register_controller()
962 spin_lock_init(&mhi_cntrl->transition_lock); in mhi_register_controller()
963 spin_lock_init(&mhi_cntrl->wlock); in mhi_register_controller()
964 INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); in mhi_register_controller()
965 init_waitqueue_head(&mhi_cntrl->state_event); in mhi_register_controller()
967 mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); in mhi_register_controller()
968 if (!mhi_cntrl->hiprio_wq) { in mhi_register_controller()
969 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); in mhi_register_controller()
970 ret = -ENOMEM; in mhi_register_controller()
974 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_register_controller()
976 spin_lock_init(&mhi_cmd->lock); in mhi_register_controller()
978 mhi_event = mhi_cntrl->mhi_event; in mhi_register_controller()
979 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_register_controller()
981 if (mhi_event->offload_ev) in mhi_register_controller()
984 mhi_event->mhi_cntrl = mhi_cntrl; in mhi_register_controller()
985 spin_lock_init(&mhi_event->lock); in mhi_register_controller()
986 if (mhi_event->data_type == MHI_ER_CTRL) in mhi_register_controller()
987 tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, in mhi_register_controller()
990 tasklet_init(&mhi_event->task, mhi_ev_task, in mhi_register_controller()
994 mhi_chan = mhi_cntrl->mhi_chan; in mhi_register_controller()
995 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_register_controller()
996 mutex_init(&mhi_chan->mutex); in mhi_register_controller()
997 init_completion(&mhi_chan->completion); in mhi_register_controller()
998 rwlock_init(&mhi_chan->lock); in mhi_register_controller()
1001 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_register_controller()
1002 mhi_chan->intmod = mhi_event->intmod; in mhi_register_controller()
1005 if (mhi_cntrl->bounce_buf) { in mhi_register_controller()
1006 mhi_cntrl->map_single = mhi_map_single_use_bb; in mhi_register_controller()
1007 mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; in mhi_register_controller()
1009 mhi_cntrl->map_single = mhi_map_single_no_bb; in mhi_register_controller()
1010 mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; in mhi_register_controller()
1013 mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); in mhi_register_controller()
1014 if (mhi_cntrl->index < 0) { in mhi_register_controller()
1015 ret = mhi_cntrl->index; in mhi_register_controller()
1023 /* Register controller with MHI bus */ in mhi_register_controller()
1026 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); in mhi_register_controller()
1031 mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; in mhi_register_controller()
1032 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_register_controller()
1033 dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); in mhi_register_controller()
1034 mhi_dev->name = dev_name(&mhi_dev->dev); in mhi_register_controller()
1037 device_init_wakeup(&mhi_dev->dev, true); in mhi_register_controller()
1039 ret = device_add(&mhi_dev->dev); in mhi_register_controller()
1043 if (mhi_cntrl->edl_trigger) { in mhi_register_controller()
1044 ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); in mhi_register_controller()
1049 mhi_cntrl->mhi_dev = mhi_dev; in mhi_register_controller()
1056 put_device(&mhi_dev->dev); in mhi_register_controller()
1060 ida_free(&mhi_controller_ida, mhi_cntrl->index); in mhi_register_controller()
1062 destroy_workqueue(mhi_cntrl->hiprio_wq); in mhi_register_controller()
1064 kfree(mhi_cntrl->mhi_cmd); in mhi_register_controller()
1066 kfree(mhi_cntrl->mhi_event); in mhi_register_controller()
1067 vfree(mhi_cntrl->mhi_chan); in mhi_register_controller()
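The guard at lines 941-945 is the registration contract: before calling mhi_register_controller(), a controller driver must supply the underlying device, the ioremapped MMIO region and its length, the IRQ table, and the runtime_get/runtime_put, status_cb, and read_reg/write_reg callbacks. A sketch of a call site under those assumptions (the my_* callbacks and the pdev/mmio names are hypothetical):

        mhi_cntrl = mhi_alloc_controller();
        if (!mhi_cntrl)
                return -ENOMEM;

        mhi_cntrl->cntrl_dev = &pdev->dev;      /* e.g. the PCI device */
        mhi_cntrl->regs = mmio_base;            /* ioremapped BAR */
        mhi_cntrl->reg_len = mmio_len;
        mhi_cntrl->irq = irq_table;             /* nr_irqs entries */
        mhi_cntrl->nr_irqs = nr_vectors;
        mhi_cntrl->runtime_get = my_runtime_get;
        mhi_cntrl->runtime_put = my_runtime_put;
        mhi_cntrl->status_cb = my_status_cb;
        mhi_cntrl->read_reg = my_read_reg;
        mhi_cntrl->write_reg = my_write_reg;

        ret = mhi_register_controller(mhi_cntrl, &example_config);
        if (ret)
                mhi_free_controller(mhi_cntrl);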
1075 struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; in mhi_unregister_controller()
1076 struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; in mhi_unregister_controller()
1082 if (mhi_cntrl->edl_trigger) in mhi_unregister_controller()
1083 sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); in mhi_unregister_controller()
1085 destroy_workqueue(mhi_cntrl->hiprio_wq); in mhi_unregister_controller()
1086 kfree(mhi_cntrl->mhi_cmd); in mhi_unregister_controller()
1087 kfree(mhi_cntrl->mhi_event); in mhi_unregister_controller()
1090 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_unregister_controller()
1091 if (!mhi_chan->mhi_dev) in mhi_unregister_controller()
1094 put_device(&mhi_chan->mhi_dev->dev); in mhi_unregister_controller()
1096 vfree(mhi_cntrl->mhi_chan); in mhi_unregister_controller()
1098 device_del(&mhi_dev->dev); in mhi_unregister_controller()
1099 put_device(&mhi_dev->dev); in mhi_unregister_controller()
1101 ida_free(&mhi_controller_ida, mhi_cntrl->index); in mhi_unregister_controller()
1123 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_prepare_for_power_up()
1127 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
1133 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); in mhi_prepare_for_power_up()
1139 if (bhi_off >= mhi_cntrl->reg_len) { in mhi_prepare_for_power_up()
1141 bhi_off, mhi_cntrl->reg_len); in mhi_prepare_for_power_up()
1142 ret = -ERANGE; in mhi_prepare_for_power_up()
1145 mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; in mhi_prepare_for_power_up()
1147 if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { in mhi_prepare_for_power_up()
1148 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, in mhi_prepare_for_power_up()
1155 if (bhie_off >= mhi_cntrl->reg_len) { in mhi_prepare_for_power_up()
1158 bhie_off, mhi_cntrl->reg_len); in mhi_prepare_for_power_up()
1159 ret = -ERANGE; in mhi_prepare_for_power_up()
1162 mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; in mhi_prepare_for_power_up()
1165 if (mhi_cntrl->rddm_size) { in mhi_prepare_for_power_up()
1167 * This controller supports RDDM, so we need to manually clear in mhi_prepare_for_power_up()
1170 memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, in mhi_prepare_for_power_up()
1171 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + in mhi_prepare_for_power_up()
1176 mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, in mhi_prepare_for_power_up()
1177 mhi_cntrl->rddm_size); in mhi_prepare_for_power_up()
1178 if (mhi_cntrl->rddm_image) { in mhi_prepare_for_power_up()
1180 mhi_cntrl->rddm_image); in mhi_prepare_for_power_up()
1183 mhi_cntrl->rddm_image); in mhi_prepare_for_power_up()
1189 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
1197 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
1205 if (mhi_cntrl->fbc_image) { in mhi_unprepare_after_power_down()
1206 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); in mhi_unprepare_after_power_down()
1207 mhi_cntrl->fbc_image = NULL; in mhi_unprepare_after_power_down()
1210 if (mhi_cntrl->rddm_image) { in mhi_unprepare_after_power_down()
1211 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); in mhi_unprepare_after_power_down()
1212 mhi_cntrl->rddm_image = NULL; in mhi_unprepare_after_power_down()
1215 mhi_cntrl->bhi = NULL; in mhi_unprepare_after_power_down()
1216 mhi_cntrl->bhie = NULL; in mhi_unprepare_after_power_down()
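mhi_prepare_for_power_up() maps the BHI/BHIe regions and allocates the RDDM table; mhi_unprepare_after_power_down() frees the firmware images and clears those pointers. The usual bracketing in a controller driver looks roughly like this (error handling trimmed):

        ret = mhi_prepare_for_power_up(mhi_cntrl);
        if (ret)
                return ret;

        ret = mhi_sync_power_up(mhi_cntrl);     /* or mhi_async_power_up() */
        if (ret) {
                mhi_unprepare_after_power_down(mhi_cntrl);
                return ret;
        }

        /* ... device up, channels usable ... */

        mhi_power_down(mhi_cntrl, true);        /* graceful shutdown */
        mhi_unprepare_after_power_down(mhi_cntrl);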
1227 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI in mhi_release_device()
1230 * controller suspend and resume. in mhi_release_device()
1232 if (mhi_dev->ul_chan) in mhi_release_device()
1233 mhi_dev->ul_chan->mhi_dev = NULL; in mhi_release_device()
1235 if (mhi_dev->dl_chan) in mhi_release_device()
1236 mhi_dev->dl_chan->mhi_dev = NULL; in mhi_release_device()
1248 return ERR_PTR(-ENOMEM); in mhi_alloc_device()
1250 dev = &mhi_dev->dev; in mhi_alloc_device()
1252 dev->bus = &mhi_bus_type; in mhi_alloc_device()
1253 dev->release = mhi_release_device; in mhi_alloc_device()
1255 if (mhi_cntrl->mhi_dev) { in mhi_alloc_device()
1256 /* for MHI client devices, parent is the MHI controller device */ in mhi_alloc_device()
1257 dev->parent = &mhi_cntrl->mhi_dev->dev; in mhi_alloc_device()
1259 /* for MHI controller device, parent is the bus device (e.g. pci device) */ in mhi_alloc_device()
1260 dev->parent = mhi_cntrl->cntrl_dev; in mhi_alloc_device()
1263 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_alloc_device()
1264 mhi_dev->dev_wake = 0; in mhi_alloc_device()
1272 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_driver_probe()
1273 struct device_driver *drv = dev->driver; in mhi_driver_probe()
1276 struct mhi_chan *ul_chan = mhi_dev->ul_chan; in mhi_driver_probe()
1277 struct mhi_chan *dl_chan = mhi_dev->dl_chan; in mhi_driver_probe()
1285 ret = -EINVAL; in mhi_driver_probe()
1292 if (ul_chan->lpm_notify && !mhi_drv->status_cb) in mhi_driver_probe()
1295 /* For non-offload channels, an xfer_cb should be provided */ in mhi_driver_probe()
1296 if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) in mhi_driver_probe()
1299 ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; in mhi_driver_probe()
1302 ret = -EINVAL; in mhi_driver_probe()
1308 if (dl_chan->lpm_notify && !mhi_drv->status_cb) in mhi_driver_probe()
1311 /* For non-offload channels, an xfer_cb should be provided */ in mhi_driver_probe()
1312 if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) in mhi_driver_probe()
1315 mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; in mhi_driver_probe()
1322 if (mhi_event->cl_manage && !mhi_drv->status_cb) in mhi_driver_probe()
1325 dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; in mhi_driver_probe()
1329 ret = mhi_drv->probe(mhi_dev, mhi_dev->id); in mhi_driver_probe()
1348 struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); in mhi_driver_remove()
1349 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_driver_remove()
1357 /* Skip if it is a controller device */ in mhi_driver_remove()
1358 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_driver_remove()
1363 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_driver_remove()
1369 write_lock_irq(&mhi_chan->lock); in mhi_driver_remove()
1370 mhi_chan->ccs = MHI_EV_CC_INVALID; in mhi_driver_remove()
1371 complete_all(&mhi_chan->completion); in mhi_driver_remove()
1372 write_unlock_irq(&mhi_chan->lock); in mhi_driver_remove()
1375 mutex_lock(&mhi_chan->mutex); in mhi_driver_remove()
1376 write_lock_irq(&mhi_chan->lock); in mhi_driver_remove()
1377 ch_state[dir] = mhi_chan->ch_state; in mhi_driver_remove()
1378 mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; in mhi_driver_remove()
1379 write_unlock_irq(&mhi_chan->lock); in mhi_driver_remove()
1381 /* Reset the non-offload channel */ in mhi_driver_remove()
1382 if (!mhi_chan->offload_ch) in mhi_driver_remove()
1385 mutex_unlock(&mhi_chan->mutex); in mhi_driver_remove()
1388 mhi_drv->remove(mhi_dev); in mhi_driver_remove()
1390 /* De-init channel if it was enabled */ in mhi_driver_remove()
1392 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_driver_remove()
1397 mutex_lock(&mhi_chan->mutex); in mhi_driver_remove()
1401 !mhi_chan->offload_ch) in mhi_driver_remove()
1404 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_driver_remove()
1406 mutex_unlock(&mhi_chan->mutex); in mhi_driver_remove()
1409 while (mhi_dev->dev_wake) in mhi_driver_remove()
1417 struct device_driver *driver = &mhi_drv->driver; in __mhi_driver_register()
1419 if (!mhi_drv->probe || !mhi_drv->remove) in __mhi_driver_register()
1420 return -EINVAL; in __mhi_driver_register()
1422 driver->bus = &mhi_bus_type; in __mhi_driver_register()
1423 driver->owner = owner; in __mhi_driver_register()
1424 driver->probe = mhi_driver_probe; in __mhi_driver_register()
1425 driver->remove = mhi_driver_remove; in __mhi_driver_register()
1433 driver_unregister(&mhi_drv->driver); in mhi_driver_unregister()
1442 mhi_dev->name); in mhi_uevent()
1452 * If the device is a controller type then there is no client driver in mhi_match()
1455 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_match()
1458 for (id = mhi_drv->id_table; id->chan[0]; id++) in mhi_match()
1459 if (!strcmp(mhi_dev->name, id->chan)) { in mhi_match()
1460 mhi_dev->id = id; in mhi_match()
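mhi_match() binds a client device to a driver by comparing the channel name against the driver's id_table (lines 1458-1460), and mhi_driver_probe() then wires ul_xfer_cb/dl_xfer_cb into the channels. A minimal client-driver skeleton consistent with that flow ("EXAMPLE" is a placeholder channel name):

        static const struct mhi_device_id example_id_table[] = {
                { .chan = "EXAMPLE" },
                {}
        };
        MODULE_DEVICE_TABLE(mhi, example_id_table);

        static int example_probe(struct mhi_device *mhi_dev,
                                 const struct mhi_device_id *id)
        {
                /* start both channels of the device */
                return mhi_prepare_for_transfer(mhi_dev);
        }

        static void example_remove(struct mhi_device *mhi_dev)
        {
                mhi_unprepare_from_transfer(mhi_dev);
        }

        static void example_xfer_cb(struct mhi_device *mhi_dev,
                                    struct mhi_result *result)
        {
                /* result->buf_addr / result->bytes_xferd describe the
                 * completed transfer */
        }

        static struct mhi_driver example_driver = {
                .id_table = example_id_table,
                .probe = example_probe,
                .remove = example_remove,
                .ul_xfer_cb = example_xfer_cb,
                .dl_xfer_cb = example_xfer_cb,
                .driver.name = "example_mhi_client",
        };
        module_mhi_driver(example_driver);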