Lines Matching +full:pa +full:- +full:stats
1 // SPDX-License-Identifier: GPL-2.0-only
23 #include <linux/dma-mapping.h>
30 #include <linux/mailbox/brcm-message.h>
50 (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
66 /* Per-Ring register offsets */
91 #define BD_START_ADDR_VALUE(pa) \ argument
92 ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
97 #define CMPL_START_ADDR_VALUE(pa) \ argument
98 ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
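The two macros above program a ring's descriptor and completion base addresses by dropping the alignment bits of the DMA address before writing the register. A minimal sketch of that arithmetic, assuming for illustration an alignment order of 12 (the actual RING_BD_ALIGN_ORDER value is not shown in this listing):

/* Illustrative only -- not part of the driver. */
#include <stdint.h>
#include <assert.h>

#define EXAMPLE_ALIGN_ORDER 12			/* assumed value */

static uint32_t example_bd_start_value(uint64_t pa)
{
	/* mirrors BD_START_ADDR_VALUE(): drop the alignment bits */
	return (uint32_t)((pa >> EXAMPLE_ALIGN_ORDER) & 0x0fffffff);
}

int main(void)
{
	uint64_t pa = 0x1234567000ULL;		/* aligned example address */

	/* 0x1234567000 >> 12 == 0x1234567 */
	assert(example_bd_start_value(pa) == 0x1234567);
	return 0;
}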
272 /* Atomic stats */
316 return -EIO; in flexrm_cmpl_desc_to_error()
322 return -ETIMEDOUT; in flexrm_cmpl_desc_to_error()
407 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST, in flexrm_enqueue_desc()
408 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors in flexrm_enqueue_desc()
411 * means we can only have 31 non-HEADER descriptors following one in flexrm_enqueue_desc()
414 * In general use, number of non-HEADER descriptors can easily go in flexrm_enqueue_desc()
429 if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) { in flexrm_enqueue_desc()
431 nhavail = (nhcnt - nhpos); in flexrm_enqueue_desc()
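The comment and code above batch non-HEADER descriptors under HEADER descriptors: a new HEADER is emitted whenever nhpos reaches a multiple of HEADER_BDCOUNT_MAX and non-HEADER descriptors remain. A small sketch of the resulting HEADER count, assuming HEADER_BDCOUNT_MAX is 31 as the 5-bit BDCOUNT field described in the comment implies:

/* Illustrative only -- not part of the driver. */
#include <stdio.h>

#define EXAMPLE_HEADER_BDCOUNT_MAX 31		/* assumed value */

static unsigned int example_header_count(unsigned int nhcnt)
{
	/* one HEADER per group of up to HEADER_BDCOUNT_MAX non-HEADERs */
	return (nhcnt + EXAMPLE_HEADER_BDCOUNT_MAX - 1) /
	       EXAMPLE_HEADER_BDCOUNT_MAX;
}

int main(void)
{
	/* 70 non-HEADER descriptors need 3 HEADERs: 31 + 31 + 8 */
	printf("%u\n", example_header_count(70));
	return 0;
}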
568 if (!msg->spu.src || !msg->spu.dst) in flexrm_spu_sanity_check()
570 for (sg = msg->spu.src; sg; sg = sg_next(sg)) { in flexrm_spu_sanity_check()
571 if (sg->length & 0xf) { in flexrm_spu_sanity_check()
572 if (sg->length > SRC_LENGTH_MASK) in flexrm_spu_sanity_check()
575 if (sg->length > (MSRC_LENGTH_MASK * 16)) in flexrm_spu_sanity_check()
579 for (sg = msg->spu.dst; sg; sg = sg_next(sg)) { in flexrm_spu_sanity_check()
580 if (sg->length & 0xf) { in flexrm_spu_sanity_check()
581 if (sg->length > DST_LENGTH_MASK) in flexrm_spu_sanity_check()
584 if (sg->length > (MDST_LENGTH_MASK * 16)) in flexrm_spu_sanity_check()
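The checks above apply the same per-fragment rule to the source and destination scatterlists: a length that is not a multiple of 16 must fit the plain SRC/DST length field, while a 16-byte multiple may use the MSRC/MDST form whose limit is 16 times larger. A hedged sketch of that predicate, with the mask values passed in because their definitions are not shown in this listing:

/* Illustrative only -- not part of the driver. */
#include <stdbool.h>

static bool example_spu_len_ok(unsigned int len,
			       unsigned int len_mask,	/* SRC/DST_LENGTH_MASK */
			       unsigned int mlen_mask)	/* MSRC/MDST_LENGTH_MASK */
{
	if (len & 0xf)
		return len <= len_mask;
	return len <= mlen_mask * 16;
}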
596 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst; in flexrm_spu_estimate_nonheader_desc_count()
601 dst_target = src_sg->length; in flexrm_spu_estimate_nonheader_desc_count()
608 if (dst_sg->length < dst_target) in flexrm_spu_estimate_nonheader_desc_count()
609 dst_target -= dst_sg->length; in flexrm_spu_estimate_nonheader_desc_count()
623 rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_map()
626 return -EIO; in flexrm_spu_dma_map()
628 rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), in flexrm_spu_dma_map()
631 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_map()
633 return -EIO; in flexrm_spu_dma_map()
641 dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), in flexrm_spu_dma_unmap()
643 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), in flexrm_spu_dma_unmap()
655 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst; in flexrm_spu_write_descs()
686 dst_target -= sg_dma_len(dst_sg); in flexrm_spu_write_descs()
709 if (!msg->sba.cmds || !msg->sba.cmds_count) in flexrm_sba_sanity_check()
712 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_sanity_check()
713 if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) || in flexrm_sba_sanity_check()
714 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) && in flexrm_sba_sanity_check()
715 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)) in flexrm_sba_sanity_check()
717 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) && in flexrm_sba_sanity_check()
718 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK)) in flexrm_sba_sanity_check()
720 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) && in flexrm_sba_sanity_check()
721 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK)) in flexrm_sba_sanity_check()
723 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) && in flexrm_sba_sanity_check()
724 (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK)) in flexrm_sba_sanity_check()
726 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) && in flexrm_sba_sanity_check()
727 (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK)) in flexrm_sba_sanity_check()
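These checks constrain brcm_sba_command flag combinations: TYPE_B and TYPE_C commands read their data, so they may not also carry BRCM_SBA_CMD_HAS_OUTPUT, data_len is bounded by SRCT_LENGTH_MASK, and response/output lengths by DSTT_LENGTH_MASK. A hedged sketch of filling one command that passes them, using only the fields referenced in this listing; addresses and lengths are placeholders:

/* Illustrative only -- not part of the driver. */
#include <linux/mailbox/brcm-message.h>

static void example_fill_sba_cmd(struct brcm_sba_command *c,
				 u64 cmd_word, dma_addr_t data,
				 size_t data_len)
{
	c->flags = BRCM_SBA_CMD_TYPE_B;	/* no HAS_OUTPUT with TYPE_B/C */
	c->cmd = cmd_word;
	c->data = data;
	c->data_len = data_len;		/* must not exceed SRCT_LENGTH_MASK */
}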
739 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_estimate_nonheader_desc_count()
742 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) || in flexrm_sba_estimate_nonheader_desc_count()
743 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) in flexrm_sba_estimate_nonheader_desc_count()
746 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) in flexrm_sba_estimate_nonheader_desc_count()
749 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) in flexrm_sba_estimate_nonheader_desc_count()
766 for (i = 0; i < msg->sba.cmds_count; i++) { in flexrm_sba_write_descs()
767 c = &msg->sba.cmds[i]; in flexrm_sba_write_descs()
769 if ((c->flags & BRCM_SBA_CMD_HAS_RESP) && in flexrm_sba_write_descs()
770 (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) { in flexrm_sba_write_descs()
772 d = flexrm_dst_desc(c->resp, c->resp_len); in flexrm_sba_write_descs()
777 } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) { in flexrm_sba_write_descs()
779 d = flexrm_dstt_desc(c->resp, c->resp_len); in flexrm_sba_write_descs()
786 if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) { in flexrm_sba_write_descs()
788 d = flexrm_dstt_desc(c->data, c->data_len); in flexrm_sba_write_descs()
795 if (c->flags & BRCM_SBA_CMD_TYPE_B) { in flexrm_sba_write_descs()
797 d = flexrm_imm_desc(c->cmd); in flexrm_sba_write_descs()
804 d = flexrm_immt_desc(c->cmd); in flexrm_sba_write_descs()
811 if ((c->flags & BRCM_SBA_CMD_TYPE_B) || in flexrm_sba_write_descs()
812 (c->flags & BRCM_SBA_CMD_TYPE_C)) { in flexrm_sba_write_descs()
814 d = flexrm_srct_desc(c->data, c->data_len); in flexrm_sba_write_descs()
839 switch (msg->type) { in flexrm_sanity_check()
854 switch (msg->type) { in flexrm_estimate_nonheader_desc_count()
867 return -EINVAL; in flexrm_dma_map()
869 switch (msg->type) { in flexrm_dma_map()
884 switch (msg->type) { in flexrm_dma_unmap()
898 return ERR_PTR(-ENOTSUPP); in flexrm_write_descs()
901 return ERR_PTR(-ERANGE); in flexrm_write_descs()
903 switch (msg->type) { in flexrm_write_descs()
913 return ERR_PTR(-ENOTSUPP); in flexrm_write_descs()
926 seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n", in flexrm_write_config_in_seqfile()
930 for (i = 0; i < mbox->num_rings; i++) { in flexrm_write_config_in_seqfile()
931 ring = &mbox->rings[i]; in flexrm_write_config_in_seqfile()
932 if (readl(ring->regs + RING_CONTROL) & in flexrm_write_config_in_seqfile()
938 "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n", in flexrm_write_config_in_seqfile()
939 ring->num, state, in flexrm_write_config_in_seqfile()
940 (unsigned long long)ring->bd_dma_base, in flexrm_write_config_in_seqfile()
942 (unsigned long long)ring->cmpl_dma_base, in flexrm_write_config_in_seqfile()
954 seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n", in flexrm_write_stats_in_seqfile()
958 for (i = 0; i < mbox->num_rings; i++) { in flexrm_write_stats_in_seqfile()
959 ring = &mbox->rings[i]; in flexrm_write_stats_in_seqfile()
960 bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); in flexrm_write_stats_in_seqfile()
961 val = readl_relaxed(ring->regs + RING_BD_START_ADDR); in flexrm_write_stats_in_seqfile()
963 bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) - in flexrm_write_stats_in_seqfile()
964 ring->bd_dma_base); in flexrm_write_stats_in_seqfile()
965 seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n", in flexrm_write_stats_in_seqfile()
966 ring->num, in flexrm_write_stats_in_seqfile()
968 (u32)ring->bd_write_offset, in flexrm_write_stats_in_seqfile()
969 (u32)ring->cmpl_read_offset, in flexrm_write_stats_in_seqfile()
970 (u32)atomic_read(&ring->msg_send_count), in flexrm_write_stats_in_seqfile()
971 (u32)atomic_read(&ring->msg_cmpl_count)); in flexrm_write_stats_in_seqfile()
988 return -EIO; in flexrm_new_request()
989 msg->error = 0; in flexrm_new_request()
992 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
993 reqid = bitmap_find_free_region(ring->requests_bmap, in flexrm_new_request()
995 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
997 return -ENOSPC; in flexrm_new_request()
998 ring->requests[reqid] = msg; in flexrm_new_request()
1001 ret = flexrm_dma_map(ring->mbox->dev, msg); in flexrm_new_request()
1003 ring->requests[reqid] = NULL; in flexrm_new_request()
1004 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
1005 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_new_request()
1006 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
1011 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR); in flexrm_new_request()
1012 val = readl_relaxed(ring->regs + RING_BD_START_ADDR); in flexrm_new_request()
1014 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base); in flexrm_new_request()
1017 * Number of required descriptors = number of non-header descriptors + in flexrm_new_request()
1025 write_offset = ring->bd_write_offset; in flexrm_new_request()
1027 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset)) in flexrm_new_request()
1028 count--; in flexrm_new_request()
1036 ret = -ENOSPC; in flexrm_new_request()
1043 ring->bd_base + ring->bd_write_offset, in flexrm_new_request()
1044 RING_BD_TOGGLE_VALID(ring->bd_write_offset), in flexrm_new_request()
1045 ring->bd_base, ring->bd_base + RING_BD_SIZE); in flexrm_new_request()
1053 ring->bd_write_offset = (unsigned long)(next - ring->bd_base); in flexrm_new_request()
1056 atomic_inc_return(&ring->msg_send_count); in flexrm_new_request()
1060 msg->error = ret; in flexrm_new_request()
1064 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_new_request()
1065 ring->requests[reqid] = NULL; in flexrm_new_request()
1066 spin_lock_irqsave(&ring->lock, flags); in flexrm_new_request()
1067 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_new_request()
1068 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_new_request()
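flexrm_new_request() reserves a request id, DMA-maps the message, and then checks whether enough descriptor slots lie between the driver's write offset and the hardware read offset before writing descriptors. A simplified sketch of that space check, assuming equal offsets mean an empty ring and ignoring the next-table descriptors that the driver's walk excludes from the count:

/* Illustrative only -- not part of the driver. */
#include <linux/types.h>

static u32 example_free_slots(u32 write_off, u32 read_off,
			      u32 ring_size, u32 desc_size)
{
	u32 used = (write_off >= read_off) ?
		   (write_off - read_off) :
		   (ring_size - read_off + write_off);

	return (ring_size - used) / desc_size;
}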
1081 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num]; in flexrm_process_completions()
1083 spin_lock_irqsave(&ring->lock, flags); in flexrm_process_completions()
1093 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); in flexrm_process_completions()
1095 cmpl_read_offset = ring->cmpl_read_offset; in flexrm_process_completions()
1096 ring->cmpl_read_offset = cmpl_write_offset; in flexrm_process_completions()
1098 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_process_completions()
1104 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset)); in flexrm_process_completions()
1114 dev_warn(ring->mbox->dev, in flexrm_process_completions()
1116 ring->num, (unsigned long)desc, err); in flexrm_process_completions()
1123 msg = ring->requests[reqid]; in flexrm_process_completions()
1125 dev_warn(ring->mbox->dev, in flexrm_process_completions()
1127 ring->num, (unsigned long)desc); in flexrm_process_completions()
1132 ring->requests[reqid] = NULL; in flexrm_process_completions()
1133 spin_lock_irqsave(&ring->lock, flags); in flexrm_process_completions()
1134 bitmap_release_region(ring->requests_bmap, reqid, 0); in flexrm_process_completions()
1135 spin_unlock_irqrestore(&ring->lock, flags); in flexrm_process_completions()
1138 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_process_completions()
1140 /* Give the message back to the mailbox client */ in flexrm_process_completions()
1141 msg->error = err; in flexrm_process_completions()
1145 atomic_inc_return(&ring->msg_cmpl_count); in flexrm_process_completions()
1156 struct flexrm_mbox *mbox = dev_get_drvdata(file->private); in flexrm_debugfs_conf_show()
1166 struct flexrm_mbox *mbox = dev_get_drvdata(file->private); in flexrm_debugfs_stats_show()
1168 /* Write stats to file */ in flexrm_debugfs_stats_show()
1196 struct flexrm_ring *ring = chan->con_priv; in flexrm_send_data()
1199 if (msg->type == BRCM_MESSAGE_BATCH) { in flexrm_send_data()
1200 for (i = msg->batch.msgs_queued; in flexrm_send_data()
1201 i < msg->batch.msgs_count; i++) { in flexrm_send_data()
1203 &msg->batch.msgs[i]); in flexrm_send_data()
1205 msg->error = rc; in flexrm_send_data()
1208 msg->batch.msgs_queued++; in flexrm_send_data()
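flexrm_send_data() accepts either a single message or a BRCM_MESSAGE_BATCH whose sub-messages are queued one by one, with msgs_queued recording how far a partially accepted batch has progressed. A hedged sketch of a client preparing such a batch, using only the fields accessed above; the sub-messages themselves are placeholders:

/* Illustrative only -- not driver code. */
#include <linux/mailbox/brcm-message.h>

static void example_init_batch(struct brcm_message *batch,
			       struct brcm_message *msgs,
			       unsigned int count)
{
	batch->type = BRCM_MESSAGE_BATCH;
	batch->batch.msgs = msgs;		/* array of prepared sub-messages */
	batch->batch.msgs_count = count;
	batch->batch.msgs_queued = 0;		/* advanced by the controller */
	batch->error = 0;
}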
1218 int cnt = flexrm_process_completions(chan->con_priv); in flexrm_peek_data()
1229 struct flexrm_ring *ring = chan->con_priv; in flexrm_startup()
1232 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool, in flexrm_startup()
1233 GFP_KERNEL, &ring->bd_dma_base); in flexrm_startup()
1234 if (!ring->bd_base) { in flexrm_startup()
1235 dev_err(ring->mbox->dev, in flexrm_startup()
1237 ring->num); in flexrm_startup()
1238 ret = -ENOMEM; in flexrm_startup()
1247 next_addr += ring->bd_dma_base; in flexrm_startup()
1253 flexrm_write_desc(ring->bd_base + off, d); in flexrm_startup()
1257 ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool, in flexrm_startup()
1258 GFP_KERNEL, &ring->cmpl_dma_base); in flexrm_startup()
1259 if (!ring->cmpl_base) { in flexrm_startup()
1260 dev_err(ring->mbox->dev, in flexrm_startup()
1262 ring->num); in flexrm_startup()
1263 ret = -ENOMEM; in flexrm_startup()
1268 if (ring->irq == UINT_MAX) { in flexrm_startup()
1269 dev_err(ring->mbox->dev, in flexrm_startup()
1270 "ring%d IRQ not available\n", ring->num); in flexrm_startup()
1271 ret = -ENODEV; in flexrm_startup()
1274 ret = request_threaded_irq(ring->irq, in flexrm_startup()
1277 0, dev_name(ring->mbox->dev), ring); in flexrm_startup()
1279 dev_err(ring->mbox->dev, in flexrm_startup()
1280 "failed to request ring%d IRQ\n", ring->num); in flexrm_startup()
1283 ring->irq_requested = true; in flexrm_startup()
1286 ring->irq_aff_hint = CPU_MASK_NONE; in flexrm_startup()
1287 val = ring->mbox->num_rings; in flexrm_startup()
1289 cpumask_set_cpu((ring->num / val) % num_online_cpus(), in flexrm_startup()
1290 &ring->irq_aff_hint); in flexrm_startup()
1291 ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint); in flexrm_startup()
1293 dev_err(ring->mbox->dev, in flexrm_startup()
1295 ring->num); in flexrm_startup()
1300 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_startup()
1303 val = BD_START_ADDR_VALUE(ring->bd_dma_base); in flexrm_startup()
1304 writel_relaxed(val, ring->regs + RING_BD_START_ADDR); in flexrm_startup()
1307 ring->bd_write_offset = in flexrm_startup()
1308 readl_relaxed(ring->regs + RING_BD_WRITE_PTR); in flexrm_startup()
1309 ring->bd_write_offset *= RING_DESC_SIZE; in flexrm_startup()
1312 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base); in flexrm_startup()
1313 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR); in flexrm_startup()
1316 ring->cmpl_read_offset = in flexrm_startup()
1317 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); in flexrm_startup()
1318 ring->cmpl_read_offset *= RING_DESC_SIZE; in flexrm_startup()
1321 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS); in flexrm_startup()
1322 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS); in flexrm_startup()
1323 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS); in flexrm_startup()
1324 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS); in flexrm_startup()
1325 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND); in flexrm_startup()
1329 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT); in flexrm_startup()
1331 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT; in flexrm_startup()
1332 writel_relaxed(val, ring->regs + RING_MSI_CONTROL); in flexrm_startup()
1336 writel_relaxed(val, ring->regs + RING_CONTROL); in flexrm_startup()
1338 /* Reset stats to zero */ in flexrm_startup()
1339 atomic_set(&ring->msg_send_count, 0); in flexrm_startup()
1340 atomic_set(&ring->msg_cmpl_count, 0); in flexrm_startup()
1345 free_irq(ring->irq, ring); in flexrm_startup()
1346 ring->irq_requested = false; in flexrm_startup()
1348 dma_pool_free(ring->mbox->cmpl_pool, in flexrm_startup()
1349 ring->cmpl_base, ring->cmpl_dma_base); in flexrm_startup()
1350 ring->cmpl_base = NULL; in flexrm_startup()
1352 dma_pool_free(ring->mbox->bd_pool, in flexrm_startup()
1353 ring->bd_base, ring->bd_dma_base); in flexrm_startup()
1354 ring->bd_base = NULL; in flexrm_startup()
1364 struct flexrm_ring *ring = chan->con_priv; in flexrm_shutdown()
1367 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_shutdown()
1372 ring->regs + RING_CONTROL); in flexrm_shutdown()
1374 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) & in flexrm_shutdown()
1378 } while (--timeout); in flexrm_shutdown()
1380 dev_err(ring->mbox->dev, in flexrm_shutdown()
1381 "setting ring%d flush state timed out\n", ring->num); in flexrm_shutdown()
1385 writel_relaxed(0x0, ring->regs + RING_CONTROL); in flexrm_shutdown()
1387 if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & in flexrm_shutdown()
1391 } while (--timeout); in flexrm_shutdown()
1393 dev_err(ring->mbox->dev, in flexrm_shutdown()
1394 "clearing ring%d flush state timed out\n", ring->num); in flexrm_shutdown()
1396 /* Abort all in-flight requests */ in flexrm_shutdown()
1398 msg = ring->requests[reqid]; in flexrm_shutdown()
1403 ring->requests[reqid] = NULL; in flexrm_shutdown()
1406 flexrm_dma_unmap(ring->mbox->dev, msg); in flexrm_shutdown()
1408 /* Give the message back to the mailbox client */ in flexrm_shutdown()
1409 msg->error = -EIO; in flexrm_shutdown()
1414 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); in flexrm_shutdown()
1417 if (ring->irq_requested) { in flexrm_shutdown()
1418 irq_update_affinity_hint(ring->irq, NULL); in flexrm_shutdown()
1419 free_irq(ring->irq, ring); in flexrm_shutdown()
1420 ring->irq_requested = false; in flexrm_shutdown()
1423 /* Free up completion descriptor ring */ in flexrm_shutdown()
1424 if (ring->cmpl_base) { in flexrm_shutdown()
1425 dma_pool_free(ring->mbox->cmpl_pool, in flexrm_shutdown()
1426 ring->cmpl_base, ring->cmpl_dma_base); in flexrm_shutdown()
1427 ring->cmpl_base = NULL; in flexrm_shutdown()
1430 /* Free up BD descriptor ring */ in flexrm_shutdown()
1431 if (ring->bd_base) { in flexrm_shutdown()
1432 dma_pool_free(ring->mbox->bd_pool, in flexrm_shutdown()
1433 ring->bd_base, ring->bd_dma_base); in flexrm_shutdown()
1434 ring->bd_base = NULL; in flexrm_shutdown()
1446 const struct of_phandle_args *pa) in flexrm_mbox_of_xlate() argument
1451 if (pa->args_count < 3) in flexrm_mbox_of_xlate()
1452 return ERR_PTR(-EINVAL); in flexrm_mbox_of_xlate()
1454 if (pa->args[0] >= cntlr->num_chans) in flexrm_mbox_of_xlate()
1455 return ERR_PTR(-ENOENT); in flexrm_mbox_of_xlate()
1457 if (pa->args[1] > MSI_COUNT_MASK) in flexrm_mbox_of_xlate()
1458 return ERR_PTR(-EINVAL); in flexrm_mbox_of_xlate()
1460 if (pa->args[2] > MSI_TIMER_VAL_MASK) in flexrm_mbox_of_xlate()
1461 return ERR_PTR(-EINVAL); in flexrm_mbox_of_xlate()
1463 chan = &cntlr->chans[pa->args[0]]; in flexrm_mbox_of_xlate()
1464 ring = chan->con_priv; in flexrm_mbox_of_xlate()
1465 ring->msi_count_threshold = pa->args[1]; in flexrm_mbox_of_xlate()
1466 ring->msi_timer_val = pa->args[2]; in flexrm_mbox_of_xlate()
1477 struct flexrm_ring *ring = &mbox->rings[desc->msi_index]; in flexrm_mbox_msi_write()
1479 /* Configure per-Ring MSI registers */ in flexrm_mbox_msi_write()
1480 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); in flexrm_mbox_msi_write()
1481 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS); in flexrm_mbox_msi_write()
1482 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE); in flexrm_mbox_msi_write()
1493 struct device *dev = &pdev->dev; in flexrm_mbox_probe()
1498 ret = -ENOMEM; in flexrm_mbox_probe()
1501 mbox->dev = dev; in flexrm_mbox_probe()
1505 mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem); in flexrm_mbox_probe()
1507 ret = -ENODEV; in flexrm_mbox_probe()
1509 } else if (IS_ERR(mbox->regs)) { in flexrm_mbox_probe()
1510 ret = PTR_ERR(mbox->regs); in flexrm_mbox_probe()
1513 regs_end = mbox->regs + resource_size(iomem); in flexrm_mbox_probe()
1516 mbox->num_rings = 0; in flexrm_mbox_probe()
1517 for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) { in flexrm_mbox_probe()
1519 mbox->num_rings++; in flexrm_mbox_probe()
1521 if (!mbox->num_rings) { in flexrm_mbox_probe()
1522 ret = -ENODEV; in flexrm_mbox_probe()
1527 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL); in flexrm_mbox_probe()
1529 ret = -ENOMEM; in flexrm_mbox_probe()
1532 mbox->rings = ring; in flexrm_mbox_probe()
1535 regs = mbox->regs; in flexrm_mbox_probe()
1536 for (index = 0; index < mbox->num_rings; index++) { in flexrm_mbox_probe()
1537 ring = &mbox->rings[index]; in flexrm_mbox_probe()
1538 ring->num = index; in flexrm_mbox_probe()
1539 ring->mbox = mbox; in flexrm_mbox_probe()
1544 ret = -ENODEV; in flexrm_mbox_probe()
1547 ring->regs = regs; in flexrm_mbox_probe()
1549 ring->irq = UINT_MAX; in flexrm_mbox_probe()
1550 ring->irq_requested = false; in flexrm_mbox_probe()
1551 ring->msi_timer_val = MSI_TIMER_VAL_MASK; in flexrm_mbox_probe()
1552 ring->msi_count_threshold = 0x1; in flexrm_mbox_probe()
1553 memset(ring->requests, 0, sizeof(ring->requests)); in flexrm_mbox_probe()
1554 ring->bd_base = NULL; in flexrm_mbox_probe()
1555 ring->bd_dma_base = 0; in flexrm_mbox_probe()
1556 ring->cmpl_base = NULL; in flexrm_mbox_probe()
1557 ring->cmpl_dma_base = 0; in flexrm_mbox_probe()
1558 atomic_set(&ring->msg_send_count, 0); in flexrm_mbox_probe()
1559 atomic_set(&ring->msg_cmpl_count, 0); in flexrm_mbox_probe()
1560 spin_lock_init(&ring->lock); in flexrm_mbox_probe()
1561 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); in flexrm_mbox_probe()
1562 ring->cmpl_read_offset = 0; in flexrm_mbox_probe()
1565 /* FlexRM supports 40-bit physical addresses only */ in flexrm_mbox_probe()
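Because FlexRM generates only 40-bit physical addresses, the probe path must restrict the device's DMA mask before the descriptor pools are created. A minimal sketch of such a call (not necessarily the exact code elided from this listing):

/* Illustrative only -- not necessarily the driver's exact call. */
#include <linux/dma-mapping.h>

static int example_set_40bit_mask(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
}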
1574 mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE, in flexrm_mbox_probe()
1576 if (!mbox->bd_pool) { in flexrm_mbox_probe()
1577 ret = -ENOMEM; in flexrm_mbox_probe()
1582 mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE, in flexrm_mbox_probe()
1584 if (!mbox->cmpl_pool) { in flexrm_mbox_probe()
1585 ret = -ENOMEM; in flexrm_mbox_probe()
1590 ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings, in flexrm_mbox_probe()
1596 for (index = 0; index < mbox->num_rings; index++) in flexrm_mbox_probe()
1597 mbox->rings[index].irq = msi_get_virq(dev, index); in flexrm_mbox_probe()
1604 mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL); in flexrm_mbox_probe()
1607 debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root, in flexrm_mbox_probe()
1610 /* Create debugfs stats entry */ in flexrm_mbox_probe()
1611 debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root, in flexrm_mbox_probe()
1617 mbox->controller.txdone_irq = false; in flexrm_mbox_probe()
1618 mbox->controller.txdone_poll = false; in flexrm_mbox_probe()
1619 mbox->controller.ops = &flexrm_mbox_chan_ops; in flexrm_mbox_probe()
1620 mbox->controller.dev = dev; in flexrm_mbox_probe()
1621 mbox->controller.num_chans = mbox->num_rings; in flexrm_mbox_probe()
1622 mbox->controller.of_xlate = flexrm_mbox_of_xlate; in flexrm_mbox_probe()
1623 mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings, in flexrm_mbox_probe()
1624 sizeof(*mbox->controller.chans), GFP_KERNEL); in flexrm_mbox_probe()
1625 if (!mbox->controller.chans) { in flexrm_mbox_probe()
1626 ret = -ENOMEM; in flexrm_mbox_probe()
1629 for (index = 0; index < mbox->num_rings; index++) in flexrm_mbox_probe()
1630 mbox->controller.chans[index].con_priv = &mbox->rings[index]; in flexrm_mbox_probe()
1633 ret = devm_mbox_controller_register(dev, &mbox->controller); in flexrm_mbox_probe()
1638 mbox->controller.num_chans); in flexrm_mbox_probe()
1643 debugfs_remove_recursive(mbox->root); in flexrm_mbox_probe()
1646 dma_pool_destroy(mbox->cmpl_pool); in flexrm_mbox_probe()
1648 dma_pool_destroy(mbox->bd_pool); in flexrm_mbox_probe()
1655 struct device *dev = &pdev->dev; in flexrm_mbox_remove()
1658 debugfs_remove_recursive(mbox->root); in flexrm_mbox_remove()
1662 dma_pool_destroy(mbox->cmpl_pool); in flexrm_mbox_remove()
1663 dma_pool_destroy(mbox->bd_pool); in flexrm_mbox_remove()
1667 { .compatible = "brcm,iproc-flexrm-mbox", },
1674 .name = "brcm-flexrm-mbox",