Lines matching +full:rpmsg +full:- +full:out (drivers/misc/fastrpc.c)

1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
7 #include <linux/dma-buf.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/dma-resv.h>
19 #include <linux/rpmsg.h>
78 #define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \ argument
82 ((out & 0xff) << 8) | \
86 #define FASTRPC_SCALARS(method, in, out) \ argument
87 FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
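The two macro fragments above pack a whole method signature into one 32-bit "scalars" word: attribute bits, the method index, and the counts of input/output buffers and handles each occupy a fixed field. Below is a minimal userspace sketch of that packing; only the "(out & 0xff) << 8" field is visible above, so the remaining widths are assumptions inferred from the argument order.

/* Hypothetical packing sketch; field widths other than the visible
 * out-buffer field are assumed from the macro's argument order. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
	((((uint32_t)(attr)   & 0x07) << 29) |			\
	 (((uint32_t)(method) & 0x1f) << 24) |			\
	 (((uint32_t)(in)     & 0xff) << 16) |			\
	 (((uint32_t)(out)    & 0xff) <<  8) |			\
	 (((uint32_t)(oin)    & 0x0f) <<  4) |			\
	  ((uint32_t)(oout)   & 0x0f))

int main(void)
{
	/* method 1 with two input buffers and one output buffer */
	printf("sc = 0x%08x\n",
	       (unsigned)DEMO_BUILD_SCALARS(0, 1, 2, 1, 0, 0));
	return 0;
}

The REMOTE_SCALARS_* helpers seen later in the file presumably mask and shift the same fields back out.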
317 if (map->table) { in fastrpc_free_map()
318 if (map->attr & FASTRPC_ATTR_SECUREMAP) { in fastrpc_free_map()
320 int vmid = map->fl->cctx->vmperms[0].vmid; in fastrpc_free_map()
326 err = qcom_scm_assign_mem(map->phys, map->size, in fastrpc_free_map()
329 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_free_map()
330 map->phys, map->size, err); in fastrpc_free_map()
334 dma_buf_unmap_attachment_unlocked(map->attach, map->table, in fastrpc_free_map()
336 dma_buf_detach(map->buf, map->attach); in fastrpc_free_map()
337 dma_buf_put(map->buf); in fastrpc_free_map()
340 if (map->fl) { in fastrpc_free_map()
341 spin_lock(&map->fl->lock); in fastrpc_free_map()
342 list_del(&map->node); in fastrpc_free_map()
343 spin_unlock(&map->fl->lock); in fastrpc_free_map()
344 map->fl = NULL; in fastrpc_free_map()
353 kref_put(&map->refcount, fastrpc_free_map); in fastrpc_map_put()
359 return -ENOENT; in fastrpc_map_get()
361 return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT; in fastrpc_map_get()
368 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_lookup()
370 int ret = -ENOENT; in fastrpc_map_lookup()
372 spin_lock(&fl->lock); in fastrpc_map_lookup()
373 list_for_each_entry(map, &fl->maps, node) { in fastrpc_map_lookup()
374 if (map->fd != fd) in fastrpc_map_lookup()
380 dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", in fastrpc_map_lookup()
390 spin_unlock(&fl->lock); in fastrpc_map_lookup()
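fastrpc_map_put(), fastrpc_map_get() and fastrpc_map_lookup() above follow a standard kernel idiom: map objects live on a per-client list under a spinlock, and a lookup only hands one out if its kref can still be raised. A minimal sketch of that idiom, with illustrative names rather than the driver's own types:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_map {
	struct list_head node;
	struct kref refcount;
	int fd;
};

static struct demo_map *demo_map_lookup(spinlock_t *lock,
					struct list_head *maps, int fd)
{
	struct demo_map *map;

	spin_lock(lock);
	list_for_each_entry(map, maps, node) {
		/* kref_get_unless_zero() fails if a concurrent release
		 * already dropped the last reference */
		if (map->fd == fd && kref_get_unless_zero(&map->refcount)) {
			spin_unlock(lock);
			return map;
		}
	}
	spin_unlock(lock);
	return NULL;
}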
397 dma_free_coherent(buf->dev, buf->size, buf->virt, in fastrpc_buf_free()
398 FASTRPC_PHYS(buf->phys)); in fastrpc_buf_free()
409 return -ENOMEM; in __fastrpc_buf_alloc()
411 INIT_LIST_HEAD(&buf->attachments); in __fastrpc_buf_alloc()
412 INIT_LIST_HEAD(&buf->node); in __fastrpc_buf_alloc()
413 mutex_init(&buf->lock); in __fastrpc_buf_alloc()
415 buf->fl = fl; in __fastrpc_buf_alloc()
416 buf->virt = NULL; in __fastrpc_buf_alloc()
417 buf->phys = 0; in __fastrpc_buf_alloc()
418 buf->size = size; in __fastrpc_buf_alloc()
419 buf->dev = dev; in __fastrpc_buf_alloc()
420 buf->raddr = 0; in __fastrpc_buf_alloc()
422 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, in __fastrpc_buf_alloc()
424 if (!buf->virt) { in __fastrpc_buf_alloc()
425 mutex_destroy(&buf->lock); in __fastrpc_buf_alloc()
427 return -ENOMEM; in __fastrpc_buf_alloc()
447 if (fl->sctx && fl->sctx->sid) in fastrpc_buf_alloc()
448 buf->phys += ((u64)fl->sctx->sid << 32); in fastrpc_buf_alloc()
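Line 448 folds the session ID (an SMMU stream identifier) into bits 32 and up of the buffer's device address, so a single u64 carries both the 32-bit address and the session it belongs to; the FASTRPC_PHYS() macro seen at line 398 presumably masks the tag back off. A runnable sketch of that encoding, with the mask assumed to be the low 32 bits:

#include <stdint.h>
#include <stdio.h>

/* assumed to mirror FASTRPC_PHYS(): keep the low 32 bits */
#define DEMO_PHYS(p) ((p) & 0xffffffffULL)

int main(void)
{
	uint64_t dma_addr = 0x8f000000ULL;
	uint32_t sid = 3;	/* SMMU stream/session id, made up */
	uint64_t tagged = dma_addr | ((uint64_t)sid << 32);

	printf("tagged=0x%llx phys=0x%llx sid=%u\n",
	       (unsigned long long)tagged,
	       (unsigned long long)DEMO_PHYS(tagged),
	       (unsigned)(tagged >> 32));
	return 0;
}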
456 struct device *rdev = &fl->cctx->rpdev->dev; in fastrpc_remote_heap_alloc()
472 kref_get(&cctx->refcount); in fastrpc_channel_ctx_get()
477 kref_put(&cctx->refcount, fastrpc_channel_ctx_free); in fastrpc_channel_ctx_put()
488 cctx = ctx->cctx; in fastrpc_context_free()
490 for (i = 0; i < ctx->nbufs; i++) in fastrpc_context_free()
491 fastrpc_map_put(ctx->maps[i]); in fastrpc_context_free()
493 if (ctx->buf) in fastrpc_context_free()
494 fastrpc_buf_free(ctx->buf); in fastrpc_context_free()
496 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_context_free()
497 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); in fastrpc_context_free()
498 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_free()
500 kfree(ctx->maps); in fastrpc_context_free()
501 kfree(ctx->olaps); in fastrpc_context_free()
509 kref_get(&ctx->refcount); in fastrpc_context_get()
514 kref_put(&ctx->refcount, fastrpc_context_free); in fastrpc_context_put()
525 #define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
531 int st = CMP(pa->start, pb->start); in olaps_cmp()
533 int ed = CMP(pb->end, pa->end); in olaps_cmp()
543 for (i = 0; i < ctx->nbufs; ++i) { in fastrpc_get_buff_overlaps()
544 ctx->olaps[i].start = ctx->args[i].ptr; in fastrpc_get_buff_overlaps()
545 ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length; in fastrpc_get_buff_overlaps()
546 ctx->olaps[i].raix = i; in fastrpc_get_buff_overlaps()
549 sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL); in fastrpc_get_buff_overlaps()
551 for (i = 0; i < ctx->nbufs; ++i) { in fastrpc_get_buff_overlaps()
553 if (ctx->olaps[i].start < max_end) { in fastrpc_get_buff_overlaps()
554 ctx->olaps[i].mstart = max_end; in fastrpc_get_buff_overlaps()
555 ctx->olaps[i].mend = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
556 ctx->olaps[i].offset = max_end - ctx->olaps[i].start; in fastrpc_get_buff_overlaps()
558 if (ctx->olaps[i].end > max_end) { in fastrpc_get_buff_overlaps()
559 max_end = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
561 ctx->olaps[i].mend = 0; in fastrpc_get_buff_overlaps()
562 ctx->olaps[i].mstart = 0; in fastrpc_get_buff_overlaps()
566 ctx->olaps[i].mend = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
567 ctx->olaps[i].mstart = ctx->olaps[i].start; in fastrpc_get_buff_overlaps()
568 ctx->olaps[i].offset = 0; in fastrpc_get_buff_overlaps()
569 max_end = ctx->olaps[i].end; in fastrpc_get_buff_overlaps()
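fastrpc_get_buff_overlaps() sorts the user buffers by start address and then walks them with a running max_end: a buffer that starts below max_end contributes only its non-overlapping tail (mstart/mend), with offset recording how far into the buffer that tail begins, and a buffer fully inside an earlier one contributes nothing. A runnable userspace sketch of the same pass; the comparator's secondary key on end is inferred from the CMP fragments at lines 531 and 533.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct olap { uint64_t start, end, mstart, mend, offset; int raix; };

static int olaps_cmp(const void *a, const void *b)
{
	const struct olap *pa = a, *pb = b;

	if (pa->start != pb->start)
		return pa->start < pb->start ? -1 : 1;
	/* equal starts: larger extent first (assumed from CMP(pb->end, pa->end)) */
	if (pa->end != pb->end)
		return pa->end > pb->end ? -1 : 1;
	return 0;
}

int main(void)
{
	struct olap o[3] = {
		{ .start = 100, .end = 200, .raix = 0 },
		{ .start = 150, .end = 260, .raix = 1 },
		{ .start = 400, .end = 500, .raix = 2 },
	};
	uint64_t max_end = 0;
	int i;

	qsort(o, 3, sizeof(o[0]), olaps_cmp);

	for (i = 0; i < 3; i++) {
		if (o[i].start < max_end) {	/* overlaps an earlier buffer */
			o[i].mstart = max_end;
			o[i].mend = o[i].end;
			o[i].offset = max_end - o[i].start;
			if (o[i].end > max_end)
				max_end = o[i].end;
			else
				o[i].mstart = o[i].mend = 0; /* fully contained */
		} else {			/* disjoint */
			o[i].mstart = o[i].start;
			o[i].mend = o[i].end;
			o[i].offset = 0;
			max_end = o[i].end;
		}
		printf("raix=%d copy [%llu,%llu) offset=%llu\n", o[i].raix,
		       (unsigned long long)o[i].mstart,
		       (unsigned long long)o[i].mend,
		       (unsigned long long)o[i].offset);
	}
	return 0;
}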
578 struct fastrpc_channel_ctx *cctx = user->cctx; in fastrpc_context_alloc()
585 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
587 INIT_LIST_HEAD(&ctx->node); in fastrpc_context_alloc()
588 ctx->fl = user; in fastrpc_context_alloc()
589 ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); in fastrpc_context_alloc()
590 ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + in fastrpc_context_alloc()
593 if (ctx->nscalars) { in fastrpc_context_alloc()
594 ctx->maps = kcalloc(ctx->nscalars, in fastrpc_context_alloc()
595 sizeof(*ctx->maps), GFP_KERNEL); in fastrpc_context_alloc()
596 if (!ctx->maps) { in fastrpc_context_alloc()
598 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
600 ctx->olaps = kcalloc(ctx->nscalars, in fastrpc_context_alloc()
601 sizeof(*ctx->olaps), GFP_KERNEL); in fastrpc_context_alloc()
602 if (!ctx->olaps) { in fastrpc_context_alloc()
603 kfree(ctx->maps); in fastrpc_context_alloc()
605 return ERR_PTR(-ENOMEM); in fastrpc_context_alloc()
607 ctx->args = args; in fastrpc_context_alloc()
614 ctx->sc = sc; in fastrpc_context_alloc()
615 ctx->retval = -1; in fastrpc_context_alloc()
616 ctx->pid = current->pid; in fastrpc_context_alloc()
617 ctx->tgid = user->tgid; in fastrpc_context_alloc()
618 ctx->cctx = cctx; in fastrpc_context_alloc()
619 init_completion(&ctx->work); in fastrpc_context_alloc()
620 INIT_WORK(&ctx->put_work, fastrpc_context_put_wq); in fastrpc_context_alloc()
622 spin_lock(&user->lock); in fastrpc_context_alloc()
623 list_add_tail(&ctx->node, &user->pending); in fastrpc_context_alloc()
624 spin_unlock(&user->lock); in fastrpc_context_alloc()
626 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_context_alloc()
627 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, in fastrpc_context_alloc()
630 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_alloc()
633 ctx->ctxid = ret << 4; in fastrpc_context_alloc()
634 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_context_alloc()
636 kref_init(&ctx->refcount); in fastrpc_context_alloc()
640 spin_lock(&user->lock); in fastrpc_context_alloc()
641 list_del(&ctx->node); in fastrpc_context_alloc()
642 spin_unlock(&user->lock); in fastrpc_context_alloc()
644 kfree(ctx->maps); in fastrpc_context_alloc()
645 kfree(ctx->olaps); in fastrpc_context_alloc()
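The allocation path above hands each context a cyclic IDR id under the channel spinlock and stores it shifted left by four, leaving the low nibble of ctxid free; teardown at line 497 removes ctxid >> 4 accordingly. A minimal sketch of that pattern, where the upper bound of 256 is an assumption standing in for the driver's real limit:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static DEFINE_IDR(demo_idr);

static int demo_ctx_register(void *ctx, u64 *ctxid)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&demo_lock, flags);
	/* 256 is an assumed cap on live contexts; GFP_ATOMIC because
	 * the lock is held */
	id = idr_alloc_cyclic(&demo_idr, ctx, 1, 256, GFP_ATOMIC);
	spin_unlock_irqrestore(&demo_lock, flags);
	if (id < 0)
		return id;

	*ctxid = (u64)id << 4;	/* low four bits left free for flags */
	return 0;
}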
655 struct fastrpc_dma_buf_attachment *a = attachment->priv; in fastrpc_map_dma_buf()
659 table = &a->sgt; in fastrpc_map_dma_buf()
661 ret = dma_map_sgtable(attachment->dev, table, dir, 0); in fastrpc_map_dma_buf()
671 dma_unmap_sgtable(attach->dev, table, dir, 0); in fastrpc_unmap_dma_buf()
676 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_release()
685 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_dma_buf_attach()
690 return -ENOMEM; in fastrpc_dma_buf_attach()
692 ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, in fastrpc_dma_buf_attach()
693 FASTRPC_PHYS(buffer->phys), buffer->size); in fastrpc_dma_buf_attach()
695 dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); in fastrpc_dma_buf_attach()
697 return -EINVAL; in fastrpc_dma_buf_attach()
700 a->dev = attachment->dev; in fastrpc_dma_buf_attach()
701 INIT_LIST_HEAD(&a->node); in fastrpc_dma_buf_attach()
702 attachment->priv = a; in fastrpc_dma_buf_attach()
704 mutex_lock(&buffer->lock); in fastrpc_dma_buf_attach()
705 list_add(&a->node, &buffer->attachments); in fastrpc_dma_buf_attach()
706 mutex_unlock(&buffer->lock); in fastrpc_dma_buf_attach()
714 struct fastrpc_dma_buf_attachment *a = attachment->priv; in fastrpc_dma_buf_detatch()
715 struct fastrpc_buf *buffer = dmabuf->priv; in fastrpc_dma_buf_detatch()
717 mutex_lock(&buffer->lock); in fastrpc_dma_buf_detatch()
718 list_del(&a->node); in fastrpc_dma_buf_detatch()
719 mutex_unlock(&buffer->lock); in fastrpc_dma_buf_detatch()
720 sg_free_table(&a->sgt); in fastrpc_dma_buf_detatch()
726 struct fastrpc_buf *buf = dmabuf->priv; in fastrpc_vmap()
728 iosys_map_set_vaddr(map, buf->virt); in fastrpc_vmap()
736 struct fastrpc_buf *buf = dmabuf->priv; in fastrpc_mmap()
737 size_t size = vma->vm_end - vma->vm_start; in fastrpc_mmap()
739 dma_resv_assert_held(dmabuf->resv); in fastrpc_mmap()
741 return dma_mmap_coherent(buf->dev, vma, buf->virt, in fastrpc_mmap()
742 FASTRPC_PHYS(buf->phys), size); in fastrpc_mmap()
758 struct fastrpc_session_ctx *sess = fl->sctx; in fastrpc_map_create()
768 return -ENOMEM; in fastrpc_map_create()
770 INIT_LIST_HEAD(&map->node); in fastrpc_map_create()
771 kref_init(&map->refcount); in fastrpc_map_create()
773 map->fl = fl; in fastrpc_map_create()
774 map->fd = fd; in fastrpc_map_create()
775 map->buf = dma_buf_get(fd); in fastrpc_map_create()
776 if (IS_ERR(map->buf)) { in fastrpc_map_create()
777 err = PTR_ERR(map->buf); in fastrpc_map_create()
781 map->attach = dma_buf_attach(map->buf, sess->dev); in fastrpc_map_create()
782 if (IS_ERR(map->attach)) { in fastrpc_map_create()
783 dev_err(sess->dev, "Failed to attach dmabuf\n"); in fastrpc_map_create()
784 err = PTR_ERR(map->attach); in fastrpc_map_create()
788 table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL); in fastrpc_map_create()
793 map->table = table; in fastrpc_map_create()
796 map->phys = sg_phys(map->table->sgl); in fastrpc_map_create()
798 map->phys = sg_dma_address(map->table->sgl); in fastrpc_map_create()
799 map->phys += ((u64)fl->sctx->sid << 32); in fastrpc_map_create()
801 map->size = len; in fastrpc_map_create()
802 map->va = sg_virt(map->table->sgl); in fastrpc_map_create()
803 map->len = len; in fastrpc_map_create()
815 dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; in fastrpc_map_create()
817 map->attr = attr; in fastrpc_map_create()
818 err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); in fastrpc_map_create()
820 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_map_create()
821 map->phys, map->size, err); in fastrpc_map_create()
825 spin_lock(&fl->lock); in fastrpc_map_create()
826 list_add_tail(&map->node, &fl->maps); in fastrpc_map_create()
827 spin_unlock(&fl->lock); in fastrpc_map_create()
833 dma_buf_detach(map->buf, map->attach); in fastrpc_map_create()
835 dma_buf_put(map->buf); in fastrpc_map_create()
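fastrpc_map_create() above is the canonical dma-buf importer sequence: fd to dma_buf_get(), attach to the session device, map the attachment to obtain a scatter-gather table, and unwind in reverse order on failure. A condensed sketch of just that sequence, with demo_dev standing in for the session device:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_import(struct device *demo_dev, int fd, u64 *phys)
{
	struct dma_buf *buf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;
	int err;

	buf = dma_buf_get(fd);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	attach = dma_buf_attach(buf, demo_dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto put_buf;
	}

	table = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		err = PTR_ERR(table);
		goto detach;
	}

	*phys = sg_dma_address(table->sgl);	/* device address of first segment */
	/* caller must later unmap the attachment, detach and dma_buf_put() */
	return 0;

detach:
	dma_buf_detach(buf, attach);
put_buf:
	dma_buf_put(buf);
	return err;
}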
846 * +---------------------------------+
849 * | (0 - N) |
850 * +---------------------------------+
853 * | (0 - N) |
854 * +---------------------------------+
857 * | (0 - N) |
858 * +---------------------------------+
861 * +---------------------------------+
863 * +---------------------------------+
865 * | (0-N) |
866 * +---------------------------------+
875 sizeof(struct fastrpc_phy_page)) * ctx->nscalars + in fastrpc_get_meta_size()
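The boxed diagram above (its row labels fall outside the matched lines) describes the message built for each invocation: a metadata header made of parallel per-argument arrays, nscalars entries each judging by the one visible line of fastrpc_get_meta_size(), followed by the inline argument payload. A userspace sketch of that sizing, with stand-in record types whose fields are assumptions:

#include <stdint.h>
#include <stdio.h>

/* stand-in record types; the real field layouts are not shown above */
struct demo_remote_arg { uint64_t pv, len; };
struct demo_invoke_buf { uint32_t num, pgidx; };
struct demo_phy_page   { uint64_t addr, size; };

static size_t demo_meta_size(int nscalars)
{
	return (sizeof(struct demo_remote_arg) +
		sizeof(struct demo_invoke_buf) +
		sizeof(struct demo_phy_page)) * nscalars;
}

int main(void)
{
	printf("metadata for 4 scalars: %zu bytes\n", demo_meta_size(4));
	return 0;
}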
888 for (oix = 0; oix < ctx->nbufs; oix++) { in fastrpc_get_payload_size()
889 int i = ctx->olaps[oix].raix; in fastrpc_get_payload_size()
891 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { in fastrpc_get_payload_size()
893 if (ctx->olaps[oix].offset == 0) in fastrpc_get_payload_size()
896 size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart); in fastrpc_get_payload_size()
905 struct device *dev = ctx->fl->sctx->dev; in fastrpc_create_maps()
908 for (i = 0; i < ctx->nscalars; ++i) { in fastrpc_create_maps()
910 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || in fastrpc_create_maps()
911 ctx->args[i].length == 0) in fastrpc_create_maps()
914 err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, in fastrpc_create_maps()
915 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]); in fastrpc_create_maps()
918 return -EINVAL; in fastrpc_create_maps()
937 struct device *dev = ctx->fl->sctx->dev; in fastrpc_get_args()
947 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); in fastrpc_get_args()
955 ctx->msg_sz = pkt_size; in fastrpc_get_args()
957 if (ctx->fl->sctx->sid) in fastrpc_get_args()
958 err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
960 err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf); in fastrpc_get_args()
964 memset(ctx->buf->virt, 0, pkt_size); in fastrpc_get_args()
965 rpra = ctx->buf->virt; in fastrpc_get_args()
966 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); in fastrpc_get_args()
967 pages = fastrpc_phy_page_start(list, ctx->nscalars); in fastrpc_get_args()
968 args = (uintptr_t)ctx->buf->virt + metalen; in fastrpc_get_args()
969 rlen = pkt_size - metalen; in fastrpc_get_args()
970 ctx->rpra = rpra; in fastrpc_get_args()
972 for (oix = 0; oix < ctx->nbufs; ++oix) { in fastrpc_get_args()
975 i = ctx->olaps[oix].raix; in fastrpc_get_args()
976 len = ctx->args[i].length; in fastrpc_get_args()
986 if (ctx->maps[i]) { in fastrpc_get_args()
989 rpra[i].buf.pv = (u64) ctx->args[i].ptr; in fastrpc_get_args()
990 pages[i].addr = ctx->maps[i]->phys; in fastrpc_get_args()
992 mmap_read_lock(current->mm); in fastrpc_get_args()
993 vma = find_vma(current->mm, ctx->args[i].ptr); in fastrpc_get_args()
995 pages[i].addr += ctx->args[i].ptr - in fastrpc_get_args()
996 vma->vm_start; in fastrpc_get_args()
997 mmap_read_unlock(current->mm); in fastrpc_get_args()
999 pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT; in fastrpc_get_args()
1000 pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >> in fastrpc_get_args()
1002 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; in fastrpc_get_args()
1006 if (ctx->olaps[oix].offset == 0) { in fastrpc_get_args()
1007 rlen -= ALIGN(args, FASTRPC_ALIGN) - args; in fastrpc_get_args()
1011 mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart; in fastrpc_get_args()
1016 rpra[i].buf.pv = args - ctx->olaps[oix].offset; in fastrpc_get_args()
1017 pages[i].addr = ctx->buf->phys - in fastrpc_get_args()
1018 ctx->olaps[oix].offset + in fastrpc_get_args()
1019 (pkt_size - rlen); in fastrpc_get_args()
1023 pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT; in fastrpc_get_args()
1024 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE; in fastrpc_get_args()
1026 rlen -= mlen; in fastrpc_get_args()
1029 if (i < inbufs && !ctx->maps[i]) { in fastrpc_get_args()
1031 void *src = (void *)(uintptr_t)ctx->args[i].ptr; in fastrpc_get_args()
1036 err = -EFAULT; in fastrpc_get_args()
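The pages[i].size arithmetic above (lines 999-1002 and 1023-1024) rounds each buffer out to whole pages: pg_start and pg_end are the page indices of the first and last byte, so the span is pg_end - pg_start + 1 pages even when the buffer straddles a page boundary. A runnable worked example:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	uint64_t ptr = 0x7f321fff0ULL;	/* arbitrary user address */
	uint64_t len = 0x2000;		/* 8 KiB, straddling page edges */
	uint64_t pg_start = (ptr & DEMO_PAGE_MASK) >> DEMO_PAGE_SHIFT;
	uint64_t pg_end = ((ptr + len - 1) & DEMO_PAGE_MASK) >> DEMO_PAGE_SHIFT;

	/* the span covers every page the buffer touches, hence the +1;
	 * here an 8 KiB buffer ends up spanning three 4 KiB pages */
	printf("pages spanned = %llu, bytes mapped = %llu\n",
	       (unsigned long long)(pg_end - pg_start + 1),
	       (unsigned long long)((pg_end - pg_start + 1) * DEMO_PAGE_SIZE));
	return 0;
}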
1045 for (i = ctx->nbufs; i < ctx->nscalars; ++i) { in fastrpc_get_args()
1046 list[i].num = ctx->args[i].length ? 1 : 0; in fastrpc_get_args()
1048 if (ctx->maps[i]) { in fastrpc_get_args()
1049 pages[i].addr = ctx->maps[i]->phys; in fastrpc_get_args()
1050 pages[i].size = ctx->maps[i]->size; in fastrpc_get_args()
1052 rpra[i].dma.fd = ctx->args[i].fd; in fastrpc_get_args()
1053 rpra[i].dma.len = ctx->args[i].length; in fastrpc_get_args()
1054 rpra[i].dma.offset = (u64) ctx->args[i].ptr; in fastrpc_get_args()
1067 union fastrpc_remote_arg *rpra = ctx->rpra; in fastrpc_put_args()
1068 struct fastrpc_user *fl = ctx->fl; in fastrpc_put_args()
1075 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); in fastrpc_put_args()
1076 outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc); in fastrpc_put_args()
1077 handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc); in fastrpc_put_args()
1078 list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); in fastrpc_put_args()
1079 pages = fastrpc_phy_page_start(list, ctx->nscalars); in fastrpc_put_args()
1082 for (i = inbufs; i < ctx->nbufs; ++i) { in fastrpc_put_args()
1083 if (!ctx->maps[i]) { in fastrpc_put_args()
1085 void *dst = (void *)(uintptr_t)ctx->args[i].ptr; in fastrpc_put_args()
1090 return -EFAULT; in fastrpc_put_args()
1113 struct fastrpc_user *fl = ctx->fl; in fastrpc_invoke_send()
1114 struct fastrpc_msg *msg = &ctx->msg; in fastrpc_invoke_send()
1117 cctx = fl->cctx; in fastrpc_invoke_send()
1118 msg->pid = fl->tgid; in fastrpc_invoke_send()
1119 msg->tid = current->pid; in fastrpc_invoke_send()
1122 msg->pid = 0; in fastrpc_invoke_send()
1124 msg->ctx = ctx->ctxid | fl->pd; in fastrpc_invoke_send()
1125 msg->handle = handle; in fastrpc_invoke_send()
1126 msg->sc = ctx->sc; in fastrpc_invoke_send()
1127 msg->addr = ctx->buf ? ctx->buf->phys : 0; in fastrpc_invoke_send()
1128 msg->size = roundup(ctx->msg_sz, PAGE_SIZE); in fastrpc_invoke_send()
1131 ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); in fastrpc_invoke_send()
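fastrpc_invoke_send() finishes by pushing a fixed-size descriptor at the DSP over the channel's rpmsg endpoint; rpmsg_send() blocks until ring space is available. A minimal sketch, with demo_msg as an illustrative stand-in for the driver's message layout:

#include <linux/rpmsg.h>

struct demo_msg {
	int pid;	/* process group id on the DSP side */
	int tid;	/* calling thread */
	u64 ctx;	/* context id, low bits carry the PD type */
	u32 handle;	/* remote method handle */
	u32 sc;		/* packed scalars word */
	u64 addr;	/* physical address of the argument buffer */
	u64 size;	/* argument buffer size, page aligned */
};

static int demo_send(struct rpmsg_device *rpdev, struct demo_msg *msg)
{
	/* blocking send on the device's primary endpoint */
	return rpmsg_send(rpdev->ept, msg, sizeof(*msg));
}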
1149 if (!fl->sctx) in fastrpc_internal_invoke()
1150 return -EINVAL; in fastrpc_internal_invoke()
1152 if (!fl->cctx->rpdev) in fastrpc_internal_invoke()
1153 return -EPIPE; in fastrpc_internal_invoke()
1156 …dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle… in fastrpc_internal_invoke()
1157 return -EPERM; in fastrpc_internal_invoke()
1171 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); in fastrpc_internal_invoke()
1176 if (!wait_for_completion_timeout(&ctx->work, 10 * HZ)) in fastrpc_internal_invoke()
1177 err = -ETIMEDOUT; in fastrpc_internal_invoke()
1179 err = wait_for_completion_interruptible(&ctx->work); in fastrpc_internal_invoke()
1193 err = ctx->retval; in fastrpc_internal_invoke()
1198 if (err != -ERESTARTSYS && err != -ETIMEDOUT) { in fastrpc_internal_invoke()
1200 spin_lock(&fl->lock); in fastrpc_internal_invoke()
1201 list_del(&ctx->node); in fastrpc_internal_invoke()
1202 spin_unlock(&fl->lock); in fastrpc_internal_invoke()
1206 if (err == -ERESTARTSYS) { in fastrpc_internal_invoke()
1207 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_internal_invoke()
1208 list_del(&buf->node); in fastrpc_internal_invoke()
1209 list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps); in fastrpc_internal_invoke()
1214 dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); in fastrpc_internal_invoke()
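The invoke path above waits on the per-context completion in two flavours: kernel-initiated calls use a 10-second timeout, while user calls wait interruptibly so a signal (-ERESTARTSYS) can break them out, which is also why interrupted mmaps get parked on invoke_interrupted_mmaps rather than freed. A sketch of the two waits:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int demo_wait(struct completion *work, bool kernel_caller)
{
	if (kernel_caller)
		/* wait_for_completion_timeout() returns 0 on timeout */
		return wait_for_completion_timeout(work, 10 * HZ) ?
		       0 : -ETIMEDOUT;

	/* user-initiated call: interruptible, may return -ERESTARTSYS */
	return wait_for_completion_interruptible(work);
}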
1221 /* Check if the device node is non-secure and the channel is secure */ in is_session_rejected()
1222 if (!fl->is_secure_dev && fl->cctx->secure) { in is_session_rejected()
1228 if (!fl->cctx->unsigned_support || !unsigned_pd_request) { in is_session_rejected()
1229 dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n"); in is_session_rejected()
1255 return -ENOMEM; in fastrpc_init_create_static_process()
1258 err = -EFAULT; in fastrpc_init_create_static_process()
1263 err = -EINVAL; in fastrpc_init_create_static_process()
1273 if (!fl->cctx->remote_heap) { in fastrpc_init_create_static_process()
1274 err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen, in fastrpc_init_create_static_process()
1275 &fl->cctx->remote_heap); in fastrpc_init_create_static_process()
1280 if (fl->cctx->vmcount) { in fastrpc_init_create_static_process()
1283 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1284 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1286 fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_init_create_static_process()
1288 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1289 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1296 inbuf.pgid = fl->tgid; in fastrpc_init_create_static_process()
1299 fl->pd = USER_PD; in fastrpc_init_create_static_process()
1303 args[0].fd = -1; in fastrpc_init_create_static_process()
1307 args[1].fd = -1; in fastrpc_init_create_static_process()
1309 pages[0].addr = fl->cctx->remote_heap->phys; in fastrpc_init_create_static_process()
1310 pages[0].size = fl->cctx->remote_heap->size; in fastrpc_init_create_static_process()
1314 args[2].fd = -1; in fastrpc_init_create_static_process()
1328 if (fl->cctx->vmcount && scm_done) { in fastrpc_init_create_static_process()
1333 for (i = 0; i < fl->cctx->vmcount; i++) in fastrpc_init_create_static_process()
1334 src_perms |= BIT(fl->cctx->vmperms[i].vmid); in fastrpc_init_create_static_process()
1338 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, in fastrpc_init_create_static_process()
1339 (u64)fl->cctx->remote_heap->size, in fastrpc_init_create_static_process()
1342 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n", in fastrpc_init_create_static_process()
1343 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); in fastrpc_init_create_static_process()
1346 fastrpc_buf_free(fl->cctx->remote_heap); in fastrpc_init_create_static_process()
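Both the map and static-process paths above use qcom_scm_assign_mem() to move ownership of physical memory between virtual machines: the source VMID bitmask is updated in place on success, and the error paths reassign the memory back to HLOS before freeing it. A hedged sketch of a share call, with the destination VMID and permission split as assumptions:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/firmware/qcom/qcom_scm.h>

static int demo_share(phys_addr_t phys, size_t size, int dest_vmid)
{
	/* current owner: the HLOS (Linux) VM */
	u64 src = BIT(QCOM_SCM_VMID_HLOS);
	struct qcom_scm_vmperm dst[2] = {
		{ .vmid = QCOM_SCM_VMID_HLOS, .perm = QCOM_SCM_PERM_RW },
		{ .vmid = dest_vmid,          .perm = QCOM_SCM_PERM_RWX },
	};

	/* on success, src is rewritten to the new owner bitmask */
	return qcom_scm_assign_mem(phys, size, &src, dst, ARRAY_SIZE(dst));
}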
1378 return -ENOMEM; in fastrpc_init_create_process()
1381 err = -EFAULT; in fastrpc_init_create_process()
1389 err = -ECONNREFUSED; in fastrpc_init_create_process()
1394 err = -EINVAL; in fastrpc_init_create_process()
1398 inbuf.pgid = fl->tgid; in fastrpc_init_create_process()
1399 inbuf.namelen = strlen(current->comm) + 1; in fastrpc_init_create_process()
1404 fl->pd = USER_PD; in fastrpc_init_create_process()
1414 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, in fastrpc_init_create_process()
1419 fl->init_mem = imem; in fastrpc_init_create_process()
1422 args[0].fd = -1; in fastrpc_init_create_process()
1424 args[1].ptr = (u64)(uintptr_t)current->comm; in fastrpc_init_create_process()
1426 args[1].fd = -1; in fastrpc_init_create_process()
1432 pages[0].addr = imem->phys; in fastrpc_init_create_process()
1433 pages[0].size = imem->size; in fastrpc_init_create_process()
1437 args[3].fd = -1; in fastrpc_init_create_process()
1441 args[4].fd = -1; in fastrpc_init_create_process()
1445 args[5].fd = -1; in fastrpc_init_create_process()
1461 fl->init_mem = NULL; in fastrpc_init_create_process()
1478 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_session_alloc()
1479 for (i = 0; i < cctx->sesscount; i++) { in fastrpc_session_alloc()
1480 if (!cctx->session[i].used && cctx->session[i].valid) { in fastrpc_session_alloc()
1481 cctx->session[i].used = true; in fastrpc_session_alloc()
1482 session = &cctx->session[i]; in fastrpc_session_alloc()
1486 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_session_alloc()
1496 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_session_free()
1497 session->used = false; in fastrpc_session_free()
1498 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_session_free()
1507 tgid = fl->tgid; in fastrpc_release_current_dsp_process()
1510 args[0].fd = -1; in fastrpc_release_current_dsp_process()
1519 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_release()
1520 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_device_release()
1528 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_device_release()
1529 list_del(&fl->user); in fastrpc_device_release()
1530 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_device_release()
1532 if (fl->init_mem) in fastrpc_device_release()
1533 fastrpc_buf_free(fl->init_mem); in fastrpc_device_release()
1535 list_for_each_entry_safe(ctx, n, &fl->pending, node) { in fastrpc_device_release()
1536 list_del(&ctx->node); in fastrpc_device_release()
1540 list_for_each_entry_safe(map, m, &fl->maps, node) in fastrpc_device_release()
1543 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { in fastrpc_device_release()
1544 list_del(&buf->node); in fastrpc_device_release()
1548 fastrpc_session_free(cctx, fl->sctx); in fastrpc_device_release()
1551 mutex_destroy(&fl->mutex); in fastrpc_device_release()
1553 file->private_data = NULL; in fastrpc_device_release()
1565 fdevice = miscdev_to_fdevice(filp->private_data); in fastrpc_device_open()
1566 cctx = fdevice->cctx; in fastrpc_device_open()
1570 return -ENOMEM; in fastrpc_device_open()
1575 filp->private_data = fl; in fastrpc_device_open()
1576 spin_lock_init(&fl->lock); in fastrpc_device_open()
1577 mutex_init(&fl->mutex); in fastrpc_device_open()
1578 INIT_LIST_HEAD(&fl->pending); in fastrpc_device_open()
1579 INIT_LIST_HEAD(&fl->maps); in fastrpc_device_open()
1580 INIT_LIST_HEAD(&fl->mmaps); in fastrpc_device_open()
1581 INIT_LIST_HEAD(&fl->user); in fastrpc_device_open()
1582 fl->tgid = current->tgid; in fastrpc_device_open()
1583 fl->cctx = cctx; in fastrpc_device_open()
1584 fl->is_secure_dev = fdevice->secure; in fastrpc_device_open()
1586 fl->sctx = fastrpc_session_alloc(cctx); in fastrpc_device_open()
1587 if (!fl->sctx) { in fastrpc_device_open()
1588 dev_err(&cctx->rpdev->dev, "No session available\n"); in fastrpc_device_open()
1589 mutex_destroy(&fl->mutex); in fastrpc_device_open()
1592 return -EBUSY; in fastrpc_device_open()
1595 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_device_open()
1596 list_add_tail(&fl->user, &cctx->users); in fastrpc_device_open()
1597 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_device_open()
1610 return -EFAULT; in fastrpc_dmabuf_alloc()
1612 err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); in fastrpc_dmabuf_alloc()
1619 buf->dmabuf = dma_buf_export(&exp_info); in fastrpc_dmabuf_alloc()
1620 if (IS_ERR(buf->dmabuf)) { in fastrpc_dmabuf_alloc()
1621 err = PTR_ERR(buf->dmabuf); in fastrpc_dmabuf_alloc()
1626 bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); in fastrpc_dmabuf_alloc()
1628 dma_buf_put(buf->dmabuf); in fastrpc_dmabuf_alloc()
1629 return -EINVAL; in fastrpc_dmabuf_alloc()
1641 return -EFAULT; in fastrpc_dmabuf_alloc()
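fastrpc_dmabuf_alloc() exports a freshly allocated buffer as a dma-buf and hands an fd back to userspace, dropping the buffer's reference if installing the fd fails. A condensed sketch of those steps; demo_dmabuf_ops is assumed to be defined elsewhere, and the O_ACCMODE flag mirrors line 1626:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/module.h>

static const struct dma_buf_ops demo_dmabuf_ops;	/* assumed elsewhere */

static int demo_export(void *priv, size_t size, int *out_fd)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &demo_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = priv;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_ACCMODE);	/* installs a new user fd */
	if (fd < 0) {
		dma_buf_put(dmabuf);
		return fd;
	}
	*out_fd = fd;
	return 0;
}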
1650 int tgid = fl->tgid; in fastrpc_init_attach()
1655 args[0].fd = -1; in fastrpc_init_attach()
1657 fl->pd = pd; in fastrpc_init_attach()
1671 return -EFAULT; in fastrpc_invoke()
1678 return -ENOMEM; in fastrpc_invoke()
1683 return -EFAULT; in fastrpc_invoke()
1704 dsp_attr_buf_len -= 1; in fastrpc_get_info_from_dsp()
1708 args[0].fd = -1; in fastrpc_get_info_from_dsp()
1711 args[1].fd = -1; in fastrpc_get_info_from_dsp()
1720 struct fastrpc_channel_ctx *cctx = fl->cctx; in fastrpc_get_info_from_kernel()
1721 uint32_t attribute_id = cap->attribute_id; in fastrpc_get_info_from_kernel()
1724 uint32_t domain = cap->domain; in fastrpc_get_info_from_kernel()
1727 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1729 if (cctx->valid_attributes) { in fastrpc_get_info_from_kernel()
1730 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1733 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1737 return -ENOMEM; in fastrpc_get_info_from_kernel()
1741 dev_info(&cctx->rpdev->dev, in fastrpc_get_info_from_kernel()
1744 return -EOPNOTSUPP; in fastrpc_get_info_from_kernel()
1746 dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err); in fastrpc_get_info_from_kernel()
1751 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1752 memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); in fastrpc_get_info_from_kernel()
1753 cctx->valid_attributes = true; in fastrpc_get_info_from_kernel()
1754 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_get_info_from_kernel()
1757 cap->capability = cctx->dsp_attributes[attribute_id]; in fastrpc_get_info_from_kernel()
1767 return -EFAULT; in fastrpc_get_dsp_info()
1771 dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n", in fastrpc_get_dsp_info()
1773 return -ECHRNG; in fastrpc_get_dsp_info()
1778 dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err); in fastrpc_get_dsp_info()
1779 return -ECHRNG; in fastrpc_get_dsp_info()
1783 dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n", in fastrpc_get_dsp_info()
1785 return -EOVERFLOW; in fastrpc_get_dsp_info()
1793 return -EFAULT; in fastrpc_get_dsp_info()
1802 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap_impl()
1806 req_msg.pgid = fl->tgid; in fastrpc_req_munmap_impl()
1807 req_msg.size = buf->size; in fastrpc_req_munmap_impl()
1808 req_msg.vaddr = buf->raddr; in fastrpc_req_munmap_impl()
1817 dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr); in fastrpc_req_munmap_impl()
1818 spin_lock(&fl->lock); in fastrpc_req_munmap_impl()
1819 list_del(&buf->node); in fastrpc_req_munmap_impl()
1820 spin_unlock(&fl->lock); in fastrpc_req_munmap_impl()
1823 dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr); in fastrpc_req_munmap_impl()
1833 struct device *dev = fl->sctx->dev; in fastrpc_req_munmap()
1836 return -EFAULT; in fastrpc_req_munmap()
1838 spin_lock(&fl->lock); in fastrpc_req_munmap()
1839 list_for_each_entry_safe(iter, b, &fl->mmaps, node) { in fastrpc_req_munmap()
1840 if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) { in fastrpc_req_munmap()
1845 spin_unlock(&fl->lock); in fastrpc_req_munmap()
1850 return -EINVAL; in fastrpc_req_munmap()
1864 struct device *dev = fl->sctx->dev; in fastrpc_req_mmap()
1869 return -EFAULT; in fastrpc_req_mmap()
1874 return -EINVAL; in fastrpc_req_mmap()
1879 return -EINVAL; in fastrpc_req_mmap()
1892 req_msg.pgid = fl->tgid; in fastrpc_req_mmap()
1900 pages.addr = buf->phys; in fastrpc_req_mmap()
1901 pages.size = buf->size; in fastrpc_req_mmap()
1913 dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size); in fastrpc_req_mmap()
1919 buf->raddr = (uintptr_t) rsp_msg.vaddr; in fastrpc_req_mmap()
1925 if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { in fastrpc_req_mmap()
1928 err = qcom_scm_assign_mem(buf->phys, (u64)buf->size, in fastrpc_req_mmap()
1929 &src_perms, fl->cctx->vmperms, fl->cctx->vmcount); in fastrpc_req_mmap()
1931 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", in fastrpc_req_mmap()
1932 buf->phys, buf->size, err); in fastrpc_req_mmap()
1937 spin_lock(&fl->lock); in fastrpc_req_mmap()
1938 list_add_tail(&buf->node, &fl->mmaps); in fastrpc_req_mmap()
1939 spin_unlock(&fl->lock); in fastrpc_req_mmap()
1942 err = -EFAULT; in fastrpc_req_mmap()
1947 buf->raddr, buf->size); in fastrpc_req_mmap()
1964 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_unmap_impl()
1966 spin_lock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1967 list_for_each_entry_safe(iter, m, &fl->maps, node) { in fastrpc_req_mem_unmap_impl()
1968 if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) { in fastrpc_req_mem_unmap_impl()
1974 spin_unlock(&fl->lock); in fastrpc_req_mem_unmap_impl()
1978 return -EINVAL; in fastrpc_req_mem_unmap_impl()
1981 req_msg.pgid = fl->tgid; in fastrpc_req_mem_unmap_impl()
1982 req_msg.len = map->len; in fastrpc_req_mem_unmap_impl()
1983 req_msg.vaddrin = map->raddr; in fastrpc_req_mem_unmap_impl()
1984 req_msg.fd = map->fd; in fastrpc_req_mem_unmap_impl()
1993 dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr); in fastrpc_req_mem_unmap_impl()
2006 return -EFAULT; in fastrpc_req_mem_unmap()
2019 struct device *dev = fl->sctx->dev; in fastrpc_req_mem_map()
2025 return -EFAULT; in fastrpc_req_mem_map()
2034 req_msg.pgid = fl->tgid; in fastrpc_req_mem_map()
2038 map->va = (void *) (uintptr_t) req.vaddrin; in fastrpc_req_mem_map()
2046 pages.addr = map->phys; in fastrpc_req_mem_map()
2047 pages.size = map->size; in fastrpc_req_mem_map()
2062 req.fd, req.vaddrin, map->size); in fastrpc_req_mem_map()
2067 map->raddr = rsp_msg.vaddr; in fastrpc_req_mem_map()
2075 req_unmap.length = map->size; in fastrpc_req_mem_map()
2077 return -EFAULT; in fastrpc_req_mem_map()
2091 struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; in fastrpc_device_ioctl()
2130 err = -ENOTTY; in fastrpc_device_ioctl()
2148 struct device *dev = &pdev->dev; in fastrpc_cb_probe()
2153 cctx = dev_get_drvdata(dev->parent); in fastrpc_cb_probe()
2155 return -EINVAL; in fastrpc_cb_probe()
2157 of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); in fastrpc_cb_probe()
2159 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_cb_probe()
2160 if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) { in fastrpc_cb_probe()
2161 dev_err(&pdev->dev, "too many sessions\n"); in fastrpc_cb_probe()
2162 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_probe()
2163 return -ENOSPC; in fastrpc_cb_probe()
2165 sess = &cctx->session[cctx->sesscount++]; in fastrpc_cb_probe()
2166 sess->used = false; in fastrpc_cb_probe()
2167 sess->valid = true; in fastrpc_cb_probe()
2168 sess->dev = dev; in fastrpc_cb_probe()
2171 if (of_property_read_u32(dev->of_node, "reg", &sess->sid)) in fastrpc_cb_probe()
2178 if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) in fastrpc_cb_probe()
2180 dup_sess = &cctx->session[cctx->sesscount++]; in fastrpc_cb_probe()
2184 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_probe()
2187 dev_err(dev, "32-bit DMA enable failed\n"); in fastrpc_cb_probe()
2196 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); in fastrpc_cb_remove()
2197 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); in fastrpc_cb_remove()
2201 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_cb_remove()
2203 if (cctx->session[i].sid == sess->sid) { in fastrpc_cb_remove()
2204 cctx->session[i].valid = false; in fastrpc_cb_remove()
2205 cctx->sesscount--; in fastrpc_cb_remove()
2208 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_cb_remove()
2212 { .compatible = "qcom,fastrpc-compute-cb", },
2220 .name = "qcom,fastrpc-cb",
2234 return -ENOMEM; in fastrpc_device_register()
2236 fdev->secure = is_secured; in fastrpc_device_register()
2237 fdev->cctx = cctx; in fastrpc_device_register()
2238 fdev->miscdev.minor = MISC_DYNAMIC_MINOR; in fastrpc_device_register()
2239 fdev->miscdev.fops = &fastrpc_fops; in fastrpc_device_register()
2240 fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s", in fastrpc_device_register()
2241 domain, is_secured ? "-secure" : ""); in fastrpc_device_register()
2242 if (!fdev->miscdev.name) in fastrpc_device_register()
2243 return -ENOMEM; in fastrpc_device_register()
2245 err = misc_register(&fdev->miscdev); in fastrpc_device_register()
2248 cctx->secure_fdevice = fdev; in fastrpc_device_register()
2250 cctx->fdevice = fdev; in fastrpc_device_register()
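fastrpc_device_register() above creates one misc device per domain (plus a "-secure" variant), letting the kernel assign the minor number. A minimal standalone sketch of the same registration; the fops table is assumed to exist elsewhere:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>

static const struct file_operations demo_fops;	/* assumed elsewhere */

static struct miscdevice demo_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the kernel pick a minor */
	.name  = "fastrpc-demo",	/* appears as /dev/fastrpc-demo */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_miscdev);
}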
2258 struct device *rdev = &rpdev->dev; in fastrpc_rpmsg_probe()
2260 int i, err, domain_id = -1, vmcount; in fastrpc_rpmsg_probe()
2267 err = of_property_read_string(rdev->of_node, "label", &domain); in fastrpc_rpmsg_probe()
2282 return -EINVAL; in fastrpc_rpmsg_probe()
2285 if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0)) in fastrpc_rpmsg_probe()
2288 vmcount = of_property_read_variable_u32_array(rdev->of_node, in fastrpc_rpmsg_probe()
2293 return -EPROBE_DEFER; in fastrpc_rpmsg_probe()
2297 return -ENOMEM; in fastrpc_rpmsg_probe()
2300 data->vmcount = vmcount; in fastrpc_rpmsg_probe()
2301 for (i = 0; i < data->vmcount; i++) { in fastrpc_rpmsg_probe()
2302 data->vmperms[i].vmid = vmids[i]; in fastrpc_rpmsg_probe()
2303 data->vmperms[i].perm = QCOM_SCM_PERM_RWX; in fastrpc_rpmsg_probe()
2307 rmem_node = of_parse_phandle(rdev->of_node, "memory-region", 0); in fastrpc_rpmsg_probe()
2313 err = -EINVAL; in fastrpc_rpmsg_probe()
2319 qcom_scm_assign_mem(rmem->base, rmem->size, &src_perms, in fastrpc_rpmsg_probe()
2320 data->vmperms, data->vmcount); in fastrpc_rpmsg_probe()
2324 secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain")); in fastrpc_rpmsg_probe()
2325 data->secure = secure_dsp; in fastrpc_rpmsg_probe()
2332 data->unsigned_support = false; in fastrpc_rpmsg_probe()
2339 data->unsigned_support = true; in fastrpc_rpmsg_probe()
2350 err = -EINVAL; in fastrpc_rpmsg_probe()
2354 kref_init(&data->refcount); in fastrpc_rpmsg_probe()
2356 dev_set_drvdata(&rpdev->dev, data); in fastrpc_rpmsg_probe()
2357 rdev->dma_mask = &data->dma_mask; in fastrpc_rpmsg_probe()
2359 INIT_LIST_HEAD(&data->users); in fastrpc_rpmsg_probe()
2360 INIT_LIST_HEAD(&data->invoke_interrupted_mmaps); in fastrpc_rpmsg_probe()
2361 spin_lock_init(&data->lock); in fastrpc_rpmsg_probe()
2362 idr_init(&data->ctx_idr); in fastrpc_rpmsg_probe()
2363 data->domain_id = domain_id; in fastrpc_rpmsg_probe()
2364 data->rpdev = rpdev; in fastrpc_rpmsg_probe()
2366 err = of_platform_populate(rdev->of_node, NULL, NULL, rdev); in fastrpc_rpmsg_probe()
2373 if (data->fdevice) in fastrpc_rpmsg_probe()
2374 misc_deregister(&data->fdevice->miscdev); in fastrpc_rpmsg_probe()
2375 if (data->secure_fdevice) in fastrpc_rpmsg_probe()
2376 misc_deregister(&data->secure_fdevice->miscdev); in fastrpc_rpmsg_probe()
2387 spin_lock(&user->lock); in fastrpc_notify_users()
2388 list_for_each_entry(ctx, &user->pending, node) { in fastrpc_notify_users()
2389 ctx->retval = -EPIPE; in fastrpc_notify_users()
2390 complete(&ctx->work); in fastrpc_notify_users()
2392 spin_unlock(&user->lock); in fastrpc_notify_users()
2397 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); in fastrpc_rpmsg_remove()
2403 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_rpmsg_remove()
2404 cctx->rpdev = NULL; in fastrpc_rpmsg_remove()
2405 list_for_each_entry(user, &cctx->users, user) in fastrpc_rpmsg_remove()
2407 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_rpmsg_remove()
2409 if (cctx->fdevice) in fastrpc_rpmsg_remove()
2410 misc_deregister(&cctx->fdevice->miscdev); in fastrpc_rpmsg_remove()
2412 if (cctx->secure_fdevice) in fastrpc_rpmsg_remove()
2413 misc_deregister(&cctx->secure_fdevice->miscdev); in fastrpc_rpmsg_remove()
2415 list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node) in fastrpc_rpmsg_remove()
2416 list_del(&buf->node); in fastrpc_rpmsg_remove()
2418 if (cctx->remote_heap) in fastrpc_rpmsg_remove()
2419 fastrpc_buf_free(cctx->remote_heap); in fastrpc_rpmsg_remove()
2421 of_platform_depopulate(&rpdev->dev); in fastrpc_rpmsg_remove()
2429 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); in fastrpc_rpmsg_callback()
2436 return -EINVAL; in fastrpc_rpmsg_callback()
2438 ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); in fastrpc_rpmsg_callback()
2440 spin_lock_irqsave(&cctx->lock, flags); in fastrpc_rpmsg_callback()
2441 ctx = idr_find(&cctx->ctx_idr, ctxid); in fastrpc_rpmsg_callback()
2442 spin_unlock_irqrestore(&cctx->lock, flags); in fastrpc_rpmsg_callback()
2445 dev_err(&rpdev->dev, "No context ID matches response\n"); in fastrpc_rpmsg_callback()
2446 return -ENOENT; in fastrpc_rpmsg_callback()
2449 ctx->retval = rsp->retval; in fastrpc_rpmsg_callback()
2450 complete(&ctx->work); in fastrpc_rpmsg_callback()
2457 schedule_work(&ctx->put_work); in fastrpc_rpmsg_callback()
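The rx callback above completes the waiting context and then schedules put_work rather than dropping its reference directly: the callback runs in atomic context, while the final kref release can sleep (it frees DMA memory). A sketch of that deferred-put shape with illustrative names:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct kref refcount;
	struct work_struct put_work;
};

static void demo_ctx_free(struct kref *ref)
{
	/* final release; may sleep (e.g. freeing coherent DMA memory) */
}

static void demo_put_wq(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, put_work);

	kref_put(&ctx->refcount, demo_ctx_free);
}

/* callable from atomic (rx callback) context; put_work must have been
 * set up with INIT_WORK(&ctx->put_work, demo_put_wq) at allocation */
static void demo_ctx_complete(struct demo_ctx *ctx)
{
	schedule_work(&ctx->put_work);
}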
2490 pr_err("fastrpc: failed to register rpmsg driver\n"); in fastrpc_init()