Lines Matching full:shm

50  * A typical OP-TEE private shm allocation is 224 bytes (argument struct
86 struct tee_shm *shm; in from_msg_param_tmp_mem() local
93 shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref; in from_msg_param_tmp_mem()
94 if (!shm) { in from_msg_param_tmp_mem()
96 p->u.memref.shm = NULL; in from_msg_param_tmp_mem()
100 rc = tee_shm_get_pa(shm, 0, &pa); in from_msg_param_tmp_mem()
105 p->u.memref.shm = shm; in from_msg_param_tmp_mem()
113 struct tee_shm *shm; in from_msg_param_reg_mem() local
118 shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref; in from_msg_param_reg_mem()
120 if (shm) { in from_msg_param_reg_mem()
122 p->u.memref.shm = shm; in from_msg_param_reg_mem()
125 p->u.memref.shm = NULL; in from_msg_param_reg_mem()
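
The from_msg_param_tmp_mem() and from_msg_param_reg_mem() hits above both recover a struct tee_shm from the 64-bit shm_ref field: the driver stores the kernel pointer itself as the cookie when building the message and casts it back when secure world returns the parameter, treating a zero cookie as "no memory attached". A minimal userspace sketch of that round trip, with stand-in type names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

struct fake_tee_shm { unsigned int size; };  /* stand-in for struct tee_shm */

/* Pack the shm pointer into the 64-bit shm_ref field of a message param. */
static uint64_t shm_to_cookie(struct fake_tee_shm *shm)
{
	return (uint64_t)(uintptr_t)shm;
}

/* Recover the pointer from the cookie; zero maps back to NULL. */
static struct fake_tee_shm *cookie_to_shm(uint64_t shm_ref)
{
	return (struct fake_tee_shm *)(uintptr_t)shm_ref;
}

int main(void)
{
	struct fake_tee_shm shm = { .size = 4096 };
	uint64_t cookie = shm_to_cookie(&shm);

	printf("round trip ok: %d\n", cookie_to_shm(cookie) == &shm);
	printf("zero cookie is NULL: %d\n", cookie_to_shm(0) == NULL);
	return 0;
}
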
189 mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; in to_msg_param_tmp_mem()
192 if (!p->u.memref.shm) { in to_msg_param_tmp_mem()
197 rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa); in to_msg_param_tmp_mem()
214 mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm; in to_msg_param_reg_mem()
252 if (tee_shm_is_dynamic(p->u.memref.shm)) in optee_to_msg_param()
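
Going the other direction, the to_msg_param_*_mem() and optee_to_msg_param() hits show the outbound choice: a dynamically allocated or registered buffer is described to secure world as a registered memref (rmem, identified by the shm_ref cookie plus an offset), while a buffer carved out of the static shared-memory pool is described as a temporary memref (tmem, identified by physical address and size). A standalone sketch of that dispatch, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum memref_kind { MEMREF_TMEM, MEMREF_RMEM };

struct model_shm {
	bool dynamic;        /* models tee_shm_is_dynamic() */
	uint64_t pa;         /* physical address, relevant for the static pool */
};

/* Dynamic buffers become rmem params, static-pool buffers become tmem. */
static enum memref_kind pick_memref_kind(const struct model_shm *shm)
{
	return shm->dynamic ? MEMREF_RMEM : MEMREF_TMEM;
}

int main(void)
{
	struct model_shm reg_shm = { .dynamic = true };
	struct model_shm stat_shm = { .dynamic = false, .pa = 0x42000000 };

	printf("registered buffer  -> %s\n",
	       pick_memref_kind(&reg_shm) == MEMREF_RMEM ? "rmem" : "tmem");
	printf("static pool buffer -> %s\n",
	       pick_memref_kind(&stat_shm) == MEMREF_RMEM ? "rmem" : "tmem");
	return 0;
}
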
322 break; /* All shm's freed */ in __optee_disable_shm_cache()
324 struct tee_shm *shm; in __optee_disable_shm_cache() local
333 shm = reg_pair_to_ptr(res.result.shm_upper32, in __optee_disable_shm_cache()
335 tee_shm_free(shm); in __optee_disable_shm_cache()
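
The __optee_disable_shm_cache() hits drain secure world's cache of argument buffers one SMC at a time; each cached struct tee_shm comes back as a 64-bit cookie split across two 32-bit result registers. The reg_pair helpers that do the splitting and recombining can be modelled in a few lines (the implementation below is an illustration, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit value across two 32-bit register slots. */
static void reg_pair_from_64(uint32_t *upper, uint32_t *lower, uint64_t val)
{
	*upper = (uint32_t)(val >> 32);
	*lower = (uint32_t)val;
}

/* Recombine the two halves into a pointer again. */
static void *reg_pair_to_ptr(uint32_t upper, uint32_t lower)
{
	return (void *)(uintptr_t)(((uint64_t)upper << 32) | lower);
}

int main(void)
{
	int object;              /* stands in for a cached struct tee_shm */
	uint32_t hi, lo;

	reg_pair_from_64(&hi, &lo, (uint64_t)(uintptr_t)&object);
	printf("round trip ok: %d\n", reg_pair_to_ptr(hi, lo) == &object);
	return 0;
}
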
453 static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, in optee_shm_register() argument
495 tee_shm_get_page_offset(shm)); in optee_shm_register()
502 msg_arg->params->u.tmem.shm_ref = (unsigned long)shm; in optee_shm_register()
503 msg_arg->params->u.tmem.size = tee_shm_get_size(shm); in optee_shm_register()
509 (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); in optee_shm_register()
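
In the optee_shm_register() hits, a non-contiguous buffer is described by the physical address of its page-list structure with the buffer's offset into the first OPTEE_MSG_NONCONTIG_PAGE_SIZE page OR'ed into the low bits, alongside the size and the shm_ref cookie. A self-contained sketch of that encoding (values below are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096u

/* Combine the page-list physical address with the in-page buffer offset. */
static uint64_t encode_buf_ptr(uint64_t page_list_pa, unsigned int page_offset)
{
	return page_list_pa |
	       (page_offset & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
}

int main(void)
{
	/* Hypothetical values: page-aligned list at 0x80001000, offset 0x123. */
	uint64_t buf_ptr = encode_buf_ptr(0x80001000ull, 0x123);

	printf("buf_ptr = %#llx\n", (unsigned long long)buf_ptr);
	printf("offset  = %#llx\n",
	       (unsigned long long)(buf_ptr &
				    (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)));
	return 0;
}
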
521 static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) in optee_shm_unregister() argument
552 msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; in optee_shm_unregister()
562 static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, in optee_shm_register_supp() argument
574 struct tee_shm *shm) in optee_shm_unregister_supp() argument
589 struct tee_shm *shm, size_t size, size_t align) in pool_op_alloc() argument
595 if (shm->flags & TEE_SHM_PRIV) in pool_op_alloc()
596 return tee_dyn_shm_alloc_helper(shm, size, align, NULL); in pool_op_alloc()
598 return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register); in pool_op_alloc()
602 struct tee_shm *shm) in pool_op_free() argument
604 if (!(shm->flags & TEE_SHM_PRIV)) in pool_op_free()
605 tee_dyn_shm_free_helper(shm, optee_shm_unregister); in pool_op_free()
607 tee_dyn_shm_free_helper(shm, NULL); in pool_op_free()
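
The pool_op_alloc()/pool_op_free() hits show the dynamic pool's policy: driver-private buffers (TEE_SHM_PRIV) are only allocated and freed locally, while all other buffers are additionally registered with, and later unregistered from, secure world via the optee_shm_register()/optee_shm_unregister() hooks. A simplified model of that decision, with stand-in names for the tee_dyn_shm_*_helper() plumbing:

#include <stdio.h>

#define MODEL_SHM_PRIV 0x1           /* stand-in for TEE_SHM_PRIV */

struct model_shm { unsigned int flags; };

typedef int (*register_fn)(struct model_shm *shm);

static int register_with_tee(struct model_shm *shm)
{
	(void)shm;
	printf("registered with secure world\n");
	return 0;
}

/* Mirrors the shape of pool_op_alloc(): private buffers skip registration. */
static int model_pool_alloc(struct model_shm *shm)
{
	register_fn hook = (shm->flags & MODEL_SHM_PRIV) ? NULL
							 : register_with_tee;

	printf("allocate backing pages\n");
	return hook ? hook(shm) : 0;
}

int main(void)
{
	struct model_shm priv_shm = { .flags = MODEL_SHM_PRIV };
	struct model_shm user_shm = { .flags = 0 };

	model_pool_alloc(&priv_shm);   /* allocate only */
	model_pool_alloc(&user_shm);   /* allocate + register */
	return 0;
}
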
624  * This pool is used when OP-TEE supports dynamic SHM. In this case
652 struct tee_shm *shm; in handle_rpc_func_cmd_shm_free() local
662 shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; in handle_rpc_func_cmd_shm_free()
665 optee_rpc_cmd_free_suppl(ctx, shm); in handle_rpc_func_cmd_shm_free()
668 tee_shm_free(shm); in handle_rpc_func_cmd_shm_free()
681 struct tee_shm *shm; in handle_rpc_func_cmd_shm_alloc() local
705 shm = optee_rpc_cmd_alloc_suppl(ctx, sz); in handle_rpc_func_cmd_shm_alloc()
708 shm = tee_shm_alloc_priv_buf(optee->ctx, sz); in handle_rpc_func_cmd_shm_alloc()
715 if (IS_ERR(shm)) { in handle_rpc_func_cmd_shm_alloc()
725 pages = tee_shm_get_pages(shm, &page_count); in handle_rpc_func_cmd_shm_alloc()
745 (tee_shm_get_page_offset(shm) & in handle_rpc_func_cmd_shm_alloc()
749 tee_shm_get_page_offset(shm)); in handle_rpc_func_cmd_shm_alloc()
753 if (tee_shm_get_pa(shm, 0, &pa)) { in handle_rpc_func_cmd_shm_alloc()
761 arg->params[0].u.tmem.size = tee_shm_get_size(shm); in handle_rpc_func_cmd_shm_alloc()
762 arg->params[0].u.tmem.shm_ref = (unsigned long)shm; in handle_rpc_func_cmd_shm_alloc()
767 tee_shm_free(shm); in handle_rpc_func_cmd_shm_alloc()
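
The handle_rpc_func_cmd_shm_alloc()/handle_rpc_func_cmd_shm_free() hits implement secure world's request for normal-world memory: the driver picks an allocator (tee-supplicant for application memory, a kernel-private buffer otherwise), and the reply carries the size, the address and the shm cookie so the matching FREE request can hand the same object back. A simplified userspace model of that alloc/free pairing (names and the use of malloc() are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct model_shm {
	void *va;
	size_t size;
};

struct model_tmem_reply {
	uint64_t buf_ptr;   /* physical address in the real driver */
	uint64_t size;
	uint64_t shm_ref;   /* cookie: the shm pointer itself */
};

static struct model_shm *model_alloc(size_t sz)
{
	struct model_shm *shm = malloc(sizeof(*shm));

	if (!shm)
		return NULL;
	shm->va = malloc(sz);
	if (!shm->va) {
		free(shm);
		return NULL;
	}
	shm->size = sz;
	return shm;
}

/* Fill the tmem reply the way the ALLOC handler does. */
static void fill_reply(struct model_tmem_reply *r, struct model_shm *shm)
{
	r->buf_ptr = (uint64_t)(uintptr_t)shm->va;  /* pa in the kernel */
	r->size = shm->size;
	r->shm_ref = (uint64_t)(uintptr_t)shm;      /* cookie for the FREE request */
}

static void model_free(uint64_t shm_ref)
{
	struct model_shm *shm = (struct model_shm *)(uintptr_t)shm_ref;

	free(shm->va);
	free(shm);
}

int main(void)
{
	struct model_tmem_reply reply;
	struct model_shm *shm = model_alloc(4096);

	if (!shm)
		return 1;
	fill_reply(&reply, shm);
	printf("size %llu, cookie %#llx\n",
	       (unsigned long long)reply.size,
	       (unsigned long long)reply.shm_ref);
	model_free(reply.shm_ref);   /* mirrors the later SHM_FREE request */
	return 0;
}
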
820 struct tee_shm *shm; in optee_handle_rpc() local
825 shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1); in optee_handle_rpc()
826 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { in optee_handle_rpc()
829 (unsigned long)shm); in optee_handle_rpc()
836 kmemleak_not_leak(shm); in optee_handle_rpc()
839 shm = reg_pair_to_ptr(param->a1, param->a2); in optee_handle_rpc()
840 tee_shm_free(shm); in optee_handle_rpc()
854 shm = reg_pair_to_ptr(param->a1, param->a2); in optee_handle_rpc()
855 arg = tee_shm_get_va(shm, 0); in optee_handle_rpc()
858 __func__, shm); in optee_handle_rpc()
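
The optee_handle_rpc() hits dispatch on the RPC function requested by secure world: an ALLOC request returns a fresh buffer's address and cookie through the register parameters, a FREE request recombines the register pair back into the cookie and frees the object, and a CMD request resolves the cookie to the argument struct before handling it. A rough standalone model of that dispatch shape (constants and fields are stand-ins):

#include <stdint.h>
#include <stdio.h>

enum { MODEL_RPC_ALLOC, MODEL_RPC_FREE, MODEL_RPC_CMD };

struct model_regs { uint64_t a0, a1, a2; };

static void model_handle_rpc(struct model_regs *param)
{
	switch (param->a0) {
	case MODEL_RPC_ALLOC:
		printf("alloc %llu bytes, return pa and cookie in registers\n",
		       (unsigned long long)param->a1);
		break;
	case MODEL_RPC_FREE:
		printf("free cookie %#llx\n",
		       (unsigned long long)((param->a1 << 32) | param->a2));
		break;
	case MODEL_RPC_CMD:
		printf("map cookie to argument struct and run the command\n");
		break;
	default:
		printf("unknown RPC function\n");
		break;
	}
}

int main(void)
{
	struct model_regs alloc = { .a0 = MODEL_RPC_ALLOC, .a1 = 4096 };
	struct model_regs freer = { .a0 = MODEL_RPC_FREE, .a1 = 0, .a2 = 0x1000 };

	model_handle_rpc(&alloc);
	model_handle_rpc(&freer);
	return 0;
}
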
877 * @shm: shared memory holding the message to pass to secure world
878 * @offs: offset of the message in @shm
887 struct tee_shm *shm, u_int offs, in optee_smc_do_call_with_arg() argument
901 arg = tee_shm_get_va(shm, offs); in optee_smc_do_call_with_arg()
906 rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs); in optee_smc_do_call_with_arg()
911 if (rpc_arg && tee_shm_is_dynamic(shm)) { in optee_smc_do_call_with_arg()
913 reg_pair_from_64(&param.a1, &param.a2, (u_long)shm); in optee_smc_do_call_with_arg()
918 rc = tee_shm_get_pa(shm, offs, &parg); in optee_smc_do_call_with_arg()
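
The optee_smc_do_call_with_arg() hits show how the argument buffer itself is identified to secure world: when it is a dynamic shm carrying a trailing RPC argument area, its registered-shm cookie is split across two registers; otherwise its physical address is passed the same way. A sketch of that choice under those assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_call {
	uint64_t a1, a2;
	const char *kind;
};

/* Pick between "registered argument" and "temporary argument" encodings. */
static struct model_call build_call(bool dynamic_with_rpc_arg,
				    uint64_t cookie, uint64_t pa)
{
	struct model_call c = { 0 };

	if (dynamic_with_rpc_arg) {
		c.kind = "registered argument (cookie)";
		c.a1 = cookie >> 32;       /* reg_pair_from_64() in the driver */
		c.a2 = (uint32_t)cookie;
	} else {
		c.kind = "temporary argument (physical address)";
		c.a1 = pa >> 32;
		c.a2 = (uint32_t)pa;
	}
	return c;
}

int main(void)
{
	struct model_call a = build_call(true, 0xffff000012345678ull, 0);
	struct model_call b = build_call(false, 0, 0x80100000ull);

	printf("%s: a1=%#llx a2=%#llx\n", a.kind,
	       (unsigned long long)a.a1, (unsigned long long)a.a2);
	printf("%s: a1=%#llx a2=%#llx\n", b.kind,
	       (unsigned long long)b.a1, (unsigned long long)b.a2);
	return 0;
}
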
1360 pr_err("static shm service not available\n"); in optee_config_shm_memremap()
1755 * Ensure that there are no pre-existing shm objects before enabling in optee_probe()
1756 * the shm cache so that there's no chance of receiving an invalid in optee_probe()
1759 * shm cache. in optee_probe()
1764 * Only enable the shm cache in case we're not able to pass the RPC in optee_probe()
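
The optee_probe() comments above describe an ordering requirement: any argument buffers cached by secure world from a previous kernel must be drained before the cache is enabled for this boot, so that no stale cookie can ever be handed back. A trivial sketch of that sequence (function bodies are placeholders):

#include <stdio.h>

static void model_disable_shm_cache(void)
{
	/* Loop asking secure world for cached objects and free each one. */
	printf("drained any pre-existing shm cache\n");
}

static void model_enable_shm_cache(void)
{
	printf("shm cache enabled\n");
}

int main(void)
{
	/* Order matters: drain first, enable afterwards. */
	model_disable_shm_cache();
	model_enable_shm_cache();
	return 0;
}
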