Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0
7 * provides a mechanism for inter-processor communication between SCP's
14 * Copyright (C) 2018-2024 ARM Ltd.
25 #include <linux/io-64-nonatomic-hi-lo.h>
60 * struct scmi_xfers_info - Structure to manage transfer information
68 * a number of xfers equal to the maximum allowed in-flight
71 * currently in-flight messages.
82 * struct scmi_protocol_instance - Describe an initialized protocol instance.
85 * @gid: A reference for per-protocol devres management.
93 * This field is NON-zero when a successful negotiation
115 * struct scmi_debug_info - Debug common info
119 * @is_atomic: Flag to state if the transport of this instance is atomic
131 * struct scmi_info - Structure representing a SCMI instance
137 * implementation version and (sub-)vendor identification.
152 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
153 * in microseconds, for atomic operations.
155 * to have an execution latency less than or equal to the threshold
156 * should be considered for atomic mode operation: such
294 version->vendor_id, in scmi_protocol_get()
295 version->sub_vendor_id, in scmi_protocol_get()
296 version->impl_ver); in scmi_protocol_get()
297 if (!proto || !try_module_get(proto->owner)) { in scmi_protocol_get()
305 pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n", in scmi_protocol_get()
306 protocol_id, proto->vendor_id ?: "", in scmi_protocol_get()
307 proto->sub_vendor_id ?: "", proto->impl_ver); in scmi_protocol_get()
315 module_put(proto->owner); in scmi_protocol_put()
320 if (!proto->vendor_id) { in scmi_vendor_protocol_check()
321 pr_err("missing vendor_id for protocol 0x%x\n", proto->id); in scmi_vendor_protocol_check()
322 return -EINVAL; in scmi_vendor_protocol_check()
325 if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { in scmi_vendor_protocol_check()
326 pr_err("malformed vendor_id for protocol 0x%x\n", proto->id); in scmi_vendor_protocol_check()
327 return -EINVAL; in scmi_vendor_protocol_check()
330 if (proto->sub_vendor_id && in scmi_vendor_protocol_check()
331 strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) { in scmi_vendor_protocol_check()
333 proto->id); in scmi_vendor_protocol_check()
334 return -EINVAL; in scmi_vendor_protocol_check()
347 return -EINVAL; in scmi_protocol_register()
350 if (!proto->instance_init) { in scmi_protocol_register()
351 pr_err("missing init for protocol 0x%x\n", proto->id); in scmi_protocol_register()
352 return -EINVAL; in scmi_protocol_register()
355 if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE && in scmi_protocol_register()
357 return -EINVAL; in scmi_protocol_register()
363 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, in scmi_protocol_register()
364 proto->sub_vendor_id, in scmi_protocol_register()
365 proto->impl_ver); in scmi_protocol_register()
367 return -EINVAL; in scmi_protocol_register()
371 pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n", in scmi_protocol_register()
372 proto->id, ret); in scmi_protocol_register()
376 pr_debug("Registered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_register()
386 key = scmi_protocol_key_calculate(proto->id, proto->vendor_id, in scmi_protocol_unregister()
387 proto->sub_vendor_id, in scmi_protocol_unregister()
388 proto->impl_ver); in scmi_protocol_unregister()
394 pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id); in scmi_protocol_unregister()
399 * scmi_create_protocol_devices - Create devices for all pending requests for
415 mutex_lock(&info->devreq_mtx); in scmi_create_protocol_devices()
416 sdev = scmi_device_create(np, info->dev, prot_id, name); in scmi_create_protocol_devices()
418 dev_err(info->dev, in scmi_create_protocol_devices()
421 mutex_unlock(&info->devreq_mtx); in scmi_create_protocol_devices()
427 mutex_lock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
428 scmi_device_destroy(info->dev, prot_id, name); in scmi_destroy_protocol_devices()
429 mutex_unlock(&info->devreq_mtx); in scmi_destroy_protocol_devices()
437 info->notify_priv = priv; in scmi_notification_instance_data_set()
448 return info->notify_priv; in scmi_notification_instance_data_get()
452 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
458 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
459 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
460 * of incorrect association of a late and expired xfer with a live in-flight
461 * transaction, both happening to re-use the same token identifier.
463 * Since the platform is NOT required to answer our requests in order, we should
466 * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
469 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
473 * X = used in-flight
476 * ------
478 * |- xfer_id picked
479 * -----------+----------------------------------------------------------
481 * ----------------------------------------------------------------------
483 * |- next_token
485 * Out-of-order pending at start
486 * -----------------------------
488 * |- xfer_id picked, last_token fixed
489 * -----+----------------------------------------------------------------
491 * ----------------------------------------------------------------------
493 * |- next_token
496 * Out-of-order pending at end
497 * ---------------------------
499 * |- xfer_id picked, last_token fixed
500 * -----+----------------------------------------------------------------
502 * ----------------------------------------------------------------------
504 * |- next_token
516 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1] in scmi_xfer_token_set()
517 * using the pre-allocated transfer_id as a base. in scmi_xfer_token_set()
523 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1)); in scmi_xfer_token_set()
526 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
530 * After heavily out-of-order responses, there are no free in scmi_xfer_token_set()
534 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table, in scmi_xfer_token_set()
538 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages in scmi_xfer_token_set()
539 * but we have not found any free token [0, MSG_TOKEN_MAX - 1]. in scmi_xfer_token_set()
542 return -ENOMEM; in scmi_xfer_token_set()
545 /* Update +/- last_token accordingly if we skipped some hole */ in scmi_xfer_token_set()
547 atomic_add((int)(xfer_id - next_token), &transfer_last_id); in scmi_xfer_token_set()
549 xfer->hdr.seq = (u16)xfer_id; in scmi_xfer_token_set()
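
The wrap-around search pictured above reduces to two find-first-zero passes over the token bitmap. A minimal userspace sketch of just that logic, assuming a plain bool array in place of the kernel bitmap (MSG_TOKEN_MAX here matches the 1024 tokens addressable by the 10-bit sequence field):

#include <stdbool.h>
#include <stdio.h>

#define MSG_TOKEN_MAX 1024

static bool token_in_flight[MSG_TOKEN_MAX];

/* Return a free token >= next_token, wrapping once; -1 if all are busy. */
static int pick_token(unsigned int next_token)
{
	unsigned int i;

	/* First pass: [next_token, MSG_TOKEN_MAX) */
	for (i = next_token; i < MSG_TOKEN_MAX; i++)
		if (!token_in_flight[i])
			return i;

	/* Second pass after wrap-around: [0, next_token) */
	for (i = 0; i < next_token; i++)
		if (!token_in_flight[i])
			return i;

	return -1;	/* all MSG_TOKEN_MAX tokens are in-flight */
}

int main(void)
{
	token_in_flight[5] = token_in_flight[6] = true;
	printf("picked %d\n", pick_token(5));	/* prints 7 */
	return 0;
}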
555 * scmi_xfer_token_clear - Release the token
563 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_token_clear()
567 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
572 * Note that this helper assumes that the xfer to be registered as in-flight
582 /* Set in-flight */ in scmi_xfer_inflight_register_unlocked()
583 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); in scmi_xfer_inflight_register_unlocked()
584 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); in scmi_xfer_inflight_register_unlocked()
585 xfer->pending = true; in scmi_xfer_inflight_register_unlocked()
589 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
597 * same sequence number is currently still registered as in-flight.
599 * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
608 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
609 if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table)) in scmi_xfer_inflight_register()
612 ret = -EBUSY; in scmi_xfer_inflight_register()
613 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_inflight_register()
619 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
632 return scmi_xfer_inflight_register(xfer, &info->tx_minfo); in scmi_xfer_raw_inflight_register()
636 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
637 * as pending in-flight
650 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
655 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_pending_set()
661 * scmi_xfer_get() - Allocate one message
687 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_get()
688 if (hlist_empty(&minfo->free_xfers)) { in scmi_xfer_get()
689 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
690 return ERR_PTR(-ENOMEM); in scmi_xfer_get()
694 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node); in scmi_xfer_get()
695 hlist_del_init(&xfer->node); in scmi_xfer_get()
701 xfer->transfer_id = atomic_inc_return(&transfer_last_id); in scmi_xfer_get()
703 refcount_set(&xfer->users, 1); in scmi_xfer_get()
704 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_get()
705 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_get()
711 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
717 * Return: A valid xfer on Success, or an error-pointer otherwise
724 xfer = scmi_xfer_get(handle, &info->tx_minfo); in scmi_xfer_raw_get()
726 xfer->flags |= SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_get()
732 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
740 * protocol in range is allowed, re-using the Base channel, so as to enable
751 cinfo = idr_find(&info->tx_idr, protocol_id); in scmi_xfer_raw_channel_get()
754 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
756 cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); in scmi_xfer_raw_channel_get()
758 return ERR_PTR(-EINVAL); in scmi_xfer_raw_channel_get()
759 dev_warn_once(handle->dev, in scmi_xfer_raw_channel_get()
768 * __scmi_xfer_put() - Release a message
783 spin_lock_irqsave(&minfo->xfer_lock, flags); in __scmi_xfer_put()
784 if (refcount_dec_and_test(&xfer->users)) { in __scmi_xfer_put()
785 if (xfer->pending) { in __scmi_xfer_put()
787 hash_del(&xfer->node); in __scmi_xfer_put()
788 xfer->pending = false; in __scmi_xfer_put()
790 hlist_add_head(&xfer->node, &minfo->free_xfers); in __scmi_xfer_put()
792 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in __scmi_xfer_put()
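
Condensing the get/put pair above into a self-contained sketch: xfers are recycled through a lock-protected free list, and only the last user's put returns the xfer to the pool. Plain pthreads and a singly linked list stand in for the kernel spinlock, hlist and refcount_t:

#include <pthread.h>
#include <stddef.h>

struct xfer {
	struct xfer *next;
	int refs;		/* stands in for refcount_t users */
};

static pthread_mutex_t xfer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct xfer *free_xfers;	/* stands in for minfo->free_xfers */

static struct xfer *xfer_get(void)
{
	struct xfer *x;

	pthread_mutex_lock(&xfer_lock);
	x = free_xfers;
	if (x) {
		free_xfers = x->next;
		x->refs = 1;
	}
	pthread_mutex_unlock(&xfer_lock);
	return x;		/* NULL plays the role of ERR_PTR(-ENOMEM) */
}

static void xfer_put(struct xfer *x)
{
	pthread_mutex_lock(&xfer_lock);
	if (--x->refs == 0) {	/* refcount_dec_and_test() */
		x->next = free_xfers;
		free_xfers = x;
	}
	pthread_mutex_unlock(&xfer_lock);
}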
796 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
808 xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; in scmi_xfer_raw_put()
809 xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; in scmi_xfer_raw_put()
810 return __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_raw_put()
814 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
830 if (test_bit(xfer_id, minfo->xfer_alloc_table)) in scmi_xfer_lookup_unlocked()
831 xfer = XFER_FIND(minfo->pending_xfers, xfer_id); in scmi_xfer_lookup_unlocked()
833 return xfer ?: ERR_PTR(-EINVAL); in scmi_xfer_lookup_unlocked()
837 * scmi_bad_message_trace - A helper to trace weird messages
845 * timed-out message that arrives and as such, can be traced only referring to
852 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_bad_message_trace()
869 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_bad_message_trace()
876 * scmi_msg_response_validate - Validate message type against state of related
885 * related synchronous response (Out-of-Order Delayed Response) the missing
888 * SCMI transport can deliver such out-of-order responses.
890 * Context: Assumes to be called with xfer->lock already acquired.
900 * a buggy platform could wrongly reply feeding us an unexpected in scmi_msg_response_validate()
901 * delayed response we're not prepared to handle: bail-out safely in scmi_msg_response_validate()
904 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { in scmi_msg_response_validate()
905 dev_err(cinfo->dev, in scmi_msg_response_validate()
907 xfer->hdr.seq); in scmi_msg_response_validate()
908 return -EINVAL; in scmi_msg_response_validate()
911 switch (xfer->state) { in scmi_msg_response_validate()
918 xfer->hdr.status = SCMI_SUCCESS; in scmi_msg_response_validate()
919 xfer->state = SCMI_XFER_RESP_OK; in scmi_msg_response_validate()
920 complete(&xfer->done); in scmi_msg_response_validate()
921 dev_warn(cinfo->dev, in scmi_msg_response_validate()
923 xfer->hdr.seq); in scmi_msg_response_validate()
928 return -EINVAL; in scmi_msg_response_validate()
932 return -EINVAL; in scmi_msg_response_validate()
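
The validation rules above can be restated as a small predicate over (message type, xfer state): a plain response is valid only while SENT_OK; a delayed response is valid from SENT_OK (out-of-order, the synchronous response went missing) or RESP_OK, and only for an asynchronous command. Enum names mirror the kernel ones but this is an illustrative reduction, not the in-tree code:

#include <stdbool.h>

enum xfer_state { XFER_SENT_OK, XFER_RESP_OK, XFER_DRESP_OK };
enum msg_type { TYPE_COMMAND, TYPE_DELAYED_RESP };

static bool response_valid(enum msg_type t, enum xfer_state s, bool is_async)
{
	if (t == TYPE_DELAYED_RESP && !is_async)
		return false;	/* unexpected delayed response: bail out */

	switch (s) {
	case XFER_SENT_OK:
		return true;	/* either type acceptable here */
	case XFER_RESP_OK:
		return t == TYPE_DELAYED_RESP;
	default:
		return false;	/* DRESP_OK: transaction already concluded */
	}
}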
939 * scmi_xfer_state_update - Update xfer state
952 xfer->hdr.type = msg_type; in scmi_xfer_state_update()
955 if (xfer->hdr.type == MSG_TYPE_COMMAND) in scmi_xfer_state_update()
956 xfer->state = SCMI_XFER_RESP_OK; in scmi_xfer_state_update()
958 xfer->state = SCMI_XFER_DRESP_OK; in scmi_xfer_state_update()
965 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); in scmi_xfer_acquired()
971 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
988 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_command_acquire()
989 struct scmi_xfers_info *minfo = &info->tx_minfo; in scmi_xfer_command_acquire()
994 spin_lock_irqsave(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
997 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
1000 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
1003 scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); in scmi_xfer_command_acquire()
1007 refcount_inc(&xfer->users); in scmi_xfer_command_acquire()
1008 spin_unlock_irqrestore(&minfo->xfer_lock, flags); in scmi_xfer_command_acquire()
1010 spin_lock_irqsave(&xfer->lock, flags); in scmi_xfer_command_acquire()
1023 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_xfer_command_acquire()
1026 dev_err(cinfo->dev, in scmi_xfer_command_acquire()
1027 "Invalid message type:%d for %d - HDR:0x%X state:%d\n", in scmi_xfer_command_acquire()
1028 msg_type, xfer_id, msg_hdr, xfer->state); in scmi_xfer_command_acquire()
1031 scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); in scmi_xfer_command_acquire()
1035 xfer = ERR_PTR(-EINVAL); in scmi_xfer_command_acquire()
1044 atomic_set(&xfer->busy, SCMI_XFER_FREE); in scmi_xfer_command_release()
1045 __scmi_xfer_put(&info->tx_minfo, xfer); in scmi_xfer_command_release()
1051 if (!cinfo->is_p2a) { in scmi_clear_channel()
1052 dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n"); in scmi_clear_channel()
1056 if (info->desc->ops->clear_channel) in scmi_clear_channel()
1057 info->desc->ops->clear_channel(cinfo); in scmi_clear_channel()
1064 struct device *dev = cinfo->dev; in scmi_handle_notification()
1065 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_notification()
1066 struct scmi_xfers_info *minfo = &info->rx_minfo; in scmi_handle_notification()
1070 xfer = scmi_xfer_get(cinfo->handle, minfo); in scmi_handle_notification()
1076 scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); in scmi_handle_notification()
1082 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_handle_notification()
1084 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_notification()
1085 smp_store_mb(xfer->priv, priv); in scmi_handle_notification()
1086 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, in scmi_handle_notification()
1089 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_notification()
1090 xfer->hdr.id, "NOTI", xfer->hdr.seq, in scmi_handle_notification()
1091 xfer->hdr.status, xfer->rx.buf, xfer->rx.len); in scmi_handle_notification()
1092 scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); in scmi_handle_notification()
1094 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, in scmi_handle_notification()
1095 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); in scmi_handle_notification()
1097 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_notification()
1098 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_notification()
1102 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_handle_notification()
1103 scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE, in scmi_handle_notification()
1104 cinfo->id); in scmi_handle_notification()
1116 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_handle_response()
1121 scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv); in scmi_handle_response()
1129 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) in scmi_handle_response()
1130 xfer->rx.len = info->desc->max_msg_size; in scmi_handle_response()
1133 /* Ensure order between xfer->priv store and following ops */ in scmi_handle_response()
1134 smp_store_mb(xfer->priv, priv); in scmi_handle_response()
1135 info->desc->ops->fetch_response(cinfo, xfer); in scmi_handle_response()
1137 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in scmi_handle_response()
1138 xfer->hdr.id, in scmi_handle_response()
1139 xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? in scmi_handle_response()
1142 xfer->hdr.seq, xfer->hdr.status, in scmi_handle_response()
1143 xfer->rx.buf, xfer->rx.len); in scmi_handle_response()
1145 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, in scmi_handle_response()
1146 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_handle_response()
1147 xfer->hdr.type); in scmi_handle_response()
1149 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { in scmi_handle_response()
1151 complete(xfer->async_done); in scmi_handle_response()
1152 scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); in scmi_handle_response()
1154 complete(&xfer->done); in scmi_handle_response()
1155 scmi_inc_count(info->dbg->counters, RESPONSE_OK); in scmi_handle_response()
1164 if (!xfer->hdr.poll_completion) in scmi_handle_response()
1165 scmi_raw_message_report(info->raw, xfer, in scmi_handle_response()
1167 cinfo->id); in scmi_handle_response()
1174 * scmi_rx_callback() - callback for receiving messages
1207 * xfer_put() - Release a transmit message
1216 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_put()
1218 __scmi_xfer_put(&info->tx_minfo, xfer); in xfer_put()
1224 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_done_no_timeout()
1227 * Poll also on xfer->done so that polling can be forcibly terminated in scmi_xfer_done_no_timeout()
1228 * in case of out-of-order receptions of delayed responses in scmi_xfer_done_no_timeout()
1230 return info->desc->ops->poll_done(cinfo, xfer) || in scmi_xfer_done_no_timeout()
1231 try_wait_for_completion(&xfer->done) || in scmi_xfer_done_no_timeout()
1240 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_reply()
1242 if (xfer->hdr.poll_completion) { in scmi_wait_for_reply()
1247 if (!desc->sync_cmds_completed_on_ret) { in scmi_wait_for_reply()
1258 "timed out in resp(caller: %pS) - polling\n", in scmi_wait_for_reply()
1260 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1261 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); in scmi_wait_for_reply()
1269 * Do not fetch_response if an out-of-order delayed in scmi_wait_for_reply()
1272 spin_lock_irqsave(&xfer->lock, flags); in scmi_wait_for_reply()
1273 if (xfer->state == SCMI_XFER_SENT_OK) { in scmi_wait_for_reply()
1274 desc->ops->fetch_response(cinfo, xfer); in scmi_wait_for_reply()
1275 xfer->state = SCMI_XFER_RESP_OK; in scmi_wait_for_reply()
1277 spin_unlock_irqrestore(&xfer->lock, flags); in scmi_wait_for_reply()
1280 trace_scmi_msg_dump(info->id, cinfo->id, in scmi_wait_for_reply()
1281 xfer->hdr.protocol_id, xfer->hdr.id, in scmi_wait_for_reply()
1284 xfer->hdr.seq, xfer->hdr.status, in scmi_wait_for_reply()
1285 xfer->rx.buf, xfer->rx.len); in scmi_wait_for_reply()
1286 scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); in scmi_wait_for_reply()
1289 scmi_raw_message_report(info->raw, xfer, in scmi_wait_for_reply()
1291 cinfo->id); in scmi_wait_for_reply()
1296 if (!wait_for_completion_timeout(&xfer->done, in scmi_wait_for_reply()
1300 ret = -ETIMEDOUT; in scmi_wait_for_reply()
1301 scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); in scmi_wait_for_reply()
1309 * scmi_wait_for_message_response - A helper to group all the possible ways of
1315 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1316 * configuration flags like xfer->hdr.poll_completion.
1323 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_wait_for_message_response()
1324 struct device *dev = info->dev; in scmi_wait_for_message_response()
1326 trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id, in scmi_wait_for_message_response()
1327 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_wait_for_message_response()
1328 info->desc->max_rx_timeout_ms, in scmi_wait_for_message_response()
1329 xfer->hdr.poll_completion); in scmi_wait_for_message_response()
1331 return scmi_wait_for_reply(dev, info->desc, cinfo, xfer, in scmi_wait_for_message_response()
1332 info->desc->max_rx_timeout_ms); in scmi_wait_for_message_response()
1336 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1350 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_xfer_raw_wait_for_message_response()
1351 struct device *dev = info->dev; in scmi_xfer_raw_wait_for_message_response()
1353 ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms); in scmi_xfer_raw_wait_for_message_response()
1355 dev_dbg(dev, "timed out in RAW response - HDR:%08X\n", in scmi_xfer_raw_wait_for_message_response()
1356 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_wait_for_message_response()
1362 * do_xfer() - Do one transfer
1367 * Return: -ETIMEDOUT in case of no response, if transmit error,
1376 struct scmi_info *info = handle_to_scmi_info(pi->handle); in do_xfer()
1377 struct device *dev = info->dev; in do_xfer()
1381 if (xfer->hdr.poll_completion && in do_xfer()
1382 !is_transport_polling_capable(info->desc)) { in do_xfer()
1385 scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); in do_xfer()
1386 return -EINVAL; in do_xfer()
1389 cinfo = idr_find(&info->tx_idr, pi->proto->id); in do_xfer()
1391 scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); in do_xfer()
1392 return -EINVAL; in do_xfer()
1395 if (is_polling_enabled(cinfo, info->desc)) in do_xfer()
1396 xfer->hdr.poll_completion = true; in do_xfer()
1403 xfer->hdr.protocol_id = pi->proto->id; in do_xfer()
1404 reinit_completion(&xfer->done); in do_xfer()
1406 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1407 xfer->hdr.protocol_id, xfer->hdr.seq, in do_xfer()
1408 xfer->hdr.poll_completion); in do_xfer()
1411 xfer->hdr.status = SCMI_SUCCESS; in do_xfer()
1412 xfer->state = SCMI_XFER_SENT_OK; in do_xfer()
1415 * on xfer->state due to the monotonically increasing tokens allocation, in do_xfer()
1416 * we must anyway ensure xfer->state initialization is not re-ordered in do_xfer()
1418 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. in do_xfer()
1422 ret = info->desc->ops->send_message(cinfo, xfer); in do_xfer()
1425 scmi_inc_count(info->dbg->counters, SENT_FAIL); in do_xfer()
1429 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, in do_xfer()
1430 xfer->hdr.id, "CMND", xfer->hdr.seq, in do_xfer()
1431 xfer->hdr.status, xfer->tx.buf, xfer->tx.len); in do_xfer()
1432 scmi_inc_count(info->dbg->counters, SENT_OK); in do_xfer()
1435 if (!ret && xfer->hdr.status) { in do_xfer()
1436 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer()
1437 scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); in do_xfer()
1440 if (info->desc->ops->mark_txdone) in do_xfer()
1441 info->desc->ops->mark_txdone(cinfo, ret, xfer); in do_xfer()
1443 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in do_xfer()
1444 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in do_xfer()
1453 struct scmi_info *info = handle_to_scmi_info(pi->handle); in reset_rx_to_maxsz()
1455 xfer->rx.len = info->desc->max_msg_size; in reset_rx_to_maxsz()
1459 * do_xfer_with_response() - Do one transfer and wait until the delayed
1465 * Using asynchronous commands in atomic/polling mode should be avoided since
1466 * it could cause long busy-waiting here, so ignore polling for the delayed
1471 * command even if made available, when an atomic transport is detected, and
1477 * when using atomic/polling mode)
1479 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1488 xfer->async_done = &async_response; in do_xfer_with_response()
1492 * not have been used when requiring an atomic/poll context; WARN and in do_xfer_with_response()
1496 WARN_ON_ONCE(xfer->hdr.poll_completion); in do_xfer_with_response()
1500 if (!wait_for_completion_timeout(xfer->async_done, timeout)) { in do_xfer_with_response()
1501 dev_err(ph->dev, in do_xfer_with_response()
1504 ret = -ETIMEDOUT; in do_xfer_with_response()
1505 } else if (xfer->hdr.status) { in do_xfer_with_response()
1506 ret = scmi_to_linux_errno(xfer->hdr.status); in do_xfer_with_response()
1510 xfer->async_done = NULL; in do_xfer_with_response()
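
A hypothetical protocol-driver use of the async path above, patterned on in-tree users: MY_ASYNC_CMD and the payload layout are invented, while the xfer_get_init/do_xfer_with_response/xfer_put calls are the xfer_ops exercised in this file (kernel SCMI headers assumed):

static int my_proto_async_op(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct scmi_xfer *t;
	int ret;

	ret = ph->xops->xfer_get_init(ph, MY_ASYNC_CMD, sizeof(u32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	/* Blocks until the delayed response arrives or times out */
	ret = ph->xops->do_xfer_with_response(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}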
1515 * xfer_get_init() - Allocate and initialise one message for transmit
1536 struct scmi_info *info = handle_to_scmi_info(pi->handle); in xfer_get_init()
1537 struct scmi_xfers_info *minfo = &info->tx_minfo; in xfer_get_init()
1538 struct device *dev = info->dev; in xfer_get_init()
1541 if (rx_size > info->desc->max_msg_size || in xfer_get_init()
1542 tx_size > info->desc->max_msg_size) in xfer_get_init()
1543 return -ERANGE; in xfer_get_init()
1545 xfer = scmi_xfer_get(pi->handle, minfo); in xfer_get_init()
1552 /* Pick a sequence number and register this xfer as in-flight */ in xfer_get_init()
1555 dev_err(pi->handle->dev, in xfer_get_init()
1561 xfer->tx.len = tx_size; in xfer_get_init()
1562 xfer->rx.len = rx_size ? : info->desc->max_msg_size; in xfer_get_init()
1563 xfer->hdr.type = MSG_TYPE_COMMAND; in xfer_get_init()
1564 xfer->hdr.id = msg_id; in xfer_get_init()
1565 xfer->hdr.poll_completion = false; in xfer_get_init()
1573 * version_get() - command to get the revision of the SCMI entity
1594 rev_info = t->rx.buf; in version_get()
1603 * scmi_set_protocol_priv - Set protocol specific data at init time
1616 pi->priv = priv; in scmi_set_protocol_priv()
1617 pi->version = version; in scmi_set_protocol_priv()
1623 * scmi_get_protocol_priv - Get protocol specific data set at init time
1633 return pi->priv; in scmi_get_protocol_priv()
1651 * scmi_common_extended_name_get - Common helper to get extended resources name
1672 ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t); in scmi_common_extended_name_get()
1676 put_unaligned_le32(res_id, t->tx.buf); in scmi_common_extended_name_get()
1678 put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id)); in scmi_common_extended_name_get()
1679 resp = t->rx.buf; in scmi_common_extended_name_get()
1681 ret = ph->xops->do_xfer(ph, t); in scmi_common_extended_name_get()
1683 strscpy(name, resp->name, len); in scmi_common_extended_name_get()
1685 ph->xops->xfer_put(ph, t); in scmi_common_extended_name_get()
1688 dev_warn(ph->dev, in scmi_common_extended_name_get()
1689 "Failed to get extended name - id:%u (ret:%d). Using %s\n", in scmi_common_extended_name_get()
1695 * scmi_common_get_max_msg_size - Get maximum message size
1703 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_common_get_max_msg_size()
1705 return info->desc->max_msg_size; in scmi_common_get_max_msg_size()
1709 * struct scmi_iterator - Iterator descriptor
1711 * a proper custom command payload for each multi-part command request.
1713 * @process_response to parse the multi-part replies.
1719 * internal routines and by the caller-provided @scmi_iterator_ops.
1741 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); in scmi_iterator_init()
1743 return ERR_PTR(-ENOMEM); in scmi_iterator_init()
1745 i->ph = ph; in scmi_iterator_init()
1746 i->ops = ops; in scmi_iterator_init()
1747 i->priv = priv; in scmi_iterator_init()
1749 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); in scmi_iterator_init()
1751 devm_kfree(ph->dev, i); in scmi_iterator_init()
1755 i->state.max_resources = max_resources; in scmi_iterator_init()
1756 i->msg = i->t->tx.buf; in scmi_iterator_init()
1757 i->resp = i->t->rx.buf; in scmi_iterator_init()
1764 int ret = -EINVAL; in scmi_iterator_run()
1770 if (!i || !i->ops || !i->ph) in scmi_iterator_run()
1773 iops = i->ops; in scmi_iterator_run()
1774 ph = i->ph; in scmi_iterator_run()
1775 st = &i->state; in scmi_iterator_run()
1778 iops->prepare_message(i->msg, st->desc_index, i->priv); in scmi_iterator_run()
1779 ret = ph->xops->do_xfer(ph, i->t); in scmi_iterator_run()
1783 st->rx_len = i->t->rx.len; in scmi_iterator_run()
1784 ret = iops->update_state(st, i->resp, i->priv); in scmi_iterator_run()
1788 if (st->num_returned > st->max_resources - st->desc_index) { in scmi_iterator_run()
1789 dev_err(ph->dev, in scmi_iterator_run()
1791 st->max_resources); in scmi_iterator_run()
1792 ret = -EINVAL; in scmi_iterator_run()
1796 for (st->loop_idx = 0; st->loop_idx < st->num_returned; in scmi_iterator_run()
1797 st->loop_idx++) { in scmi_iterator_run()
1798 ret = iops->process_response(ph, i->resp, st, i->priv); in scmi_iterator_run()
1803 st->desc_index += st->num_returned; in scmi_iterator_run()
1804 ph->xops->reset_rx_to_maxsz(ph, i->t); in scmi_iterator_run()
1809 } while (st->num_returned && st->num_remaining); in scmi_iterator_run()
1813 ph->xops->xfer_put(ph, i->t); in scmi_iterator_run()
1814 devm_kfree(ph->dev, i); in scmi_iterator_run()
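
To see the iterator in action, here is a sketch of a protocol driver enumerating resources with a multi-part list command. The message layouts, MY_LIST_CMD and the bit packing in the update callback are invented for the example; iter_response_init()/iter_response_run() and the scmi_iterator_ops callbacks follow the helper API used above:

struct my_msg {
	__le32 index;
};

struct my_resp {
	__le32 num_ret_remaining;	/* returned in [11:0], remaining in [31:16] */
	__le32 id[];
};

static void my_prepare(void *message, unsigned int desc_index,
		       const void *priv)
{
	struct my_msg *msg = message;

	msg->index = cpu_to_le32(desc_index);	/* resume from last batch */
}

static int my_update(struct scmi_iterator_state *st, const void *response,
		     void *priv)
{
	const struct my_resp *r = response;
	u32 v = le32_to_cpu(r->num_ret_remaining);

	st->num_returned = v & 0xfff;
	st->num_remaining = v >> 16;
	return 0;
}

static int my_process(const struct scmi_protocol_handle *ph,
		      const void *response, struct scmi_iterator_state *st,
		      void *priv)
{
	const struct my_resp *r = response;
	u32 *ids = priv;

	ids[st->desc_index + st->loop_idx] = le32_to_cpu(r->id[st->loop_idx]);
	return 0;
}

static int my_enumerate(const struct scmi_protocol_handle *ph, u32 *ids,
			unsigned int max_ids)
{
	struct scmi_iterator_ops ops = {
		.prepare_message = my_prepare,
		.update_state = my_update,
		.process_response = my_process,
	};
	void *iter;

	iter = ph->hops->iter_response_init(ph, &ops, max_ids, MY_LIST_CMD,
					    sizeof(struct my_msg), ids);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	return ph->hops->iter_response_run(iter);
}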
1858 ret = -EINVAL; in scmi_common_fastchannel_init()
1862 ret = ph->xops->xfer_get_init(ph, describe_id, in scmi_common_fastchannel_init()
1867 info = t->tx.buf; in scmi_common_fastchannel_init()
1868 info->domain = cpu_to_le32(domain); in scmi_common_fastchannel_init()
1869 info->message_id = cpu_to_le32(message_id); in scmi_common_fastchannel_init()
1876 ret = ph->xops->do_xfer(ph, t); in scmi_common_fastchannel_init()
1880 resp = t->rx.buf; in scmi_common_fastchannel_init()
1881 flags = le32_to_cpu(resp->attr); in scmi_common_fastchannel_init()
1882 size = le32_to_cpu(resp->chan_size); in scmi_common_fastchannel_init()
1884 ret = -EINVAL; in scmi_common_fastchannel_init()
1889 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0); in scmi_common_fastchannel_init()
1891 phys_addr = le32_to_cpu(resp->chan_addr_low); in scmi_common_fastchannel_init()
1892 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; in scmi_common_fastchannel_init()
1893 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1895 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1902 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); in scmi_common_fastchannel_init()
1904 ret = -ENOMEM; in scmi_common_fastchannel_init()
1909 phys_addr = le32_to_cpu(resp->db_addr_low); in scmi_common_fastchannel_init()
1910 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; in scmi_common_fastchannel_init()
1911 addr = devm_ioremap(ph->dev, phys_addr, size); in scmi_common_fastchannel_init()
1913 ret = -EADDRNOTAVAIL; in scmi_common_fastchannel_init()
1917 db->addr = addr; in scmi_common_fastchannel_init()
1918 db->width = size; in scmi_common_fastchannel_init()
1919 db->set = le32_to_cpu(resp->db_set_lmask); in scmi_common_fastchannel_init()
1920 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; in scmi_common_fastchannel_init()
1921 db->mask = le32_to_cpu(resp->db_preserve_lmask); in scmi_common_fastchannel_init()
1922 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; in scmi_common_fastchannel_init()
1927 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1929 dev_dbg(ph->dev, in scmi_common_fastchannel_init()
1931 pi->proto->id, message_id, domain); in scmi_common_fastchannel_init()
1936 devm_kfree(ph->dev, db); in scmi_common_fastchannel_init()
1942 ph->xops->xfer_put(ph, t); in scmi_common_fastchannel_init()
1945 dev_warn(ph->dev, in scmi_common_fastchannel_init()
1946 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n", in scmi_common_fastchannel_init()
1947 pi->proto->id, message_id, domain, ret); in scmi_common_fastchannel_init()
1954 if (db->mask) \
1955 val = ioread##w(db->addr) & db->mask; \
1956 iowrite##w((u##w)db->set | val, db->addr); \
1961 if (!db || !db->addr) in scmi_common_fastchannel_db_ring()
1964 if (db->width == 1) in scmi_common_fastchannel_db_ring()
1966 else if (db->width == 2) in scmi_common_fastchannel_db_ring()
1968 else if (db->width == 4) in scmi_common_fastchannel_db_ring()
1970 else /* db->width == 8 */ in scmi_common_fastchannel_db_ring()
1977 if (db->mask) in scmi_common_fastchannel_db_ring()
1978 val = ioread64_hi_lo(db->addr) & db->mask; in scmi_common_fastchannel_db_ring()
1979 iowrite64_hi_lo(db->set | val, db->addr); in scmi_common_fastchannel_db_ring()
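
In isolation, the masked doorbell write implemented by the macro and the 64-bit fallback above is a plain read-modify-write: bits named in the preserve mask are read back from the register, everything else is dropped, and the set-mask is OR-ed in. A 32-bit userspace model of the same computation:

#include <stdint.h>

static uint32_t ring_db32(uint32_t current_reg, uint32_t set_mask,
			  uint32_t preserve_mask)
{
	/* Keep only the bits the platform asked us to preserve ... */
	uint32_t val = current_reg & preserve_mask;

	/* ... and OR in the doorbell set-mask before writing back */
	return set_mask | val;
}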
1985 * scmi_protocol_msg_check - Check protocol message attributes
2008 put_unaligned_le32(message_id, t->tx.buf); in scmi_protocol_msg_check()
2011 *attributes = get_unaligned_le32(t->rx.buf); in scmi_protocol_msg_check()
2028 * scmi_revision_area_get - Retrieve version memory area.
2043 return pi->handle->version; in scmi_revision_area_get()
2047 * scmi_protocol_version_negotiate - Negotiate protocol version
2074 put_unaligned_le32(pi->proto->supported_version, t->tx.buf); in scmi_protocol_version_negotiate()
2077 pi->negotiated_version = pi->proto->supported_version; in scmi_protocol_version_negotiate()
2085 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2092 * all resource management is handled via a dedicated per-protocol devres
2104 int ret = -ENOMEM; in scmi_alloc_init_protocol_instance()
2107 const struct scmi_handle *handle = &info->handle; in scmi_alloc_init_protocol_instance()
2110 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); in scmi_alloc_init_protocol_instance()
2116 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL); in scmi_alloc_init_protocol_instance()
2120 pi->gid = gid; in scmi_alloc_init_protocol_instance()
2121 pi->proto = proto; in scmi_alloc_init_protocol_instance()
2122 pi->handle = handle; in scmi_alloc_init_protocol_instance()
2123 pi->ph.dev = handle->dev; in scmi_alloc_init_protocol_instance()
2124 pi->ph.xops = &xfer_ops; in scmi_alloc_init_protocol_instance()
2125 pi->ph.hops = &helpers_ops; in scmi_alloc_init_protocol_instance()
2126 pi->ph.set_priv = scmi_set_protocol_priv; in scmi_alloc_init_protocol_instance()
2127 pi->ph.get_priv = scmi_get_protocol_priv; in scmi_alloc_init_protocol_instance()
2128 refcount_set(&pi->users, 1); in scmi_alloc_init_protocol_instance()
2129 /* proto->init is assured NON NULL by scmi_protocol_register */ in scmi_alloc_init_protocol_instance()
2130 ret = pi->proto->instance_init(&pi->ph); in scmi_alloc_init_protocol_instance()
2134 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1, in scmi_alloc_init_protocol_instance()
2136 if (ret != proto->id) in scmi_alloc_init_protocol_instance()
2143 if (pi->proto->events) { in scmi_alloc_init_protocol_instance()
2144 ret = scmi_register_protocol_events(handle, pi->proto->id, in scmi_alloc_init_protocol_instance()
2145 &pi->ph, in scmi_alloc_init_protocol_instance()
2146 pi->proto->events); in scmi_alloc_init_protocol_instance()
2148 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2149 "Protocol:%X - Events Registration Failed - err:%d\n", in scmi_alloc_init_protocol_instance()
2150 pi->proto->id, ret); in scmi_alloc_init_protocol_instance()
2153 devres_close_group(handle->dev, pi->gid); in scmi_alloc_init_protocol_instance()
2154 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id); in scmi_alloc_init_protocol_instance()
2156 if (pi->version > proto->supported_version) { in scmi_alloc_init_protocol_instance()
2157 ret = scmi_protocol_version_negotiate(&pi->ph); in scmi_alloc_init_protocol_instance()
2159 dev_info(handle->dev, in scmi_alloc_init_protocol_instance()
2161 proto->id, pi->negotiated_version); in scmi_alloc_init_protocol_instance()
2163 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2165 pi->version, pi->proto->id); in scmi_alloc_init_protocol_instance()
2166 dev_warn(handle->dev, in scmi_alloc_init_protocol_instance()
2168 pi->proto->supported_version); in scmi_alloc_init_protocol_instance()
2177 devres_release_group(handle->dev, gid); in scmi_alloc_init_protocol_instance()
2183 * scmi_get_protocol_instance - Protocol initialization helper.
2189 * resource allocation with a dedicated per-protocol devres subgroup.
2192 * in particular returns -EPROBE_DEFER when the desired protocol could
2201 mutex_lock(&info->protocols_mtx); in scmi_get_protocol_instance()
2202 pi = idr_find(&info->protocols, protocol_id); in scmi_get_protocol_instance()
2205 refcount_inc(&pi->users); in scmi_get_protocol_instance()
2210 proto = scmi_protocol_get(protocol_id, &info->version); in scmi_get_protocol_instance()
2214 pi = ERR_PTR(-EPROBE_DEFER); in scmi_get_protocol_instance()
2216 mutex_unlock(&info->protocols_mtx); in scmi_get_protocol_instance()
2222 * scmi_protocol_acquire - Protocol acquire
2237 * scmi_protocol_release - Protocol de-initialization helper.
2241 * Removes one user for the specified protocol and triggers de-initialization
2242 * and resources de-allocation once the last user has gone.
2249 mutex_lock(&info->protocols_mtx); in scmi_protocol_release()
2250 pi = idr_find(&info->protocols, protocol_id); in scmi_protocol_release()
2254 if (refcount_dec_and_test(&pi->users)) { in scmi_protocol_release()
2255 void *gid = pi->gid; in scmi_protocol_release()
2257 if (pi->proto->events) in scmi_protocol_release()
2260 if (pi->proto->instance_deinit) in scmi_protocol_release()
2261 pi->proto->instance_deinit(&pi->ph); in scmi_protocol_release()
2263 idr_remove(&info->protocols, protocol_id); in scmi_protocol_release()
2265 scmi_protocol_put(pi->proto); in scmi_protocol_release()
2267 devres_release_group(handle->dev, gid); in scmi_protocol_release()
2268 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n", in scmi_protocol_release()
2273 mutex_unlock(&info->protocols_mtx); in scmi_protocol_release()
2280 struct scmi_info *info = handle_to_scmi_info(pi->handle); in scmi_setup_protocol_implemented()
2282 info->protocols_imp = prot_imp; in scmi_setup_protocol_implemented()
2290 struct scmi_revision_info *rev = handle->version; in scmi_is_protocol_implemented()
2292 if (!info->protocols_imp) in scmi_is_protocol_implemented()
2295 for (i = 0; i < rev->num_protocols; i++) in scmi_is_protocol_implemented()
2296 if (info->protocols_imp[i] == prot_id) in scmi_is_protocol_implemented()
2310 scmi_protocol_release(dres->handle, dres->protocol_id); in scmi_devm_release_protocol()
2322 return ERR_PTR(-ENOMEM); in scmi_devres_protocol_instance_get()
2324 pi = scmi_get_protocol_instance(sdev->handle, protocol_id); in scmi_devres_protocol_instance_get()
2330 dres->handle = sdev->handle; in scmi_devres_protocol_instance_get()
2331 dres->protocol_id = protocol_id; in scmi_devres_protocol_instance_get()
2332 devres_add(&sdev->dev, dres); in scmi_devres_protocol_instance_get()
2338 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2349 * released, and possibly de-initialized on last user, once the SCMI driver
2362 return ERR_PTR(-EINVAL); in scmi_devm_protocol_get()
2368 *ph = &pi->ph; in scmi_devm_protocol_get()
2370 return pi->proto->ops; in scmi_devm_protocol_get()
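
For context, a sketch of how an SCMI driver would typically consume this devres-managed getter from its probe. scmi_clk_proto_ops and SCMI_PROTOCOL_CLOCK match the in-tree clock protocol; the driver body itself is invented:

static int my_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	/* clk_ops and ph stay valid until the driver unbinds */
	return 0;
}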
2374 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2384 * released, and possibly de-initialized on last user, once the SCMI driver
2408 return dres->protocol_id == *((u8 *)data); in scmi_devm_protocol_match()
2412 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2424 ret = devres_release(&sdev->dev, scmi_devm_release_protocol, in scmi_devm_protocol_put()
2430 * scmi_is_transport_atomic - Method to check if underlying transport for an
2431 * SCMI instance is configured as atomic.
2435 * configured threshold for atomic operations.
2437 * Return: True if transport is configured as atomic
2445 ret = info->desc->atomic_enabled && in scmi_is_transport_atomic()
2446 is_transport_polling_capable(info->desc); in scmi_is_transport_atomic()
2448 *atomic_threshold = info->atomic_threshold; in scmi_is_transport_atomic()
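
A sketch of how a driver consumes the atomicity information above, patterned on in-tree users such as the SCMI clock driver: atomic operation is only worth enabling when the transport is natively atomic AND the per-operation latency fits the DT-configured threshold. 'op_latency_us' is a made-up per-operation estimate for the example:

static bool use_atomic_ops(const struct scmi_handle *handle,
			   unsigned int op_latency_us)
{
	unsigned int threshold_us = 0;

	/* Transport must be configured as atomic (and polling capable) ... */
	if (!handle->is_transport_atomic(handle, &threshold_us))
		return false;

	/* ... and the operation expected to complete within the threshold */
	return op_latency_us <= threshold_us;
}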
2454 * scmi_handle_get() - Get the SCMI handle for a device
2473 if (dev->parent == info->dev) { in scmi_handle_get()
2474 info->users++; in scmi_handle_get()
2475 handle = &info->handle; in scmi_handle_get()
2485 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2494 * if NULL was passed, it returns -EINVAL; in scmi_handle_put
2501 return -EINVAL; in scmi_handle_put()
2505 if (!WARN_ON(!info->users)) in scmi_handle_put()
2506 info->users--; in scmi_handle_put()
2524 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); in scmi_set_handle()
2525 if (scmi_dev->handle) in scmi_set_handle()
2526 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); in scmi_set_handle()
2534 struct device *dev = sinfo->dev; in __scmi_xfer_info_init()
2535 const struct scmi_desc *desc = sinfo->desc; in __scmi_xfer_info_init()
2537 /* Pre-allocated messages, no more than what hdr.seq can support */ in __scmi_xfer_info_init()
2538 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { in __scmi_xfer_info_init()
2540 "Invalid maximum messages %d, not in range [1 - %lu]\n", in __scmi_xfer_info_init()
2541 info->max_msg, MSG_TOKEN_MAX); in __scmi_xfer_info_init()
2542 return -EINVAL; in __scmi_xfer_info_init()
2545 hash_init(info->pending_xfers); in __scmi_xfer_info_init()
2548 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX, in __scmi_xfer_info_init()
2550 if (!info->xfer_alloc_table) in __scmi_xfer_info_init()
2551 return -ENOMEM; in __scmi_xfer_info_init()
2555 * pre-initialize the buffer pointer to pre-allocated buffers and in __scmi_xfer_info_init()
2558 INIT_HLIST_HEAD(&info->free_xfers); in __scmi_xfer_info_init()
2559 for (i = 0; i < info->max_msg; i++) { in __scmi_xfer_info_init()
2562 return -ENOMEM; in __scmi_xfer_info_init()
2564 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, in __scmi_xfer_info_init()
2566 if (!xfer->rx.buf) in __scmi_xfer_info_init()
2567 return -ENOMEM; in __scmi_xfer_info_init()
2569 xfer->tx.buf = xfer->rx.buf; in __scmi_xfer_info_init()
2570 init_completion(&xfer->done); in __scmi_xfer_info_init()
2571 spin_lock_init(&xfer->lock); in __scmi_xfer_info_init()
2574 hlist_add_head(&xfer->node, &info->free_xfers); in __scmi_xfer_info_init()
2577 spin_lock_init(&info->xfer_lock); in __scmi_xfer_info_init()
2584 const struct scmi_desc *desc = sinfo->desc; in scmi_channels_max_msg_configure()
2586 if (!desc->ops->get_max_msg) { in scmi_channels_max_msg_configure()
2587 sinfo->tx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2588 sinfo->rx_minfo.max_msg = desc->max_msg; in scmi_channels_max_msg_configure()
2592 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2594 return -EINVAL; in scmi_channels_max_msg_configure()
2595 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2598 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); in scmi_channels_max_msg_configure()
2600 sinfo->rx_minfo.max_msg = in scmi_channels_max_msg_configure()
2601 desc->ops->get_max_msg(base_cinfo); in scmi_channels_max_msg_configure()
2615 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); in scmi_xfer_info_init()
2616 if (!ret && !idr_is_empty(&sinfo->rx_idr)) in scmi_xfer_info_init()
2617 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); in scmi_xfer_info_init()
2633 idr = tx ? &info->tx_idr : &info->rx_idr; in scmi_chan_setup()
2635 if (!info->desc->ops->chan_available(of_node, idx)) { in scmi_chan_setup()
2638 return -EINVAL; in scmi_chan_setup()
2642 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_chan_setup()
2644 return -ENOMEM; in scmi_chan_setup()
2646 cinfo->is_p2a = !tx; in scmi_chan_setup()
2647 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; in scmi_chan_setup()
2653 tdev = scmi_device_create(of_node, info->dev, prot_id, name); in scmi_chan_setup()
2655 dev_err(info->dev, in scmi_chan_setup()
2657 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2658 return -EINVAL; in scmi_chan_setup()
2662 cinfo->id = prot_id; in scmi_chan_setup()
2663 cinfo->dev = &tdev->dev; in scmi_chan_setup()
2664 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); in scmi_chan_setup()
2667 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2668 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2672 if (tx && is_polling_required(cinfo, info->desc)) { in scmi_chan_setup()
2673 if (is_transport_polling_capable(info->desc)) in scmi_chan_setup()
2674 dev_info(&tdev->dev, in scmi_chan_setup()
2675 "Enabled polling mode TX channel - prot_id:%d\n", in scmi_chan_setup()
2678 dev_warn(&tdev->dev, in scmi_chan_setup()
2685 dev_err(info->dev, in scmi_chan_setup()
2690 scmi_device_destroy(info->dev, prot_id, name); in scmi_chan_setup()
2691 devm_kfree(info->dev, cinfo); in scmi_chan_setup()
2696 cinfo->handle = &info->handle; in scmi_chan_setup()
2709 if (ret && ret != -ENOMEM) in scmi_txrx_setup()
2714 dev_err(info->dev, in scmi_txrx_setup()
2721 * scmi_channels_setup - Helper to initialize all required channels
2741 struct device_node *top_np = info->dev->of_node; in scmi_channels_setup()
2755 dev_err(info->dev, in scmi_channels_setup()
2770 if (cinfo->dev) { in scmi_chan_destroy()
2771 struct scmi_info *info = handle_to_scmi_info(cinfo->handle); in scmi_chan_destroy()
2772 struct scmi_device *sdev = to_scmi_dev(cinfo->dev); in scmi_chan_destroy()
2774 of_node_put(cinfo->dev->of_node); in scmi_chan_destroy()
2775 scmi_device_destroy(info->dev, id, sdev->name); in scmi_chan_destroy()
2776 cinfo->dev = NULL; in scmi_chan_destroy()
2787 idr_for_each(idr, info->desc->ops->chan_free, idr); in scmi_cleanup_channels()
2797 scmi_cleanup_channels(info, &info->tx_idr); in scmi_cleanup_txrx_channels()
2799 scmi_cleanup_channels(info, &info->rx_idr); in scmi_cleanup_txrx_channels()
2809 if (!strncmp(sdev->name, "__scmi_transport_device", 23) || in scmi_bus_notifier()
2810 sdev->dev.parent != info->dev) in scmi_bus_notifier()
2819 scmi_handle_put(sdev->handle); in scmi_bus_notifier()
2820 sdev->handle = NULL; in scmi_bus_notifier()
2826 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev), in scmi_bus_notifier()
2827 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ? in scmi_bus_notifier()
2840 np = idr_find(&info->active_protocols, id_table->protocol_id); in scmi_device_request_notifier()
2844 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n", in scmi_device_request_notifier()
2845 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-", in scmi_device_request_notifier()
2846 id_table->name, id_table->protocol_id); in scmi_device_request_notifier()
2850 scmi_create_protocol_devices(np, info, id_table->protocol_id, in scmi_device_request_notifier()
2851 id_table->name); in scmi_device_request_notifier()
2854 scmi_destroy_protocol_devices(info, id_table->protocol_id, in scmi_device_request_notifier()
2855 id_table->name); in scmi_device_request_notifier()
2884 struct scmi_debug_info *dbg = filp->private_data; in reset_all_on_write()
2887 atomic_set(&dbg->counters[i], 0); in reset_all_on_write()
2908 &dbg->counters[idx]); in scmi_debugfs_counters_setup()
2920 debugfs_remove_recursive(dbg->top_dentry); in scmi_debugfs_common_cleanup()
2921 kfree(dbg->name); in scmi_debugfs_common_cleanup()
2922 kfree(dbg->type); in scmi_debugfs_common_cleanup()
2932 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL); in scmi_debugfs_common_setup()
2936 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL); in scmi_debugfs_common_setup()
2937 if (!dbg->name) { in scmi_debugfs_common_setup()
2938 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2942 of_property_read_string(info->dev->of_node, "compatible", &c_ptr); in scmi_debugfs_common_setup()
2943 dbg->type = kstrdup(c_ptr, GFP_KERNEL); in scmi_debugfs_common_setup()
2944 if (!dbg->type) { in scmi_debugfs_common_setup()
2945 kfree(dbg->name); in scmi_debugfs_common_setup()
2946 devm_kfree(info->dev, dbg); in scmi_debugfs_common_setup()
2950 snprintf(top_dir, 16, "%d", info->id); in scmi_debugfs_common_setup()
2954 dbg->is_atomic = info->desc->atomic_enabled && in scmi_debugfs_common_setup()
2955 is_transport_polling_capable(info->desc); in scmi_debugfs_common_setup()
2958 (char **)&dbg->name); in scmi_debugfs_common_setup()
2961 &info->atomic_threshold); in scmi_debugfs_common_setup()
2963 debugfs_create_str("type", 0400, trans, (char **)&dbg->type); in scmi_debugfs_common_setup()
2965 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic); in scmi_debugfs_common_setup()
2968 (u32 *)&info->desc->max_rx_timeout_ms); in scmi_debugfs_common_setup()
2971 (u32 *)&info->desc->max_msg_size); in scmi_debugfs_common_setup()
2974 (u32 *)&info->tx_minfo.max_msg); in scmi_debugfs_common_setup()
2977 (u32 *)&info->rx_minfo.max_msg); in scmi_debugfs_common_setup()
2982 dbg->top_dentry = top_dentry; in scmi_debugfs_common_setup()
2984 if (devm_add_action_or_reset(info->dev, in scmi_debugfs_common_setup()
2998 if (!info->dbg) in scmi_debugfs_raw_mode_setup()
2999 return -EINVAL; in scmi_debugfs_raw_mode_setup()
3002 idr_for_each_entry(&info->tx_idr, cinfo, id) { in scmi_debugfs_raw_mode_setup()
3008 dev_warn(info->dev, in scmi_debugfs_raw_mode_setup()
3009 "SCMI RAW - Error enumerating channels\n"); in scmi_debugfs_raw_mode_setup()
3013 if (!test_bit(cinfo->id, protos)) { in scmi_debugfs_raw_mode_setup()
3014 channels[num_chans++] = cinfo->id; in scmi_debugfs_raw_mode_setup()
3015 set_bit(cinfo->id, protos); in scmi_debugfs_raw_mode_setup()
3019 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry, in scmi_debugfs_raw_mode_setup()
3020 info->id, channels, num_chans, in scmi_debugfs_raw_mode_setup()
3021 info->desc, info->tx_minfo.max_msg); in scmi_debugfs_raw_mode_setup()
3022 if (IS_ERR(info->raw)) { in scmi_debugfs_raw_mode_setup()
3023 dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n"); in scmi_debugfs_raw_mode_setup()
3024 ret = PTR_ERR(info->raw); in scmi_debugfs_raw_mode_setup()
3025 info->raw = NULL; in scmi_debugfs_raw_mode_setup()
3037 if (!trans || !trans->desc || !trans->supplier || !trans->core_ops) in scmi_transport_setup()
3040 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) { in scmi_transport_setup()
3047 *trans->core_ops = &scmi_trans_core_ops; in scmi_transport_setup()
3049 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier)); in scmi_transport_setup()
3051 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms", in scmi_transport_setup()
3052 &trans->desc->max_rx_timeout_ms); in scmi_transport_setup()
3053 if (ret && ret != -EINVAL) in scmi_transport_setup()
3054 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n"); in scmi_transport_setup()
3056 dev_info(dev, "SCMI max-rx-timeout: %dms\n", in scmi_transport_setup()
3057 trans->desc->max_rx_timeout_ms); in scmi_transport_setup()
3059 return trans->desc; in scmi_transport_setup()
3070 struct device *dev = &pdev->dev; in scmi_probe()
3071 struct device_node *child, *np = dev->of_node; in scmi_probe()
3076 ret = -EINVAL; in scmi_probe()
3082 return -ENOMEM; in scmi_probe()
3084 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL); in scmi_probe()
3085 if (info->id < 0) in scmi_probe()
3086 return info->id; in scmi_probe()
3088 info->dev = dev; in scmi_probe()
3089 info->desc = desc; in scmi_probe()
3090 info->bus_nb.notifier_call = scmi_bus_notifier; in scmi_probe()
3091 info->dev_req_nb.notifier_call = scmi_device_request_notifier; in scmi_probe()
3092 INIT_LIST_HEAD(&info->node); in scmi_probe()
3093 idr_init(&info->protocols); in scmi_probe()
3094 mutex_init(&info->protocols_mtx); in scmi_probe()
3095 idr_init(&info->active_protocols); in scmi_probe()
3096 mutex_init(&info->devreq_mtx); in scmi_probe()
3099 idr_init(&info->tx_idr); in scmi_probe()
3100 idr_init(&info->rx_idr); in scmi_probe()
3102 handle = &info->handle; in scmi_probe()
3103 handle->dev = info->dev; in scmi_probe()
3104 handle->version = &info->version; in scmi_probe()
3105 handle->devm_protocol_acquire = scmi_devm_protocol_acquire; in scmi_probe()
3106 handle->devm_protocol_get = scmi_devm_protocol_get; in scmi_probe()
3107 handle->devm_protocol_put = scmi_devm_protocol_put; in scmi_probe()
3109 /* System-wide atomic threshold for atomic ops, if any */ in scmi_probe()
3110 if (!of_property_read_u32(np, "atomic-threshold-us", in scmi_probe()
3111 &info->atomic_threshold)) in scmi_probe()
3113 "SCMI System wide atomic threshold set to %d us\n", in scmi_probe()
3114 info->atomic_threshold); in scmi_probe()
3115 handle->is_transport_atomic = scmi_is_transport_atomic; in scmi_probe()
3124 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
3131 &info->dev_req_nb); in scmi_probe()
3144 info->dbg = scmi_debugfs_common_setup(info); in scmi_probe()
3145 if (!info->dbg) in scmi_probe()
3166 if (info->desc->atomic_enabled && in scmi_probe()
3167 !is_transport_polling_capable(info->desc)) in scmi_probe()
3169 "Transport is not polling capable. Atomic mode not supported.\n"); in scmi_probe()
3187 list_add_tail(&info->node, &scmi_list); in scmi_probe()
3209 ret = idr_alloc(&info->active_protocols, child, in scmi_probe()
3225 scmi_raw_mode_cleanup(info->raw); in scmi_probe()
3226 scmi_notification_exit(&info->handle); in scmi_probe()
3229 &info->dev_req_nb); in scmi_probe()
3231 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_probe()
3235 ida_free(&scmi_id, info->id); in scmi_probe()
3248 scmi_raw_mode_cleanup(info->raw); in scmi_remove()
3251 if (info->users) in scmi_remove()
3252 dev_warn(&pdev->dev, in scmi_remove()
3254 list_del(&info->node); in scmi_remove()
3257 scmi_notification_exit(&info->handle); in scmi_remove()
3259 mutex_lock(&info->protocols_mtx); in scmi_remove()
3260 idr_destroy(&info->protocols); in scmi_remove()
3261 mutex_unlock(&info->protocols_mtx); in scmi_remove()
3263 idr_for_each_entry(&info->active_protocols, child, id) in scmi_remove()
3265 idr_destroy(&info->active_protocols); in scmi_remove()
3268 &info->dev_req_nb); in scmi_remove()
3269 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb); in scmi_remove()
3274 ida_free(&scmi_id, info->id); in scmi_remove()
3282 return sprintf(buf, "%u.%u\n", info->version.major_ver, in protocol_version_show()
3283 info->version.minor_ver); in protocol_version_show()
3292 return sprintf(buf, "0x%x\n", info->version.impl_ver); in firmware_version_show()
3301 return sprintf(buf, "%s\n", info->version.vendor_id); in vendor_id_show()
3310 return sprintf(buf, "%s\n", info->version.sub_vendor_id); in sub_vendor_id_show()
3325 .name = "arm-scmi",
3350 return -EINVAL; in scmi_driver_init()
3397 MODULE_ALIAS("platform:arm-scmi");