Lines matching the full-text search +full:report +full:- +full:rate +full:- +full:hz (apparently the hyphenated term "report-rate-hz"); all hits below are in the Linux SCMI clock protocol driver, drivers/firmware/arm_scmi/clock.c.
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2018-2022 ARM Ltd.
121 } rate[]; member
184 if (clk_id >= ci->num_clocks) in scmi_clock_domain_lookup()
185 return ERR_PTR(-EINVAL); in scmi_clock_domain_lookup()
187 return ci->clk + clk_id; in scmi_clock_domain_lookup()
198 ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, in scmi_clock_protocol_attributes_get()
203 attr = t->rx.buf; in scmi_clock_protocol_attributes_get()
205 ret = ph->xops->do_xfer(ph, t); in scmi_clock_protocol_attributes_get()
207 ci->num_clocks = le16_to_cpu(attr->num_clocks); in scmi_clock_protocol_attributes_get()
208 ci->max_async_req = attr->max_async_req; in scmi_clock_protocol_attributes_get()
211 ph->xops->xfer_put(ph, t); in scmi_clock_protocol_attributes_get()
214 if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL)) in scmi_clock_protocol_attributes_get()
215 ci->notify_rate_changed_cmd = true; in scmi_clock_protocol_attributes_get()
217 if (!ph->hops->protocol_msg_check(ph, in scmi_clock_protocol_attributes_get()
220 ci->notify_rate_change_requested_cmd = true; in scmi_clock_protocol_attributes_get()
238 msg->id = cpu_to_le32(p->clk_id); in iter_clk_possible_parents_prepare_message()
240 msg->skip_parents = cpu_to_le32(desc_index); in iter_clk_possible_parents_prepare_message()
248 struct device *dev = ((struct scmi_clk_ipriv *)p)->dev; in iter_clk_possible_parents_update_state()
251 flags = le32_to_cpu(r->num_parent_flags); in iter_clk_possible_parents_update_state()
252 st->num_returned = NUM_PARENTS_RETURNED(flags); in iter_clk_possible_parents_update_state()
253 st->num_remaining = NUM_PARENTS_REMAINING(flags); in iter_clk_possible_parents_update_state()
259 if (!st->max_resources) { in iter_clk_possible_parents_update_state()
260 p->clk->num_parents = st->num_returned + st->num_remaining; in iter_clk_possible_parents_update_state()
261 p->clk->parents = devm_kcalloc(dev, p->clk->num_parents, in iter_clk_possible_parents_update_state()
262 sizeof(*p->clk->parents), in iter_clk_possible_parents_update_state()
264 if (!p->clk->parents) { in iter_clk_possible_parents_update_state()
265 p->clk->num_parents = 0; in iter_clk_possible_parents_update_state()
266 return -ENOMEM; in iter_clk_possible_parents_update_state()
268 st->max_resources = st->num_returned + st->num_remaining; in iter_clk_possible_parents_update_state()
282 u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx]; in iter_clk_possible_parents_process_response()
284 *parent = le32_to_cpu(r->possible_parents[st->loop_idx]); in iter_clk_possible_parents_process_response()
301 .dev = ph->dev, in scmi_clock_possible_parents()
306 iter = ph->hops->iter_response_init(ph, &ops, 0, in scmi_clock_possible_parents()
313 ret = ph->hops->iter_response_run(iter); in scmi_clock_possible_parents()
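scmi_clock_possible_parents() drives the generic iterator helpers over CLOCK_POSSIBLE_PARENTS_GET: the first reply's returned/remaining counts size the parents[] array once (the devm_kcalloc() above), and every chunk's IDs are copied in at desc_index + loop_idx until nothing remains. A standalone sketch of that two-phase pagination, where fetch_chunk() is a made-up stand-in for one SCMI exchange:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical chunked reply: how many parent IDs this chunk carries and
 * how many are still pending on the platform side. */
struct parents_chunk {
    uint32_t num_returned;
    uint32_t num_remaining;
    uint32_t ids[4];        /* at most 4 parent IDs per message here */
};

/* Stand-in for one CLOCK_POSSIBLE_PARENTS_GET exchange starting at 'skip'. */
static void fetch_chunk(uint32_t skip, uint32_t total, struct parents_chunk *c)
{
    uint32_t left = total - skip;

    c->num_returned = left > 4 ? 4 : left;
    c->num_remaining = left - c->num_returned;
    for (uint32_t i = 0; i < c->num_returned; i++)
        c->ids[i] = 100 + skip + i;    /* fake parent clock IDs */
}

int main(void)
{
    uint32_t total = 6, *parents = NULL, count = 0, idx = 0;
    struct parents_chunk c;

    do {
        fetch_chunk(idx, total, &c);
        if (!parents) {    /* size the array from the first reply only */
            count = c.num_returned + c.num_remaining;
            parents = calloc(count, sizeof(*parents));
            if (!parents)
                return 1;
        }
        for (uint32_t i = 0; i < c.num_returned; i++)
            parents[idx + i] = c.ids[i];
        idx += c.num_returned;
    } while (c.num_remaining);

    for (uint32_t i = 0; i < count; i++)
        printf("parent[%u] = %u\n", (unsigned)i, (unsigned)parents[i]);
    free(parents);
    return 0;
}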
326 ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS, in scmi_clock_get_permissions()
331 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_get_permissions()
333 ret = ph->xops->do_xfer(ph, t); in scmi_clock_get_permissions()
335 perm = get_unaligned_le32(t->rx.buf); in scmi_clock_get_permissions()
337 clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED); in scmi_clock_get_permissions()
338 clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED); in scmi_clock_get_permissions()
339 clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED); in scmi_clock_get_permissions()
342 ph->xops->xfer_put(ph, t); in scmi_clock_get_permissions()
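scmi_clock_get_permissions() caches the per-agent permission word as three *_forbidden flags so that later rate_set, set_parent and enable/disable calls can return -EACCES immediately instead of asking the platform. A sketch of that mapping; the bit positions below are assumptions for illustration, not quoted from the SCMI spec:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed permission bits in the CLOCK_GET_PERMISSIONS reply word. */
#define CLOCK_STATE_CONTROL_ALLOWED    (1u << 31)
#define CLOCK_PARENT_CONTROL_ALLOWED   (1u << 30)
#define CLOCK_RATE_CONTROL_ALLOWED     (1u << 29)

struct clk_perm {
    bool state_ctrl_forbidden;
    bool rate_ctrl_forbidden;
    bool parent_ctrl_forbidden;
};

int main(void)
{
    uint32_t perm = CLOCK_STATE_CONTROL_ALLOWED;    /* example reply */
    struct clk_perm clk = {
        .state_ctrl_forbidden  = !(perm & CLOCK_STATE_CONTROL_ALLOWED),
        .rate_ctrl_forbidden   = !(perm & CLOCK_RATE_CONTROL_ALLOWED),
        .parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED),
    };

    printf("state:%s rate:%s parent:%s\n",
           clk.state_ctrl_forbidden ? "forbidden" : "ok",
           clk.rate_ctrl_forbidden ? "forbidden" : "ok",
           clk.parent_ctrl_forbidden ? "forbidden" : "ok");
    return 0;
}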
355 struct scmi_clock_info *clk = cinfo->clk + clk_id; in scmi_clock_attributes_get()
357 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, in scmi_clock_attributes_get()
362 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_attributes_get()
363 attr = t->rx.buf; in scmi_clock_attributes_get()
365 ret = ph->xops->do_xfer(ph, t); in scmi_clock_attributes_get()
369 attributes = le32_to_cpu(attr->attributes); in scmi_clock_attributes_get()
370 strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); in scmi_clock_attributes_get()
373 latency = le32_to_cpu(attr->clock_enable_latency); in scmi_clock_attributes_get()
374 clk->enable_latency = latency ? : U32_MAX; in scmi_clock_attributes_get()
377 ph->xops->xfer_put(ph, t); in scmi_clock_attributes_get()
385 ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id, in scmi_clock_attributes_get()
386 NULL, clk->name, in scmi_clock_attributes_get()
389 if (cinfo->notify_rate_changed_cmd && in scmi_clock_attributes_get()
391 clk->rate_changed_notifications = true; in scmi_clock_attributes_get()
392 if (cinfo->notify_rate_change_requested_cmd && in scmi_clock_attributes_get()
394 clk->rate_change_requested_notifications = true; in scmi_clock_attributes_get()
401 clk->extended_config = true; in scmi_clock_attributes_get()
413 return -1; in rate_cmp_func()
427 msg->id = cpu_to_le32(p->clk_id); in iter_clk_describe_prepare_message()
429 msg->rate_index = cpu_to_le32(desc_index); in iter_clk_describe_prepare_message()
440 flags = le32_to_cpu(r->num_rates_flags); in iter_clk_describe_update_state()
441 st->num_remaining = NUM_REMAINING(flags); in iter_clk_describe_update_state()
442 st->num_returned = NUM_RETURNED(flags); in iter_clk_describe_update_state()
443 p->clk->rate_discrete = RATE_DISCRETE(flags); in iter_clk_describe_update_state()
446 if (!p->clk->rate_discrete && in iter_clk_describe_update_state()
447 (st->num_returned != 3 || st->num_remaining != 0)) { in iter_clk_describe_update_state()
448 dev_warn(p->dev, in iter_clk_describe_update_state()
449 "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", in iter_clk_describe_update_state()
450 p->clk->name, st->num_returned, st->num_remaining, in iter_clk_describe_update_state()
451 st->rx_len); in iter_clk_describe_update_state()
457 if (st->num_returned != 3 && st->num_remaining == 0 && in iter_clk_describe_update_state()
458 st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { in iter_clk_describe_update_state()
459 st->num_returned = 3; in iter_clk_describe_update_state()
460 st->num_remaining = 0; in iter_clk_describe_update_state()
462 dev_err(p->dev, in iter_clk_describe_update_state()
463 "Cannot fix out-of-spec reply !\n"); in iter_clk_describe_update_state()
464 return -EPROTO; in iter_clk_describe_update_state()
480 if (!p->clk->rate_discrete) { in iter_clk_describe_process_response()
481 switch (st->desc_index + st->loop_idx) { in iter_clk_describe_process_response()
483 p->clk->range.min_rate = RATE_TO_U64(r->rate[0]); in iter_clk_describe_process_response()
486 p->clk->range.max_rate = RATE_TO_U64(r->rate[1]); in iter_clk_describe_process_response()
489 p->clk->range.step_size = RATE_TO_U64(r->rate[2]); in iter_clk_describe_process_response()
492 ret = -EINVAL; in iter_clk_describe_process_response()
496 u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx]; in iter_clk_describe_process_response() local
498 *rate = RATE_TO_U64(r->rate[st->loop_idx]); in iter_clk_describe_process_response()
499 p->clk->list.num_rates++; in iter_clk_describe_process_response()
519 .dev = ph->dev, in scmi_clock_describe_rates_get()
522 iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, in scmi_clock_describe_rates_get()
529 ret = ph->hops->iter_response_run(iter); in scmi_clock_describe_rates_get()
533 if (!clk->rate_discrete) { in scmi_clock_describe_rates_get()
534 dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", in scmi_clock_describe_rates_get()
535 clk->range.min_rate, clk->range.max_rate, in scmi_clock_describe_rates_get()
536 clk->range.step_size); in scmi_clock_describe_rates_get()
537 } else if (clk->list.num_rates) { in scmi_clock_describe_rates_get()
538 sort(clk->list.rates, clk->list.num_rates, in scmi_clock_describe_rates_get()
539 sizeof(clk->list.rates[0]), rate_cmp_func, NULL); in scmi_clock_describe_rates_get()
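CLOCK_DESCRIBE_RATES either returns a discrete list of rates (sorted here with rate_cmp_func) or exactly one {min, max, step} triplet; each entry is a pair of little-endian 32-bit words combined by RATE_TO_U64. The update_state hook also tolerates one firmware quirk: a triplet reply whose returned/remaining counts are wrong but whose payload length is exactly three entries. A sketch of decoding one entry and walking a linear range, assuming the low word comes first:

#include <stdint.h>
#include <stdio.h>

/* One DESCRIBE_RATES entry: a 64-bit rate in Hz split into two LE words. */
struct rate_entry {
    uint32_t value_low;
    uint32_t value_high;
};

static uint64_t rate_to_u64(struct rate_entry r)
{
    return (uint64_t)r.value_high << 32 | r.value_low;
}

int main(void)
{
    /* Example triplet reply: min 100 MHz, max 400 MHz, step 100 MHz. */
    struct rate_entry triplet[3] = {
        { 100000000, 0 }, { 400000000, 0 }, { 100000000, 0 },
    };
    uint64_t min = rate_to_u64(triplet[0]);
    uint64_t max = rate_to_u64(triplet[1]);
    uint64_t step = rate_to_u64(triplet[2]);

    for (uint64_t hz = min; hz <= max; hz += step)
        printf("supported rate: %llu Hz\n", (unsigned long long)hz);
    return 0;
}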
552 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET, in scmi_clock_rate_get()
557 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_rate_get()
559 ret = ph->xops->do_xfer(ph, t); in scmi_clock_rate_get()
561 *value = get_unaligned_le64(t->rx.buf); in scmi_clock_rate_get()
563 ph->xops->xfer_put(ph, t); in scmi_clock_rate_get()
568 u32 clk_id, u64 rate) in scmi_clock_rate_set() argument
574 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_rate_set()
581 if (clk->rate_ctrl_forbidden) in scmi_clock_rate_set()
582 return -EACCES; in scmi_clock_rate_set()
584 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t); in scmi_clock_rate_set()
588 if (ci->max_async_req && in scmi_clock_rate_set()
589 atomic_inc_return(&ci->cur_async_req) < ci->max_async_req) in scmi_clock_rate_set()
592 cfg = t->tx.buf; in scmi_clock_rate_set()
593 cfg->flags = cpu_to_le32(flags); in scmi_clock_rate_set()
594 cfg->id = cpu_to_le32(clk_id); in scmi_clock_rate_set()
595 cfg->value_low = cpu_to_le32(rate & 0xffffffff); in scmi_clock_rate_set()
596 cfg->value_high = cpu_to_le32(rate >> 32); in scmi_clock_rate_set()
599 ret = ph->xops->do_xfer_with_response(ph, t); in scmi_clock_rate_set()
603 resp = t->rx.buf; in scmi_clock_rate_set()
604 if (le32_to_cpu(resp->id) == clk_id) in scmi_clock_rate_set()
605 dev_dbg(ph->dev, in scmi_clock_rate_set()
607 get_unaligned_le64(&resp->rate_low)); in scmi_clock_rate_set()
609 ret = -EPROTO; in scmi_clock_rate_set()
612 ret = ph->xops->do_xfer(ph, t); in scmi_clock_rate_set()
615 if (ci->max_async_req) in scmi_clock_rate_set()
616 atomic_dec(&ci->cur_async_req); in scmi_clock_rate_set()
618 ph->xops->xfer_put(ph, t); in scmi_clock_rate_set()
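scmi_clock_rate_set() bails out early when rate control is forbidden, goes asynchronous only while fewer than max_async_req requests are outstanding (the cur_async_req counter above), splits the 64-bit rate into low/high words for the wire, and on the async path checks the delayed response's clock ID before logging the new rate. A sketch of the packing and in-flight accounting; the ASYNC flag being bit 0 is an assumption here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rate_set_msg {
    uint32_t flags;        /* assumed: bit 0 requests asynchronous delivery */
    uint32_t id;
    uint32_t value_low;
    uint32_t value_high;
};

static int cur_async_req;    /* stand-in for the driver's atomic counter */

static bool build_rate_set(uint32_t clk_id, uint64_t rate_hz,
                           int max_async_req, struct rate_set_msg *msg)
{
    bool async = max_async_req && ++cur_async_req < max_async_req;

    msg->flags = async ? 1u : 0u;
    msg->id = clk_id;
    msg->value_low = (uint32_t)(rate_hz & 0xffffffffu);
    msg->value_high = (uint32_t)(rate_hz >> 32);
    return async;    /* caller waits for a delayed response if true */
}

int main(void)
{
    struct rate_set_msg msg;
    bool async = build_rate_set(3, 1500000000ULL, 4, &msg);

    printf("async=%d low=0x%08x high=0x%08x\n",
           (int)async, (unsigned)msg.value_low, (unsigned)msg.value_high);
    if (async)
        cur_async_req--;    /* release the slot once the request completes */
    return 0;
}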
633 return -EINVAL; in scmi_clock_config_set()
635 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, in scmi_clock_config_set()
640 t->hdr.poll_completion = atomic; in scmi_clock_config_set()
642 cfg = t->tx.buf; in scmi_clock_config_set()
643 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_set()
644 cfg->attributes = cpu_to_le32(state); in scmi_clock_config_set()
646 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_set()
648 ph->xops->xfer_put(ph, t); in scmi_clock_config_set()
659 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_set_parent()
666 if (parent_id >= clk->num_parents) in scmi_clock_set_parent()
667 return -EINVAL; in scmi_clock_set_parent()
669 if (clk->parent_ctrl_forbidden) in scmi_clock_set_parent()
670 return -EACCES; in scmi_clock_set_parent()
672 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET, in scmi_clock_set_parent()
677 t->hdr.poll_completion = false; in scmi_clock_set_parent()
679 cfg = t->tx.buf; in scmi_clock_set_parent()
680 cfg->id = cpu_to_le32(clk_id); in scmi_clock_set_parent()
681 cfg->parent_id = cpu_to_le32(clk->parents[parent_id]); in scmi_clock_set_parent()
683 ret = ph->xops->do_xfer(ph, t); in scmi_clock_set_parent()
685 ph->xops->xfer_put(ph, t); in scmi_clock_set_parent()
697 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET, in scmi_clock_get_parent()
702 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_get_parent()
704 ret = ph->xops->do_xfer(ph, t); in scmi_clock_get_parent()
706 *parent_id = get_unaligned_le32(t->rx.buf); in scmi_clock_get_parent()
708 ph->xops->xfer_put(ph, t); in scmi_clock_get_parent()
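scmi_clock_set_parent() treats its parent_id argument as an index into the parents[] array filled in by scmi_clock_possible_parents(), rejecting out-of-range indices and translating valid ones into the platform's real parent clock ID before sending CLOCK_PARENT_SET; scmi_clock_get_parent() simply reads the current parent back. A small sketch of that index-to-ID translation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* set_parent takes an index into the parents[] array discovered via
 * CLOCK_POSSIBLE_PARENTS_GET and sends the platform the real parent ID. */
static int pick_parent_id(const uint32_t *parents, uint32_t num_parents,
                          uint32_t parent_idx, uint32_t *wire_id)
{
    if (parent_idx >= num_parents)
        return -EINVAL;
    *wire_id = parents[parent_idx];
    return 0;
}

int main(void)
{
    uint32_t parents[] = { 100, 101, 102 };
    uint32_t wire_id;

    if (!pick_parent_id(parents, 3, 1, &wire_id))
        printf("CLOCK_PARENT_SET payload parent id: %u\n", (unsigned)wire_id);
    return 0;
}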
726 return -EINVAL; in scmi_clock_config_set_v2()
728 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET, in scmi_clock_config_set_v2()
733 t->hdr.poll_completion = atomic; in scmi_clock_config_set_v2()
738 cfg = t->tx.buf; in scmi_clock_config_set_v2()
739 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_set_v2()
740 cfg->attributes = cpu_to_le32(attrs); in scmi_clock_config_set_v2()
742 cfg->oem_config_val = cpu_to_le32(0); in scmi_clock_config_set_v2()
744 cfg->oem_config_val = cpu_to_le32(oem_val); in scmi_clock_config_set_v2()
746 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_set_v2()
748 ph->xops->xfer_put(ph, t); in scmi_clock_config_set_v2()
755 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_enable()
762 if (clk->state_ctrl_forbidden) in scmi_clock_enable()
763 return -EACCES; in scmi_clock_enable()
765 return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE, in scmi_clock_enable()
772 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_disable()
779 if (clk->state_ctrl_forbidden) in scmi_clock_disable()
780 return -EACCES; in scmi_clock_disable()
782 return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE, in scmi_clock_disable()
797 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET, in scmi_clock_config_get_v2()
802 t->hdr.poll_completion = atomic; in scmi_clock_config_get_v2()
806 cfg = t->tx.buf; in scmi_clock_config_get_v2()
807 cfg->id = cpu_to_le32(clk_id); in scmi_clock_config_get_v2()
808 cfg->flags = cpu_to_le32(flags); in scmi_clock_config_get_v2()
810 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_get_v2()
812 struct scmi_msg_resp_clock_config_get *resp = t->rx.buf; in scmi_clock_config_get_v2()
815 *attributes = le32_to_cpu(resp->attributes); in scmi_clock_config_get_v2()
818 *enabled = IS_CLK_ENABLED(resp->config); in scmi_clock_config_get_v2()
821 *oem_val = le32_to_cpu(resp->oem_config_val); in scmi_clock_config_get_v2()
824 ph->xops->xfer_put(ph, t); in scmi_clock_config_get_v2()
839 return -EINVAL; in scmi_clock_config_get()
841 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES, in scmi_clock_config_get()
846 t->hdr.poll_completion = atomic; in scmi_clock_config_get()
847 put_unaligned_le32(clk_id, t->tx.buf); in scmi_clock_config_get()
848 resp = t->rx.buf; in scmi_clock_config_get()
850 ret = ph->xops->do_xfer(ph, t); in scmi_clock_config_get()
852 *enabled = IS_CLK_ENABLED(resp->attributes); in scmi_clock_config_get()
854 ph->xops->xfer_put(ph, t); in scmi_clock_config_get()
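Two config-read paths coexist: the v2 CLOCK_CONFIG_GET reply carries attributes, an enable flag in config, and an optional OEM value, while the older fallback infers the enable state from the CLOCK_ATTRIBUTES word. A sketch of the enable-bit check, assuming (as the IS_CLK_ENABLED() uses above suggest) that the flag lives in bit 0:

#include <stdint.h>
#include <stdio.h>

/* Assumed: the enable flag is bit 0 of the returned config/attributes word. */
#define IS_CLK_ENABLED(x)    (((x) & 1u) != 0)

int main(void)
{
    uint32_t v2_config = 0x1;            /* CLOCK_CONFIG_GET: enabled */
    uint32_t legacy_attributes = 0x0;    /* CLOCK_ATTRIBUTES: disabled */

    printf("v2 says %s, legacy says %s\n",
           IS_CLK_ENABLED(v2_config) ? "enabled" : "disabled",
           IS_CLK_ENABLED(legacy_attributes) ? "enabled" : "disabled");
    return 0;
}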
862 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_state_get()
864 return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL, in scmi_clock_state_get()
873 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_config_oem_set()
880 if (!clk->extended_config) in scmi_clock_config_oem_set()
881 return -EOPNOTSUPP; in scmi_clock_config_oem_set()
883 return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED, in scmi_clock_config_oem_set()
892 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_config_oem_get()
899 if (!clk->extended_config) in scmi_clock_config_oem_get()
900 return -EOPNOTSUPP; in scmi_clock_config_oem_get()
902 return ci->clock_config_get(ph, clk_id, oem_type, attributes, in scmi_clock_config_oem_get()
908 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_count_get()
910 return ci->num_clocks; in scmi_clock_count_get()
917 struct clock_info *ci = ph->get_priv(ph); in scmi_clock_info_get()
923 if (!clk->name[0]) in scmi_clock_info_get()
948 struct clock_info *ci = ph->get_priv(ph); in scmi_clk_notify_supported()
958 supported = clk->rate_changed_notifications; in scmi_clk_notify_supported()
960 supported = clk->rate_change_requested_notifications; in scmi_clk_notify_supported()
972 ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); in scmi_clk_rate_notify()
976 notify = t->tx.buf; in scmi_clk_rate_notify()
977 notify->clk_id = cpu_to_le32(clk_id); in scmi_clk_rate_notify()
978 notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; in scmi_clk_rate_notify()
980 ret = ph->xops->do_xfer(ph, t); in scmi_clk_rate_notify()
982 ph->xops->xfer_put(ph, t); in scmi_clk_rate_notify()
992 return -EINVAL; in scmi_clk_set_notify_enabled()
997 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", in scmi_clk_set_notify_enabled()
1006 void *report, u32 *src_id) in scmi_clk_fill_custom_report() argument
1009 struct scmi_clock_rate_notif_report *r = report; in scmi_clk_fill_custom_report()
1016 r->timestamp = timestamp; in scmi_clk_fill_custom_report()
1017 r->agent_id = le32_to_cpu(p->agent_id); in scmi_clk_fill_custom_report()
1018 r->clock_id = le32_to_cpu(p->clock_id); in scmi_clk_fill_custom_report()
1019 r->rate = get_unaligned_le64(&p->rate_low); in scmi_clk_fill_custom_report()
1020 *src_id = r->clock_id; in scmi_clk_fill_custom_report()
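scmi_clk_fill_custom_report() turns a CLOCK_RATE_NOTIFY event payload into the scmi_clock_rate_notif_report delivered to notifier chains, with the clock ID doubling as the notification source ID. A sketch of that translation; the payload struct below is illustrative and only mirrors the fields accessed above (agent_id, clock_id, and the rate as two LE words):

#include <stdint.h>
#include <stdio.h>

/* Illustrative event payload: agent, clock, then the rate as two LE words. */
struct clock_rate_notify_payload {
    uint32_t agent_id;
    uint32_t clock_id;
    uint32_t rate_low;
    uint32_t rate_high;
};

struct clock_rate_notif_report {
    uint64_t timestamp;
    uint32_t agent_id;
    uint32_t clock_id;
    uint64_t rate;
};

static void fill_report(uint64_t ts,
                        const struct clock_rate_notify_payload *p,
                        struct clock_rate_notif_report *r, uint32_t *src_id)
{
    r->timestamp = ts;
    r->agent_id = p->agent_id;
    r->clock_id = p->clock_id;
    r->rate = (uint64_t)p->rate_high << 32 | p->rate_low;
    *src_id = r->clock_id;    /* events are keyed by clock domain */
}

int main(void)
{
    struct clock_rate_notify_payload p = { 1, 7, 800000000, 0 };
    struct clock_rate_notif_report r;
    uint32_t src;

    fill_report(123456, &p, &r, &src);
    printf("clk %u changed to %llu Hz (agent %u)\n",
           (unsigned)r.clock_id, (unsigned long long)r.rate,
           (unsigned)r.agent_id);
    return 0;
}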
1027 struct clock_info *ci = ph->get_priv(ph); in scmi_clk_get_num_sources()
1030 return -EINVAL; in scmi_clk_get_num_sources()
1032 return ci->num_clocks; in scmi_clk_get_num_sources()
1068 ret = ph->xops->version_get(ph, &version); in scmi_clock_protocol_init()
1072 dev_dbg(ph->dev, "Clock Version %d.%d\n", in scmi_clock_protocol_init()
1075 cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL); in scmi_clock_protocol_init()
1077 return -ENOMEM; in scmi_clock_protocol_init()
1083 cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks, in scmi_clock_protocol_init()
1084 sizeof(*cinfo->clk), GFP_KERNEL); in scmi_clock_protocol_init()
1085 if (!cinfo->clk) in scmi_clock_protocol_init()
1086 return -ENOMEM; in scmi_clock_protocol_init()
1088 for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { in scmi_clock_protocol_init()
1089 struct scmi_clock_info *clk = cinfo->clk + clkid; in scmi_clock_protocol_init()
1097 cinfo->clock_config_set = scmi_clock_config_set_v2; in scmi_clock_protocol_init()
1098 cinfo->clock_config_get = scmi_clock_config_get_v2; in scmi_clock_protocol_init()
1100 cinfo->clock_config_set = scmi_clock_config_set; in scmi_clock_protocol_init()
1101 cinfo->clock_config_get = scmi_clock_config_get; in scmi_clock_protocol_init()
1104 cinfo->version = version; in scmi_clock_protocol_init()
1105 return ph->set_priv(ph, cinfo, version); in scmi_clock_protocol_init()
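scmi_clock_protocol_init() reads the protocol version, allocates one scmi_clock_info per reported domain, fills each via scmi_clock_attributes_get(), and installs either the legacy or the v2 config set/get handlers as function pointers before stashing the result as protocol-private data. A standalone sketch of that version-gated dispatch; the exact revision threshold used below is an assumption:

#include <stdint.h>
#include <stdio.h>

/* The init path installs one of two config handlers as a function pointer,
 * so every later call site stays version-agnostic. */
struct clock_info_sketch {
    uint32_t version;
    int (*clock_config_set)(uint32_t clk_id, int enable);
};

static int config_set_legacy(uint32_t clk_id, int enable)
{
    printf("legacy CLOCK_CONFIG_SET: clk %u -> %d\n", (unsigned)clk_id, enable);
    return 0;
}

static int config_set_v2(uint32_t clk_id, int enable)
{
    printf("v2 CLOCK_CONFIG_SET (with OEM fields): clk %u -> %d\n",
           (unsigned)clk_id, enable);
    return 0;
}

int main(void)
{
    /* Assumed: newer clock protocol revisions select the v2 handlers. */
    struct clock_info_sketch ci = { .version = 0x30000 };

    ci.clock_config_set = (ci.version >> 16) >= 3 ? config_set_v2
                                                  : config_set_legacy;
    return ci.clock_config_set(5, 1);
}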