// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"
/* SCMI Clock protocol message IDs (per the Arm SCMI specification). */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	/*
	 * Restored members: CLOCK_RATE_SET/GET and CLOCK_NAME_GET are
	 * referenced by scmi_clock_rate_set(), scmi_clock_rate_get() and
	 * scmi_clock_attributes_get() below but were missing from the
	 * truncated enum.
	 */
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};
26 struct scmi_msg_resp_clock_protocol_attributes {
32 struct scmi_msg_resp_clock_attributes {
34 #define CLOCK_ENABLE BIT(0)
35 #define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
36 #define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
37 #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
38 u8 name[SCMI_SHORT_NAME_MAX_SIZE];
39 __le32 clock_enable_latency;
42 struct scmi_clock_set_config {
47 struct scmi_msg_clock_describe_rates {
52 struct scmi_msg_resp_clock_describe_rates {
53 __le32 num_rates_flags;
54 #define NUM_RETURNED(x) ((x) & 0xfff)
55 #define RATE_DISCRETE(x) !((x) & BIT(12))
56 #define NUM_REMAINING(x) ((x) >> 16)
61 #define RATE_TO_U64(X) \
64 le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
68 struct scmi_clock_set_rate {
70 #define CLOCK_SET_ASYNC BIT(0)
71 #define CLOCK_SET_IGNORE_RESP BIT(1)
72 #define CLOCK_SET_ROUND_UP BIT(2)
73 #define CLOCK_SET_ROUND_AUTO BIT(3)
79 struct scmi_msg_resp_set_rate_complete {
85 struct scmi_msg_clock_rate_notify {
90 struct scmi_clock_rate_notify_payld {
101 atomic_t cur_async_req;
102 struct scmi_clock_info *clk;
105 static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
107 CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
111 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
112 struct clock_info *ci)
116 struct scmi_msg_resp_clock_protocol_attributes *attr;
118 ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
119 0, sizeof(*attr), &t);
125 ret = ph->xops->do_xfer(ph, t);
127 ci->num_clocks = le16_to_cpu(attr->num_clocks);
128 ci->max_async_req = attr->max_async_req;
131 ph->xops->xfer_put(ph, t);
135 static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
136 u32 clk_id, struct scmi_clock_info *clk,
142 struct scmi_msg_resp_clock_attributes *attr;
144 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
145 sizeof(clk_id), sizeof(*attr), &t);
149 put_unaligned_le32(clk_id, t->tx.buf);
152 ret = ph->xops->do_xfer(ph, t);
155 attributes = le32_to_cpu(attr->attributes);
156 strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
157 /* clock_enable_latency field is present only since SCMI v3.1 */
158 if (PROTOCOL_REV_MAJOR(version) >= 0x2)
159 latency = le32_to_cpu(attr->clock_enable_latency);
160 clk->enable_latency = latency ? : U32_MAX;
163 ph->xops->xfer_put(ph, t);
166 * If supported overwrite short name with the extended one;
167 * on error just carry on and use already provided short name.
169 if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
170 if (SUPPORTS_EXTENDED_NAMES(attributes))
171 ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
175 if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
176 clk->rate_changed_notifications = true;
177 if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
178 clk->rate_change_requested_notifications = true;
184 static int rate_cmp_func(const void *_r1, const void *_r2)
186 const u64 *r1 = _r1, *r2 = _r2;
196 struct scmi_clk_ipriv {
198 struct scmi_clock_info *clk;
201 static void iter_clk_describe_prepare_message(void *message,
202 const unsigned int desc_index,
205 struct scmi_msg_clock_describe_rates *msg = message;
206 const struct scmi_clk_ipriv *p = priv;
208 msg->id = cpu_to_le32(p->clk_id);
209 /* Set the number of rates to be skipped/already read */
210 msg->rate_index = cpu_to_le32(desc_index);
214 iter_clk_describe_update_state(struct scmi_iterator_state *st,
215 const void *response, void *priv)
218 struct scmi_clk_ipriv *p = priv;
219 const struct scmi_msg_resp_clock_describe_rates *r = response;
221 flags = le32_to_cpu(r->num_rates_flags);
222 st->num_remaining = NUM_REMAINING(flags);
223 st->num_returned = NUM_RETURNED(flags);
224 p->clk->rate_discrete = RATE_DISCRETE(flags);
230 iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
231 const void *response,
232 struct scmi_iterator_state *st, void *priv)
235 struct scmi_clk_ipriv *p = priv;
236 const struct scmi_msg_resp_clock_describe_rates *r = response;
238 if (!p->clk->rate_discrete) {
239 switch (st->desc_index + st->loop_idx) {
241 p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
244 p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
247 p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
254 u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
256 *rate = RATE_TO_U64(r->rate[st->loop_idx]);
257 p->clk->list.num_rates++;
258 //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
265 scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
266 struct scmi_clock_info *clk)
270 struct scmi_iterator_ops ops = {
271 .prepare_message = iter_clk_describe_prepare_message,
272 .update_state = iter_clk_describe_update_state,
273 .process_response = iter_clk_describe_process_response,
275 struct scmi_clk_ipriv cpriv = {
280 iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
281 CLOCK_DESCRIBE_RATES,
282 sizeof(struct scmi_msg_clock_describe_rates),
285 return PTR_ERR(iter);
287 ret = ph->hops->iter_response_run(iter);
291 if (!clk->rate_discrete) {
292 dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
293 clk->range.min_rate, clk->range.max_rate,
294 clk->range.step_size);
295 } else if (clk->list.num_rates) {
296 sort(clk->list.rates, clk->list.num_rates,
297 sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
304 scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
305 u32 clk_id, u64 *value)
310 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
311 sizeof(__le32), sizeof(u64), &t);
315 put_unaligned_le32(clk_id, t->tx.buf);
317 ret = ph->xops->do_xfer(ph, t);
319 *value = get_unaligned_le64(t->rx.buf);
321 ph->xops->xfer_put(ph, t);
325 static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
326 u32 clk_id, u64 rate)
331 struct scmi_clock_set_rate *cfg;
332 struct clock_info *ci = ph->get_priv(ph);
334 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
338 if (ci->max_async_req &&
339 atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
340 flags |= CLOCK_SET_ASYNC;
343 cfg->flags = cpu_to_le32(flags);
344 cfg->id = cpu_to_le32(clk_id);
345 cfg->value_low = cpu_to_le32(rate & 0xffffffff);
346 cfg->value_high = cpu_to_le32(rate >> 32);
348 if (flags & CLOCK_SET_ASYNC) {
349 ret = ph->xops->do_xfer_with_response(ph, t);
351 struct scmi_msg_resp_set_rate_complete *resp;
354 if (le32_to_cpu(resp->id) == clk_id)
356 "Clk ID %d set async to %llu\n", clk_id,
357 get_unaligned_le64(&resp->rate_low));
362 ret = ph->xops->do_xfer(ph, t);
365 if (ci->max_async_req)
366 atomic_dec(&ci->cur_async_req);
368 ph->xops->xfer_put(ph, t);
373 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
374 u32 config, bool atomic)
378 struct scmi_clock_set_config *cfg;
380 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
381 sizeof(*cfg), 0, &t);
385 t->hdr.poll_completion = atomic;
388 cfg->id = cpu_to_le32(clk_id);
389 cfg->attributes = cpu_to_le32(config);
391 ret = ph->xops->do_xfer(ph, t);
393 ph->xops->xfer_put(ph, t);
397 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
399 return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
402 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
404 return scmi_clock_config_set(ph, clk_id, 0, false);
407 static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
410 return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
413 static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
416 return scmi_clock_config_set(ph, clk_id, 0, true);
419 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
421 struct clock_info *ci = ph->get_priv(ph);
423 return ci->num_clocks;
426 static const struct scmi_clock_info *
427 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
429 struct clock_info *ci = ph->get_priv(ph);
430 struct scmi_clock_info *clk = ci->clk + clk_id;
438 static const struct scmi_clk_proto_ops clk_proto_ops = {
439 .count_get = scmi_clock_count_get,
440 .info_get = scmi_clock_info_get,
441 .rate_get = scmi_clock_rate_get,
442 .rate_set = scmi_clock_rate_set,
443 .enable = scmi_clock_enable,
444 .disable = scmi_clock_disable,
445 .enable_atomic = scmi_clock_enable_atomic,
446 .disable_atomic = scmi_clock_disable_atomic,
449 static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
450 u32 clk_id, int message_id, bool enable)
454 struct scmi_msg_clock_rate_notify *notify;
456 ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
461 notify->clk_id = cpu_to_le32(clk_id);
462 notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
464 ret = ph->xops->do_xfer(ph, t);
466 ph->xops->xfer_put(ph, t);
470 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
471 u8 evt_id, u32 src_id, bool enable)
475 if (evt_id >= ARRAY_SIZE(evt_2_cmd))
478 cmd_id = evt_2_cmd[evt_id];
479 ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
481 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
482 evt_id, src_id, ret);
487 static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
488 u8 evt_id, ktime_t timestamp,
489 const void *payld, size_t payld_sz,
490 void *report, u32 *src_id)
492 const struct scmi_clock_rate_notify_payld *p = payld;
493 struct scmi_clock_rate_notif_report *r = report;
495 if (sizeof(*p) != payld_sz ||
496 (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
497 evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
500 r->timestamp = timestamp;
501 r->agent_id = le32_to_cpu(p->agent_id);
502 r->clock_id = le32_to_cpu(p->clock_id);
503 r->rate = get_unaligned_le64(&p->rate_low);
504 *src_id = r->clock_id;
509 static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
511 struct clock_info *ci = ph->get_priv(ph);
516 return ci->num_clocks;
519 static const struct scmi_event clk_events[] = {
521 .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
522 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
523 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
526 .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
527 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
528 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
532 static const struct scmi_event_ops clk_event_ops = {
533 .get_num_sources = scmi_clk_get_num_sources,
534 .set_notify_enabled = scmi_clk_set_notify_enabled,
535 .fill_custom_report = scmi_clk_fill_custom_report,
538 static const struct scmi_protocol_events clk_protocol_events = {
539 .queue_sz = SCMI_PROTO_QUEUE_SZ,
540 .ops = &clk_event_ops,
542 .num_events = ARRAY_SIZE(clk_events),
545 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
549 struct clock_info *cinfo;
551 ret = ph->xops->version_get(ph, &version);
555 dev_dbg(ph->dev, "Clock Version %d.%d\n",
556 PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
558 cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
562 ret = scmi_clock_protocol_attributes_get(ph, cinfo);
566 cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
567 sizeof(*cinfo->clk), GFP_KERNEL);
571 for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
572 struct scmi_clock_info *clk = cinfo->clk + clkid;
574 ret = scmi_clock_attributes_get(ph, clkid, clk, version);
576 scmi_clock_describe_rates_get(ph, clkid, clk);
579 cinfo->version = version;
580 return ph->set_priv(ph, cinfo);
583 static const struct scmi_protocol scmi_clock = {
584 .id = SCMI_PROTOCOL_CLOCK,
585 .owner = THIS_MODULE,
586 .instance_init = &scmi_clock_protocol_init,
587 .ops = &clk_proto_ops,
588 .events = &clk_protocol_events,
591 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)