// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"
#define MSG_ID_MASK		GENMASK(7, 0)
#define MSG_TYPE_MASK		GENMASK(9, 8)
#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
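
/*
 * Note (illustrative, not part of the original source): the token field is
 * 10 bits wide (bits 27:18), so MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) is 0x3ff
 * and MSG_TOKEN_MAX evaluates to 1024 -- the upper bound on the number of
 * messages that can be tracked by sequence number at any one time.
 */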
enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};
/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	 channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};
/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @handle: Instance of SCMI handle to send to clients
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @minfo: Message info
 * @tx_idr: IDR object to map protocol id to channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info minfo;
	struct idr tx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};
#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};
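
/*
 * Outline of one transfer over the shared-memory area above (descriptive
 * note, not part of the original source): the agent waits for CHANNEL_FREE,
 * clears channel_status to mark the channel busy, writes flags, length,
 * msg_header and the command payload, then rings the mailbox doorbell.
 * The platform processes the command, writes the response (status word
 * followed by any return values) into msg_payload, sets CHANNEL_FREE again
 * and, if interrupts were enabled in flags, raises the completion interrupt
 * that ends up in scmi_rx_callback().
 */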
static const int scmi_linux_errmap[] = {
	/* better than a switch case as long as the error codes are contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};
static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}
static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer.. */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);

	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}
/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
	       FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
	       FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}
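
/*
 * Worked example (added for illustration, not part of the original source):
 * a PROTOCOL_VERSION command (message id 0x0) for the base protocol
 * (protocol id 0x10) using sequence token 1 packs as
 * (0x0 << 0) | (0x1 << 18) | (0x10 << 10) = 0x00044000.
 */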
/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/*
	 * Ideally the channel must be free by now, unless the OS timed out
	 * the last request and the platform continued to process it; wait
	 * until it releases the shared memory, otherwise we may end up
	 * overwriting its response with a new message payload or vice versa.
	 */
	spin_until_cond(ioread32(&mem->channel_status) &
			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);

	memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated xfer on success, else an error pointer.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}
/**
 * scmi_xfer_put() - Release a message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we could probably get away with an smp_mb() and no lock here,
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}
/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}
/**
 * scmi_xfer_get_init() - Allocate and initialise one message
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
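
/*
 * Typical calling sequence for the helpers above, sketched here for
 * illustration (not part of the original source); scmi_version_get() below
 * follows exactly this pattern:
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, msg_id, prot_id, tx_size,
 *				 rx_size, &t);
 *	if (ret)
 *		return ret;
 *
 *	// fill t->tx.buf with the command payload, if any
 *	ret = scmi_do_xfer(handle, t);
 *	if (!ret)
 *		; // parse the response from t->rx.buf
 *
 *	scmi_xfer_put(handle, t);
 *	return ret;
 */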
/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;

	return false;
}
/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released; -EINVAL if a NULL handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
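
/*
 * Sketch of how a client driver would balance the two calls above, added
 * for illustration (not part of the original source; client_probe and
 * client_remove are hypothetical and @dev is a child of the SCMI instance
 * device):
 *
 *	static const struct scmi_handle *handle;
 *
 *	static int client_probe(struct device *dev)
 *	{
 *		handle = scmi_handle_get(dev);
 *		if (!handle)
 *			return -EPROBE_DEFER;
 *		return 0;
 *	}
 *
 *	static void client_remove(struct device *dev)
 *	{
 *		scmi_handle_put(handle);
 *	}
 */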
static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* We may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}
static int scmi_mailbox_check(struct device_node *np)
{
	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
	idr_destroy(&info->tx_idr);

	return ret;
}
static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;

	if (scmi_mailbox_check(np)) {
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = scmi_tx_prepare;
	cl->tx_block = false;
	cl->knows_txdone = true;

	shmem = of_parse_phandle(np, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
		return -EADDRNOTAVAIL;
	}

	/* Transmit channel is first entry i.e. index 0 */
	cinfo->chan = mbox_request_channel(cl, 0);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI Tx mailbox\n");
		return ret;
	}

idr_alloc:
	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}
static void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}
static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};
870 MODULE_ALIAS("platform: arm-scmi");
871 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
872 MODULE_DESCRIPTION("ARM SCMI protocol driver");
873 MODULE_LICENSE("GPL v2");