1 // SPDX-License-Identifier: GPL-2.0
3 * System Control and Management Interface (SCMI) Message Protocol driver
5 * SCMI Message Protocol is used between the System Control Processor(SCP)
6 * and the Application Processors(AP). The Message Handling Unit(MHU)
7 * provides a mechanism for inter-processor communication between SCP's
10 * SCP offers control and management of the core/cluster power states,
11 * various power domain DVFS including the core/cluster, certain system
12 * clocks configuration, thermal sensors and many others.
14 * Copyright (C) 2018-2021 ARM Ltd.
17 #include <linux/bitmap.h>
18 #include <linux/device.h>
19 #include <linux/export.h>
20 #include <linux/idr.h>
22 #include <linux/kernel.h>
23 #include <linux/ktime.h>
24 #include <linux/hashtable.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/of_address.h>
28 #include <linux/of_device.h>
29 #include <linux/processor.h>
30 #include <linux/refcount.h>
31 #include <linux/slab.h>
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/scmi.h>
39 enum scmi_error_codes {
40 SCMI_SUCCESS = 0, /* Success */
41 SCMI_ERR_SUPPORT = -1, /* Not supported */
42 SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
43 SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
44 SCMI_ERR_ENTRY = -4, /* Not found */
45 SCMI_ERR_RANGE = -5, /* Value out of range */
46 SCMI_ERR_BUSY = -6, /* Device busy */
47 SCMI_ERR_COMMS = -7, /* Communication Error */
48 SCMI_ERR_GENERIC = -8, /* Generic Error */
49 SCMI_ERR_HARDWARE = -9, /* Hardware Error */
50 SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
53 /* List of all SCMI devices active in system */
54 static LIST_HEAD(scmi_list);
55 /* Protection for the entire list */
56 static DEFINE_MUTEX(scmi_list_mutex);
57 /* Track the unique id for the transfers for debug & profiling purposes */
58 static atomic_t transfer_last_id;
60 static DEFINE_IDR(scmi_requested_devices);
61 static DEFINE_MUTEX(scmi_requested_devices_mtx);
63 struct scmi_requested_dev {
64 const struct scmi_device_id *id_table;
65 struct list_head node;
69 * struct scmi_xfers_info - Structure to manage transfer information
71 * @xfer_alloc_table: Bitmap table for allocated messages.
72 * Index of this bitmap table is also used for message
73 * sequence identifier.
74 * @xfer_lock: Protection for message allocation
75 * @free_xfers: A free list of xfers available for use. It is initialized with
76 * a number of xfers equal to the maximum allowed in-flight
78 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
79 * currently in-flight messages.
81 struct scmi_xfers_info {
82 unsigned long *xfer_alloc_table;
84 struct hlist_head free_xfers;
85 DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
89 * struct scmi_protocol_instance - Describe an initialized protocol instance.
90 * @handle: Reference to the SCMI handle associated to this protocol instance.
91 * @proto: A reference to the protocol descriptor.
92 * @gid: A reference for per-protocol devres management.
93 * @users: A refcount to track effective users of this protocol.
94 * @priv: Reference for optional protocol private data.
95 * @ph: An embedded protocol handle that will be passed down to protocol
96 * initialization code to identify this instance.
98 * Each protocol is initialized independently once for each SCMI platform in
99 * which it is defined by DT and implemented by the SCMI server FW.
101 struct scmi_protocol_instance {
102 const struct scmi_handle *handle;
103 const struct scmi_protocol *proto;
107 struct scmi_protocol_handle ph;
110 #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
113 * struct scmi_info - Structure representing a SCMI instance
115 * @dev: Device pointer
116 * @desc: SoC description for this instance
117 * @version: SCMI revision information containing protocol version,
118 * implementation version and (sub-)vendor identification.
119 * @handle: Instance of SCMI handle to send to clients
120 * @tx_minfo: Universal Transmit Message management info
121 * @rx_minfo: Universal Receive Message management info
122 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
123 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
124 * @protocols: IDR for protocols' instance descriptors initialized for
125 * this SCMI instance: populated on protocol's first attempted
127 * @protocols_mtx: A mutex to protect protocols instances initialization.
128 * @protocols_imp: List of protocols implemented, currently maximum of
129 * MAX_PROTOCOLS_IMP elements allocated by the base protocol
130 * @active_protocols: IDR storing device_nodes for protocols actually defined
131 * in the DT and confirmed as implemented by fw.
132 * @notify_priv: Pointer to private data structure specific to notifications.
134 * @users: Number of users of this instance
138 const struct scmi_desc *desc;
139 struct scmi_revision_info version;
140 struct scmi_handle handle;
141 struct scmi_xfers_info tx_minfo;
142 struct scmi_xfers_info rx_minfo;
145 struct idr protocols;
146 /* Ensure mutual exclusive access to protocols instance array */
147 struct mutex protocols_mtx;
149 struct idr active_protocols;
151 struct list_head node;
155 #define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
157 static const int scmi_linux_errmap[] = {
158 /* better than switch case as long as return value is continuous */
159 0, /* SCMI_SUCCESS */
160 -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
161 -EINVAL, /* SCMI_ERR_PARAMS */
162 -EACCES, /* SCMI_ERR_ACCESS */
163 -ENOENT, /* SCMI_ERR_ENTRY */
164 -ERANGE, /* SCMI_ERR_RANGE */
165 -EBUSY, /* SCMI_ERR_BUSY */
166 -ECOMM, /* SCMI_ERR_COMMS */
167 -EIO, /* SCMI_ERR_GENERIC */
168 -EREMOTEIO, /* SCMI_ERR_HARDWARE */
169 -EPROTO, /* SCMI_ERR_PROTOCOL */
172 static inline int scmi_to_linux_errno(int errno)
174 int err_idx = -errno;
176 if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
177 return scmi_linux_errmap[err_idx];
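/*
 * Illustrative note (not part of the original code): the table above is
 * indexed directly by the negated SCMI status, e.g.:
 *
 *	scmi_to_linux_errno(SCMI_SUCCESS);	returns 0
 *	scmi_to_linux_errno(SCMI_ERR_RANGE);	returns -ERANGE
 *	scmi_to_linux_errno(-42);		out of map; falls through to the
 *						fallback error in the elided tail
 */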
181 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
184 struct scmi_info *info = handle_to_scmi_info(handle);
186 info->notify_priv = priv;
187 /* Ensure updated protocol private data is visible */
191 void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
193 struct scmi_info *info = handle_to_scmi_info(handle);
195 /* Ensure protocols_private_data has been updated */
197 return info->notify_priv;
201 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
203 * @minfo: Pointer to Tx/Rx Message management info based on channel type
204 * @xfer: The xfer to act upon
206 * Pick the next unused monotonically increasing token and set it into
207 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
208 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
209 * of incorrect association of a late and expired xfer with a live in-flight
210 * transaction, both happening to re-use the same token identifier.
212 * Since the platform is NOT required to answer our requests in order, we
213 * should account for a few rare but possible scenarios:
215 * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
216 * using find_next_zero_bit() starting from the candidate next_token bit
218 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
219 * there are plenty of free tokens at the start, so try a second pass using
220 * find_next_zero_bit() starting from 0.
228 * -----------+----------------------------------------------------------
229 * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
230 * ----------------------------------------------------------------------
234 * Out-of-order pending at start
235 * -----------------------------
237 * |- xfer_id picked, last_token fixed
238 * -----+----------------------------------------------------------------
239 * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
240 * ----------------------------------------------------------------------
245 * Out-of-order pending at end
246 * ---------------------------
248 * |- xfer_id picked, last_token fixed
249 * -----+----------------------------------------------------------------
250 * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
251 * ----------------------------------------------------------------------
255 * Context: Assumes to be called with @xfer_lock already acquired.
257 * Return: 0 on Success or error
259 static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
260 struct scmi_xfer *xfer)
262 unsigned long xfer_id, next_token;
265 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
266 * using the pre-allocated transfer_id as a base.
267 * Note that the global transfer_id is shared across all message types
268 * so there could be holes in the allocated set of monotonic sequence
269 * numbers, but that is going to limit the effectiveness of the
270 * mitigation only in very rare limit conditions.
272 next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
274 /* Pick the next available xfer_id >= next_token */
275 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
276 MSG_TOKEN_MAX, next_token);
277 if (xfer_id == MSG_TOKEN_MAX) {
279 * After heavily out-of-order responses, there are no free
280 * tokens ahead, but only at the start of xfer_alloc_table, so
281 * try again from the beginning.
283 xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
286 * Something is wrong if we got here since there can be a
287 * maximum of (MSG_TOKEN_MAX - 1) in-flight messages,
288 * but we have not found any free token in [0, MSG_TOKEN_MAX - 1].
290 if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
294 /* Update +/- last_token accordingly if we skipped some hole */
295 if (xfer_id != next_token)
296 atomic_add((int)(xfer_id - next_token), &transfer_last_id);
299 set_bit(xfer_id, minfo->xfer_alloc_table);
300 xfer->hdr.seq = (u16)xfer_id;
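/*
 * Worked example (illustrative only, a tiny 8-token table is assumed instead
 * of MSG_TOKEN_MAX): with bits 0, 2, 3, 6 and 7 busy and next_token = 6,
 *
 *	1st pass: find_next_zero_bit(table, 8, 6) -> 8 (no free slot ahead)
 *	2nd pass: find_next_zero_bit(table, 8, 0) -> 1
 *
 * so xfer_id = 1 is picked and transfer_last_id is re-aligned accordingly.
 */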
306 * scmi_xfer_token_clear - Release the token
308 * @minfo: Pointer to Tx/Rx Message management info based on channel type
309 * @xfer: The xfer to act upon
311 static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
312 struct scmi_xfer *xfer)
314 clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
318 * scmi_xfer_get() - Allocate one message
320 * @handle: Pointer to SCMI entity handle
321 * @minfo: Pointer to Tx/Rx Message management info based on channel type
322 * @set_pending: If true a monotonic token is picked and the xfer is added to
323 * the pending hash table.
325 * Helper function which is used by various message functions that are
326 * exposed to clients of this driver for allocating one message.
328 * Picks an xfer from the free list @free_xfers (if any available) and, if
329 * required, sets a monotonically increasing token and stores the inflight xfer
330 * into the @pending_xfers hashtable for later retrieval.
332 * The successfully initialized xfer is refcounted.
334 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
337 * Return: An initialized xfer if all went fine, else an error pointer.
339 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
340 struct scmi_xfers_info *minfo,
345 struct scmi_xfer *xfer;
347 spin_lock_irqsave(&minfo->xfer_lock, flags);
348 if (hlist_empty(&minfo->free_xfers)) {
349 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
350 return ERR_PTR(-ENOMEM);
353 /* grab an xfer from the free_list */
354 xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
355 hlist_del_init(&xfer->node);
358 * Allocate transfer_id early so that it can also be used as a base for
359 * monotonic sequence number generation if needed.
361 xfer->transfer_id = atomic_inc_return(&transfer_last_id);
364 /* Pick and set monotonic token */
365 ret = scmi_xfer_token_set(minfo, xfer);
367 hash_add(minfo->pending_xfers, &xfer->node,
369 xfer->pending = true;
372 "Failed to get monotonic token %d\n", ret);
373 hlist_add_head(&xfer->node, &minfo->free_xfers);
379 refcount_set(&xfer->users, 1);
380 atomic_set(&xfer->busy, SCMI_XFER_FREE);
382 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
388 * __scmi_xfer_put() - Release a message
390 * @minfo: Pointer to Tx/Rx Message management info based on channel type
391 * @xfer: message that was reserved by scmi_xfer_get
393 * After refcount check, possibly release an xfer, clearing the token slot,
394 * removing xfer from @pending_xfers and putting it back into free_xfers.
396 * This holds a spinlock to maintain integrity of internal data structures.
399 __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
403 spin_lock_irqsave(&minfo->xfer_lock, flags);
404 if (refcount_dec_and_test(&xfer->users)) {
406 scmi_xfer_token_clear(minfo, xfer);
407 hash_del(&xfer->node);
408 xfer->pending = false;
410 hlist_add_head(&xfer->node, &minfo->free_xfers);
412 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
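/*
 * Lifecycle sketch (illustrative, using the helpers defined in this file):
 * every successful scmi_xfer_get() must be balanced by a put on the same
 * management info structure, which clears the token and re-parks the xfer:
 *
 *	xfer = scmi_xfer_get(handle, minfo, true);
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	// ...fill xfer->tx, send the message, wait for completion...
 *	__scmi_xfer_put(minfo, xfer);
 */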
416 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
418 * @minfo: Pointer to Tx/Rx Message management info based on channel type
419 * @xfer_id: Token ID to lookup in @pending_xfers
421 * Refcounting is untouched.
423 * Context: Assumes to be called with @xfer_lock already acquired.
425 * Return: A valid xfer on Success or error otherwise
427 static struct scmi_xfer *
428 scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
430 struct scmi_xfer *xfer = NULL;
432 if (test_bit(xfer_id, minfo->xfer_alloc_table))
433 xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
435 return xfer ?: ERR_PTR(-EINVAL);
439 * scmi_msg_response_validate - Validate message type against state of related
442 * @cinfo: A reference to the channel descriptor.
443 * @msg_type: Message type to check
444 * @xfer: A reference to the xfer to validate against @msg_type
446 * This function checks if @msg_type is congruent with the current state of
447 * a pending @xfer; if an asynchronous delayed response is received before the
448 * related synchronous response (Out-of-Order Delayed Response) the missing
449 * synchronous response is assumed to be OK and completed, carrying on with the
450 * Delayed Response: this is done to address the case in which the underlying
451 * SCMI transport can deliver such out-of-order responses.
453 * Context: Assumes to be called with xfer->lock already acquired.
455 * Return: 0 on Success, error otherwise
457 static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
459 struct scmi_xfer *xfer)
462 * Even if a response was indeed expected on this slot at this point,
463 * a buggy platform could wrongly reply feeding us an unexpected
464 * delayed response we're not prepared to handle: bail-out safely
467 if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
469 "Delayed Response for %d not expected! Buggy F/W ?\n",
474 switch (xfer->state) {
475 case SCMI_XFER_SENT_OK:
476 if (msg_type == MSG_TYPE_DELAYED_RESP) {
478 * Delayed Response expected but delivered earlier.
479 * Assume message RESPONSE was OK and skip state.
481 xfer->hdr.status = SCMI_SUCCESS;
482 xfer->state = SCMI_XFER_RESP_OK;
483 complete(&xfer->done);
485 "Received valid OoO Delayed Response for %d\n",
489 case SCMI_XFER_RESP_OK:
490 if (msg_type != MSG_TYPE_DELAYED_RESP)
493 case SCMI_XFER_DRESP_OK:
494 /* No further message expected once in SCMI_XFER_DRESP_OK */
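/*
 * Summary of the per-xfer state progression enforced above, under normal
 * conditions:
 *
 *	SENT_OK --RESP--> RESP_OK --DRESP--> DRESP_OK
 *
 * with the out-of-order shortcut SENT_OK --DRESP--> DRESP_OK tolerated as
 * described in the kernel-doc; any other combination is rejected.
 */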
502 * scmi_xfer_state_update - Update xfer state
504 * @xfer: A reference to the xfer to update
505 * @msg_type: Type of message being processed.
507 * Note that this message is assumed to have been already successfully validated
508 * by @scmi_msg_response_validate(), so here we just update the state.
510 * Context: Assumes to be called on an xfer exclusively acquired using the
513 static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
515 xfer->hdr.type = msg_type;
517 /* Unknown command types were already discarded earlier */
518 if (xfer->hdr.type == MSG_TYPE_COMMAND)
519 xfer->state = SCMI_XFER_RESP_OK;
521 xfer->state = SCMI_XFER_DRESP_OK;
524 static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
528 ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
530 return ret == SCMI_XFER_FREE;
534 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
536 * @cinfo: A reference to the channel descriptor.
537 * @msg_hdr: A message header to use as lookup key
539 * When a valid xfer is found for the sequence number embedded in the provided
540 * msg_hdr, reference counting is properly updated and exclusive access to this
541 * xfer is granted till released with @scmi_xfer_command_release.
543 * Return: A valid @xfer on Success or error otherwise.
545 static inline struct scmi_xfer *
546 scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
550 struct scmi_xfer *xfer;
551 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
552 struct scmi_xfers_info *minfo = &info->tx_minfo;
553 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
554 u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
556 /* Are we even expecting this? */
557 spin_lock_irqsave(&minfo->xfer_lock, flags);
558 xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
561 "Message for %d type %d is not expected!\n",
563 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
566 refcount_inc(&xfer->users);
567 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
569 spin_lock_irqsave(&xfer->lock, flags);
570 ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
572 * If a pending xfer was found which was also in a congruent state with
573 * the received message, acquire exclusive access to it by setting the busy
575 * Spins only on the rare limit condition of concurrent reception of
576 * RESP and DRESP for the same xfer.
579 spin_until_cond(scmi_xfer_acquired(xfer));
580 scmi_xfer_state_update(xfer, msg_type);
582 spin_unlock_irqrestore(&xfer->lock, flags);
586 "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
587 msg_type, xfer_id, msg_hdr, xfer->state);
588 /* On error the refcount incremented above has to be dropped */
589 __scmi_xfer_put(minfo, xfer);
590 xfer = ERR_PTR(-EINVAL);
596 static inline void scmi_xfer_command_release(struct scmi_info *info,
597 struct scmi_xfer *xfer)
599 atomic_set(&xfer->busy, SCMI_XFER_FREE);
600 __scmi_xfer_put(&info->tx_minfo, xfer);
603 static inline void scmi_clear_channel(struct scmi_info *info,
604 struct scmi_chan_info *cinfo)
606 if (info->desc->ops->clear_channel)
607 info->desc->ops->clear_channel(cinfo);
610 static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
612 struct scmi_xfer *xfer;
613 struct device *dev = cinfo->dev;
614 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
615 struct scmi_xfers_info *minfo = &info->rx_minfo;
618 ts = ktime_get_boottime();
619 xfer = scmi_xfer_get(cinfo->handle, minfo, false);
621 dev_err(dev, "failed to get free message slot (%ld)\n",
623 scmi_clear_channel(info, cinfo);
627 unpack_scmi_header(msg_hdr, &xfer->hdr);
628 info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
630 scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
631 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
633 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
634 xfer->hdr.protocol_id, xfer->hdr.seq,
635 MSG_TYPE_NOTIFICATION);
637 __scmi_xfer_put(minfo, xfer);
639 scmi_clear_channel(info, cinfo);
642 static void scmi_handle_response(struct scmi_chan_info *cinfo, u32 msg_hdr)
644 struct scmi_xfer *xfer;
645 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
647 xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
649 scmi_clear_channel(info, cinfo);
653 /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
654 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
655 xfer->rx.len = info->desc->max_msg_size;
657 info->desc->ops->fetch_response(cinfo, xfer);
659 trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
660 xfer->hdr.protocol_id, xfer->hdr.seq,
663 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
664 scmi_clear_channel(info, cinfo);
665 complete(xfer->async_done);
667 complete(&xfer->done);
670 scmi_xfer_command_release(info, xfer);
674 * scmi_rx_callback() - callback for receiving messages
676 * @cinfo: SCMI channel info
677 * @msg_hdr: Message header
679 * Processes one received message, routing it to the appropriate transfer
680 * information, and signals completion of the transfer.
682 * NOTE: This function will be invoked in IRQ context, hence should be
683 * as optimal as possible.
685 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
687 u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
690 case MSG_TYPE_NOTIFICATION:
691 scmi_handle_notification(cinfo, msg_hdr);
693 case MSG_TYPE_COMMAND:
694 case MSG_TYPE_DELAYED_RESP:
695 scmi_handle_response(cinfo, msg_hdr);
698 WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
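/*
 * Transport-side sketch (illustrative): a transport's RX completion path,
 * e.g. a mailbox rx_callback or the SMC return path, is expected to rebuild
 * the 32-bit message header and feed it here; shmem_read_header() below is a
 * hypothetical helper standing in for the transport specifics.
 *
 *	static void my_transport_rx(struct scmi_chan_info *cinfo)
 *	{
 *		u32 msg_hdr = shmem_read_header(cinfo->transport_info);
 *
 *		scmi_rx_callback(cinfo, msg_hdr);
 *	}
 */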
704 * xfer_put() - Release a transmit message
706 * @ph: Pointer to SCMI protocol handle
707 * @xfer: message that was reserved by xfer_get_init
709 static void xfer_put(const struct scmi_protocol_handle *ph,
710 struct scmi_xfer *xfer)
712 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
713 struct scmi_info *info = handle_to_scmi_info(pi->handle);
715 __scmi_xfer_put(&info->tx_minfo, xfer);
718 #define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
720 static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
721 struct scmi_xfer *xfer, ktime_t stop)
723 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
726 * Poll also on xfer->done so that polling can be forcibly terminated
727 * in case of out-of-order receptions of delayed responses
729 return info->desc->ops->poll_done(cinfo, xfer) ||
730 try_wait_for_completion(&xfer->done) ||
731 ktime_after(ktime_get(), stop);
735 * do_xfer() - Do one transfer
737 * @ph: Pointer to SCMI protocol handle
738 * @xfer: Transfer to initiate and wait for response
740 * Return: -ETIMEDOUT in case of no response; if a transmit error occurs,
741 * return the corresponding error, else if all goes well, return 0.
744 static int do_xfer(const struct scmi_protocol_handle *ph,
745 struct scmi_xfer *xfer)
749 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
750 struct scmi_info *info = handle_to_scmi_info(pi->handle);
751 struct device *dev = info->dev;
752 struct scmi_chan_info *cinfo;
755 * Initialise protocol id now from protocol handle to avoid it being
756 * overridden by mistake (or malice) by the protocol code mangling the
757 * scmi_xfer structure prior to this.
759 xfer->hdr.protocol_id = pi->proto->id;
760 reinit_completion(&xfer->done);
762 cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
763 if (unlikely(!cinfo))
766 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
767 xfer->hdr.protocol_id, xfer->hdr.seq,
768 xfer->hdr.poll_completion);
770 xfer->state = SCMI_XFER_SENT_OK;
772 * Even though spinlocking is not needed here since no race is possible
773 * on xfer->state due to the monotonically increasing tokens allocation,
774 * we must anyway ensure xfer->state initialization is not re-ordered
775 * after the .send_message() to be sure that on the RX path an early
776 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
780 ret = info->desc->ops->send_message(cinfo, xfer);
782 dev_dbg(dev, "Failed to send message %d\n", ret);
786 if (xfer->hdr.poll_completion) {
787 ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
789 spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
791 if (ktime_before(ktime_get(), stop)) {
795 * Do not fetch_response if an out-of-order delayed
796 * response is being processed.
798 spin_lock_irqsave(&xfer->lock, flags);
799 if (xfer->state == SCMI_XFER_SENT_OK) {
800 info->desc->ops->fetch_response(cinfo, xfer);
801 xfer->state = SCMI_XFER_RESP_OK;
803 spin_unlock_irqrestore(&xfer->lock, flags);
808 /* And we wait for the response. */
809 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
810 if (!wait_for_completion_timeout(&xfer->done, timeout)) {
811 dev_err(dev, "timed out in resp(caller: %pS)\n",
817 if (!ret && xfer->hdr.status)
818 ret = scmi_to_linux_errno(xfer->hdr.status);
820 if (info->desc->ops->mark_txdone)
821 info->desc->ops->mark_txdone(cinfo, ret);
823 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
824 xfer->hdr.protocol_id, xfer->hdr.seq, ret);
829 static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
830 struct scmi_xfer *xfer)
832 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
833 struct scmi_info *info = handle_to_scmi_info(pi->handle);
835 xfer->rx.len = info->desc->max_msg_size;
838 #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
841 * do_xfer_with_response() - Do one transfer and wait until the delayed
842 * response is received
844 * @ph: Pointer to SCMI protocol handle
845 * @xfer: Transfer to initiate and wait for response
847 * Return: -ETIMEDOUT in case of no delayed response; if a transmit error
848 * occurs, return the corresponding error, else if all goes well, return 0.
850 static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
851 struct scmi_xfer *xfer)
853 int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
854 DECLARE_COMPLETION_ONSTACK(async_response);
856 xfer->async_done = &async_response;
858 ret = do_xfer(ph, xfer);
860 if (!wait_for_completion_timeout(xfer->async_done, timeout))
862 else if (xfer->hdr.status)
863 ret = scmi_to_linux_errno(xfer->hdr.status);
866 xfer->async_done = NULL;
871 * xfer_get_init() - Allocate and initialise one message for transmit
873 * @ph: Pointer to SCMI protocol handle
874 * @msg_id: Message identifier
875 * @tx_size: transmit message size
876 * @rx_size: receive message size
877 * @p: pointer to the allocated and initialised message
879 * This function allocates the message using @scmi_xfer_get and
880 * initialises the header.
882 * Return: 0 if all went fine with @p pointing to message, else
883 * corresponding error.
885 static int xfer_get_init(const struct scmi_protocol_handle *ph,
886 u8 msg_id, size_t tx_size, size_t rx_size,
887 struct scmi_xfer **p)
890 struct scmi_xfer *xfer;
891 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
892 struct scmi_info *info = handle_to_scmi_info(pi->handle);
893 struct scmi_xfers_info *minfo = &info->tx_minfo;
894 struct device *dev = info->dev;
896 /* Ensure we have sane transfer sizes */
897 if (rx_size > info->desc->max_msg_size ||
898 tx_size > info->desc->max_msg_size)
901 xfer = scmi_xfer_get(pi->handle, minfo, true);
904 dev_err(dev, "failed to get free message slot(%d)\n", ret);
908 xfer->tx.len = tx_size;
909 xfer->rx.len = rx_size ? : info->desc->max_msg_size;
910 xfer->hdr.type = MSG_TYPE_COMMAND;
911 xfer->hdr.id = msg_id;
912 xfer->hdr.poll_completion = false;
920 * version_get() - command to get the revision of the SCMI entity
922 * @ph: Pointer to SCMI protocol handle
923 * @version: Holds returned version of protocol.
925 * Updates the SCMI information in the internal data structure.
927 * Return: 0 if all went fine, else return appropriate error.
929 static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
935 ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
939 ret = do_xfer(ph, t);
941 rev_info = t->rx.buf;
942 *version = le32_to_cpu(*rev_info);
950 * scmi_set_protocol_priv - Set protocol specific data at init time
952 * @ph: A reference to the protocol handle.
953 * @priv: The private data to set.
955 * Return: 0 on Success
957 static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
960 struct scmi_protocol_instance *pi = ph_to_pi(ph);
968 * scmi_get_protocol_priv - Get protocol specific data set at init time
970 * @ph: A reference to the protocol handle.
972 * Return: Protocol private data if any was set.
974 static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
976 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
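/*
 * Typical pairing (illustrative, 'my_priv' is hypothetical): a protocol
 * stashes its private state once at instance_init time and retrieves it in
 * every subsequent operation through the same handle:
 *
 *	ph->set_priv(ph, my_priv);		// from proto->instance_init()
 *	struct my_priv *p = ph->get_priv(ph);	// from any protocol operation
 */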
981 static const struct scmi_xfer_ops xfer_ops = {
982 .version_get = version_get,
983 .xfer_get_init = xfer_get_init,
984 .reset_rx_to_maxsz = reset_rx_to_maxsz,
986 .do_xfer_with_response = do_xfer_with_response,
987 .xfer_put = xfer_put,
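/*
 * Usage sketch of the ops assembled above (illustrative; MY_MSG_ID and the
 * reply layout are hypothetical): a protocol implementation drives a command
 * through the scmi_protocol_handle it was initialized with.
 *
 *	struct scmi_xfer *t;
 *	__le32 *reply;
 *	int ret;
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_MSG_ID, 0, sizeof(*reply), &t);
 *	if (ret)
 *		return ret;
 *
 *	ret = ph->xops->do_xfer(ph, t);
 *	if (!ret)
 *		reply = t->rx.buf;	// then le32_to_cpu(*reply) as needed
 *
 *	ph->xops->xfer_put(ph, t);
 */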
991 * scmi_revision_area_get - Retrieve version memory area.
993 * @ph: A reference to the protocol handle.
995 * A helper to grab the version memory area reference during SCMI Base protocol
998 * Return: A reference to the version memory area associated with the SCMI
999 * instance underlying this protocol handle.
1001 struct scmi_revision_info *
1002 scmi_revision_area_get(const struct scmi_protocol_handle *ph)
1004 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1006 return pi->handle->version;
1010 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
1011 * instance descriptor.
1012 * @info: The reference to the related SCMI instance.
1013 * @proto: The protocol descriptor.
1015 * Allocate a new protocol instance descriptor, using the provided @proto
1016 * description, against the specified SCMI instance @info, and initialize it;
1017 * all resource management is handled via a dedicated per-protocol devres
1020 * Context: Assumes to be called with @protocols_mtx already acquired.
1021 * Return: A reference to a freshly allocated and initialized protocol instance
1022 * or ERR_PTR on failure. On failure the @proto reference is at first
1023 * put using @scmi_protocol_put() before releasing all the devres group.
1025 static struct scmi_protocol_instance *
1026 scmi_alloc_init_protocol_instance(struct scmi_info *info,
1027 const struct scmi_protocol *proto)
1031 struct scmi_protocol_instance *pi;
1032 const struct scmi_handle *handle = &info->handle;
1034 /* Protocol specific devres group */
1035 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1037 scmi_protocol_put(proto->id);
1041 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
1047 pi->handle = handle;
1048 pi->ph.dev = handle->dev;
1049 pi->ph.xops = &xfer_ops;
1050 pi->ph.set_priv = scmi_set_protocol_priv;
1051 pi->ph.get_priv = scmi_get_protocol_priv;
1052 refcount_set(&pi->users, 1);
1053 /* proto->init is assured NON NULL by scmi_protocol_register */
1054 ret = pi->proto->instance_init(&pi->ph);
1058 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
1060 if (ret != proto->id)
1064 * Warn but ignore events registration errors since we do not want
1065 * to skip whole protocols if their notifications are messed up.
1067 if (pi->proto->events) {
1068 ret = scmi_register_protocol_events(handle, pi->proto->id,
1072 dev_warn(handle->dev,
1073 "Protocol:%X - Events Registration Failed - err:%d\n",
1074 pi->proto->id, ret);
1077 devres_close_group(handle->dev, pi->gid);
1078 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
1083 /* Take care to put the protocol module's owner before releasing all */
1084 scmi_protocol_put(proto->id);
1085 devres_release_group(handle->dev, gid);
1087 return ERR_PTR(ret);
1091 * scmi_get_protocol_instance - Protocol initialization helper.
1092 * @handle: A reference to the SCMI platform instance.
1093 * @protocol_id: The protocol being requested.
1095 * In case the required protocol has never been requested before for this
1096 * instance, allocate and initialize all the needed structures while handling
1097 * resource allocation with a dedicated per-protocol devres subgroup.
1099 * Return: A reference to an initialized protocol instance or error on failure:
1100 * in particular returns -EPROBE_DEFER when the desired protocol could
1103 static struct scmi_protocol_instance * __must_check
1104 scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
1106 struct scmi_protocol_instance *pi;
1107 struct scmi_info *info = handle_to_scmi_info(handle);
1109 mutex_lock(&info->protocols_mtx);
1110 pi = idr_find(&info->protocols, protocol_id);
1113 refcount_inc(&pi->users);
1115 const struct scmi_protocol *proto;
1117 /* Fails if protocol not registered on bus */
1118 proto = scmi_protocol_get(protocol_id);
1120 pi = scmi_alloc_init_protocol_instance(info, proto);
1122 pi = ERR_PTR(-EPROBE_DEFER);
1124 mutex_unlock(&info->protocols_mtx);
1130 * scmi_protocol_acquire - Protocol acquire
1131 * @handle: A reference to the SCMI platform instance.
1132 * @protocol_id: The protocol being requested.
1134 * Register a new user for the requested protocol on the specified SCMI
1135 * platform instance, possibly triggering its initialization on first user.
1137 * Return: 0 if protocol was acquired successfully.
1139 int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
1141 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
1145 * scmi_protocol_release - Protocol de-initialization helper.
1146 * @handle: A reference to the SCMI platform instance.
1147 * @protocol_id: The protocol being requested.
1149 * Remove one user for the specified protocol and trigger de-initialization
1150 * and resource de-allocation once the last user has gone.
1152 void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
1154 struct scmi_info *info = handle_to_scmi_info(handle);
1155 struct scmi_protocol_instance *pi;
1157 mutex_lock(&info->protocols_mtx);
1158 pi = idr_find(&info->protocols, protocol_id);
1162 if (refcount_dec_and_test(&pi->users)) {
1163 void *gid = pi->gid;
1165 if (pi->proto->events)
1166 scmi_deregister_protocol_events(handle, protocol_id);
1168 if (pi->proto->instance_deinit)
1169 pi->proto->instance_deinit(&pi->ph);
1171 idr_remove(&info->protocols, protocol_id);
1173 scmi_protocol_put(protocol_id);
1175 devres_release_group(handle->dev, gid);
1176 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
1181 mutex_unlock(&info->protocols_mtx);
1184 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
1187 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1188 struct scmi_info *info = handle_to_scmi_info(pi->handle);
1190 info->protocols_imp = prot_imp;
1194 scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
1197 struct scmi_info *info = handle_to_scmi_info(handle);
1199 if (!info->protocols_imp)
1202 for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
1203 if (info->protocols_imp[i] == prot_id)
1208 struct scmi_protocol_devres {
1209 const struct scmi_handle *handle;
1213 static void scmi_devm_release_protocol(struct device *dev, void *res)
1215 struct scmi_protocol_devres *dres = res;
1217 scmi_protocol_release(dres->handle, dres->protocol_id);
1221 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
1222 * @sdev: A reference to an scmi_device whose embedded struct device is to
1223 * be used for devres accounting.
1224 * @protocol_id: The protocol being requested.
1225 * @ph: A pointer reference used to pass back the associated protocol handle.
1227 * Get hold of a protocol, accounting for its usage, possibly triggering its
1228 * initialization, and returning the protocol specific operations and related
1229 * protocol handle which will be used as first argument in most of the
1230 * protocols operations methods.
1231 * Being a devres-based managed method, the protocol hold will be automatically
1232 * released, and possibly de-initialized on the last user, once the SCMI driver
1233 * owning the scmi_device is unbound from it.
1235 * Return: A reference to the requested protocol operations or error.
1236 * Must be checked for errors by caller.
1238 static const void __must_check *
1239 scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
1240 struct scmi_protocol_handle **ph)
1242 struct scmi_protocol_instance *pi;
1243 struct scmi_protocol_devres *dres;
1244 struct scmi_handle *handle = sdev->handle;
1247 return ERR_PTR(-EINVAL);
1249 dres = devres_alloc(scmi_devm_release_protocol,
1250 sizeof(*dres), GFP_KERNEL);
1252 return ERR_PTR(-ENOMEM);
1254 pi = scmi_get_protocol_instance(handle, protocol_id);
1260 dres->handle = handle;
1261 dres->protocol_id = protocol_id;
1262 devres_add(&sdev->dev, dres);
1266 return pi->proto->ops;
1269 static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
1271 struct scmi_protocol_devres *dres = res;
1273 if (WARN_ON(!dres || !data))
1276 return dres->protocol_id == *((u8 *)data);
1280 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
1281 * @sdev: A reference to an scmi_device whose embedded struct device is to
1282 * be used for devres accounting.
1283 * @protocol_id: The protocol being requested.
1285 * Explicitly release a protocol hold previously obtained by calling the above
1286 * @scmi_devm_protocol_get.
1288 static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
1292 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
1293 scmi_devm_protocol_match, &protocol_id);
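/*
 * Client-side usage sketch for the devres-managed accessors above
 * (illustrative; the clock protocol is just an example, any registered
 * protocol ops apply):
 *
 *	static int my_scmi_drv_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *clk_ops;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *						SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *
 *		// protocol hold auto-released on unbind, or earlier via
 *		// sdev->handle->devm_protocol_put(sdev, SCMI_PROTOCOL_CLOCK);
 *		return 0;
 *	}
 */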
1298 struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
1301 return &info->handle;
1305 * scmi_handle_get() - Get the SCMI handle for a device
1307 * @dev: pointer to device for which we want SCMI handle
1309 * NOTE: The function does not track individual clients of the framework
1310 * and is expected to be maintained by the caller of the SCMI protocol library.
1311 * scmi_handle_put must be balanced with a successful scmi_handle_get
1313 * Return: pointer to handle if successful, NULL on error
1315 struct scmi_handle *scmi_handle_get(struct device *dev)
1317 struct list_head *p;
1318 struct scmi_info *info;
1319 struct scmi_handle *handle = NULL;
1321 mutex_lock(&scmi_list_mutex);
1322 list_for_each(p, &scmi_list) {
1323 info = list_entry(p, struct scmi_info, node);
1324 if (dev->parent == info->dev) {
1325 handle = scmi_handle_get_from_info_unlocked(info);
1329 mutex_unlock(&scmi_list_mutex);
1335 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
1337 * @handle: handle acquired by scmi_handle_get
1339 * NOTE: The function does not track individual clients of the framework
1340 * and is expected to be maintained by the caller of the SCMI protocol library.
1341 * scmi_handle_put must be balanced with a successful scmi_handle_get
1343 * Return: 0 if successfully released,
1344 * -EINVAL if NULL was passed.
1346 int scmi_handle_put(const struct scmi_handle *handle)
1348 struct scmi_info *info;
1353 info = handle_to_scmi_info(handle);
1354 mutex_lock(&scmi_list_mutex);
1355 if (!WARN_ON(!info->users))
1357 mutex_unlock(&scmi_list_mutex);
1362 static int __scmi_xfer_info_init(struct scmi_info *sinfo,
1363 struct scmi_xfers_info *info)
1366 struct scmi_xfer *xfer;
1367 struct device *dev = sinfo->dev;
1368 const struct scmi_desc *desc = sinfo->desc;
1370 /* Pre-allocated messages, no more than what hdr.seq can support */
1371 if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
1373 "Invalid maximum messages %d, not in range [1 - %lu]\n",
1374 desc->max_msg, MSG_TOKEN_MAX);
1378 hash_init(info->pending_xfers);
1380 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
1381 info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
1382 sizeof(long), GFP_KERNEL);
1383 if (!info->xfer_alloc_table)
1387 * Preallocate a number of xfers equal to max inflight messages,
1388 * pre-initialize the buffer pointer to pre-allocated buffers and
1389 * attach all of them to the free list
1391 INIT_HLIST_HEAD(&info->free_xfers);
1392 for (i = 0; i < desc->max_msg; i++) {
1393 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
1397 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
1402 xfer->tx.buf = xfer->rx.buf;
1403 init_completion(&xfer->done);
1404 spin_lock_init(&xfer->lock);
1406 /* Add initialized xfer to the free list */
1407 hlist_add_head(&xfer->node, &info->free_xfers);
1410 spin_lock_init(&info->xfer_lock);
1415 static int scmi_xfer_info_init(struct scmi_info *sinfo)
1417 int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
1419 if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
1420 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
1425 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
1426 int prot_id, bool tx)
1429 struct scmi_chan_info *cinfo;
1432 /* Transmit channel is first entry i.e. index 0 */
1434 idr = tx ? &info->tx_idr : &info->rx_idr;
1436 /* check if already allocated, used for multiple devices per protocol */
1437 cinfo = idr_find(idr, prot_id);
1441 if (!info->desc->ops->chan_available(dev, idx)) {
1442 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
1443 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
1448 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
1454 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
1459 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
1460 if (ret != prot_id) {
1461 dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
1465 cinfo->handle = &info->handle;
1470 scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
1472 int ret = scmi_chan_setup(info, dev, prot_id, true);
1474 if (!ret) /* Rx is optional, hence no error check */
1475 scmi_chan_setup(info, dev, prot_id, false);
1481 * scmi_get_protocol_device - Helper to get/create an SCMI device.
1483 * @np: A device node representing a valid active protocol for the referred
1485 * @info: The referred SCMI instance for which we are getting/creating this
1487 * @prot_id: The protocol ID.
1488 * @name: The device name.
1490 * Referring to the specific SCMI instance identified by @info, this helper
1491 * takes care to return a properly initialized device matching the requested
1492 * @prot_id and @name: if the device does not exist yet, it is created as a
1493 * child of the specified SCMI instance @info and its transport is properly
1494 * initialized as usual.
1496 * Return: A properly initialized scmi device, NULL otherwise.
1498 static inline struct scmi_device *
1499 scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
1500 int prot_id, const char *name)
1502 struct scmi_device *sdev;
1504 /* Already created for this parent SCMI instance ? */
1505 sdev = scmi_child_dev_find(info->dev, prot_id, name);
1509 pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
1511 sdev = scmi_device_create(np, info->dev, prot_id, name);
1513 dev_err(info->dev, "failed to create %d protocol device\n",
1518 if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
1519 dev_err(&sdev->dev, "failed to setup transport\n");
1520 scmi_device_destroy(sdev);
1528 scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
1529 int prot_id, const char *name)
1531 struct scmi_device *sdev;
1533 sdev = scmi_get_protocol_device(np, info, prot_id, name);
1537 /* setup handle now as the transport is ready */
1538 scmi_set_handle(sdev);
1542 * scmi_create_protocol_devices - Create devices for all pending requests for
1543 * this SCMI instance.
1545 * @np: The device node describing the protocol
1546 * @info: The SCMI instance descriptor
1547 * @prot_id: The protocol ID
1549 * All devices previously requested for this instance (if any) are found and
1550 * created by scanning the proper @&scmi_requested_devices entry.
1552 static void scmi_create_protocol_devices(struct device_node *np,
1553 struct scmi_info *info, int prot_id)
1555 struct list_head *phead;
1557 mutex_lock(&scmi_requested_devices_mtx);
1558 phead = idr_find(&scmi_requested_devices, prot_id);
1560 struct scmi_requested_dev *rdev;
1562 list_for_each_entry(rdev, phead, node)
1563 scmi_create_protocol_device(np, info, prot_id,
1564 rdev->id_table->name);
1566 mutex_unlock(&scmi_requested_devices_mtx);
1570 * scmi_protocol_device_request - Helper to request a device
1572 * @id_table: A protocol/name pair descriptor for the device to be created.
1574 * This helper lets an SCMI driver request specific devices identified by the
1575 * @id_table to be created for each active SCMI instance.
1577 * The requested device name MUST NOT already exist for any protocol;
1578 * at first the freshly requested @id_table is annotated in the IDR table
1579 * @scmi_requested_devices, then a matching device is created for each already
1580 * active SCMI instance (if any).
1582 * This way the requested device is created straight away for all the already
1583 * initialized (probed) SCMI instances (handles), and it also remains annotated
1584 * as pending creation if the requesting SCMI driver was loaded before some
1585 * SCMI instance and related transports were available: when such late instance
1586 * is probed, its probe will take care to scan the list of pending requested
1587 * devices and create those on its own (see @scmi_create_protocol_devices and
1588 * its enclosing loop)
1590 * Return: 0 on Success
1592 int scmi_protocol_device_request(const struct scmi_device_id *id_table)
1595 unsigned int id = 0;
1596 struct list_head *head, *phead = NULL;
1597 struct scmi_requested_dev *rdev;
1598 struct scmi_info *info;
1600 pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1601 id_table->name, id_table->protocol_id);
1604 * Search for the matching protocol rdev list and then search
1605 * for any existing equally named device... fail if any duplicate is found.
1607 mutex_lock(&scmi_requested_devices_mtx);
1608 idr_for_each_entry(&scmi_requested_devices, head, id) {
1610 /* A list found registered in the IDR is never empty */
1611 rdev = list_first_entry(head, struct scmi_requested_dev,
1613 if (rdev->id_table->protocol_id ==
1614 id_table->protocol_id)
1617 list_for_each_entry(rdev, head, node) {
1618 if (!strcmp(rdev->id_table->name, id_table->name)) {
1619 pr_err("Ignoring duplicate request [%d] %s\n",
1620 rdev->id_table->protocol_id,
1621 rdev->id_table->name);
1629 * No duplicate found for requested id_table, so let's create a new
1630 * requested device entry for this new valid request.
1632 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1637 rdev->id_table = id_table;
1640 * Append the new requested device table descriptor to the head of the
1641 * related protocol list, possibly creating such a head if not already
1645 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1651 INIT_LIST_HEAD(phead);
1653 ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1654 id_table->protocol_id,
1655 id_table->protocol_id + 1, GFP_KERNEL);
1656 if (ret != id_table->protocol_id) {
1657 pr_err("Failed to save SCMI device - ret:%d\n", ret);
1665 list_add(&rdev->node, phead);
1668 * Now effectively create and initialize the requested device for every
1669 * already initialized SCMI instance which has registered the requested
1670 * protocol as a valid active one: i.e. defined in DT and supported by
1671 * current platform FW.
1673 mutex_lock(&scmi_list_mutex);
1674 list_for_each_entry(info, &scmi_list, node) {
1675 struct device_node *child;
1677 child = idr_find(&info->active_protocols,
1678 id_table->protocol_id);
1680 struct scmi_device *sdev;
1682 sdev = scmi_get_protocol_device(child, info,
1683 id_table->protocol_id,
1685 /* Set handle if not already set: device existed */
1686 if (sdev && !sdev->handle)
1688 scmi_handle_get_from_info_unlocked(info);
1691 "Failed. SCMI protocol %d not active.\n",
1692 id_table->protocol_id);
1695 mutex_unlock(&scmi_list_mutex);
1698 mutex_unlock(&scmi_requested_devices_mtx);
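/*
 * Request-side sketch (illustrative; this helper is normally reached through
 * the SCMI bus glue when an SCMI driver registers): the driver advertises the
 * protocol/name pair it needs and a matching child device is created for each
 * active SCMI instance. The device name below is hypothetical.
 *
 *	static const struct scmi_device_id my_id_table[] = {
 *		{ SCMI_PROTOCOL_PERF, "my-perf-consumer" },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(scmi, my_id_table);
 */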
1704 * scmi_protocol_device_unrequest - Helper to unrequest a device
1706 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1708 * A helper to let an SCMI driver release its request about devices; note that
1709 * devices are created and initialized once the first SCMI driver requests them
1710 * but they are destroyed only on SCMI core unloading/unbinding.
1712 * The current SCMI transport layer uses such devices as internal references and
1713 * as such they could be shared as the same transport between multiple drivers,
1714 * so they cannot be safely destroyed until the whole SCMI stack is removed
1715 * (unless adding the further burden of refcounting).
1717 void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
1719 struct list_head *phead;
1721 pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1722 id_table->name, id_table->protocol_id);
1724 mutex_lock(&scmi_requested_devices_mtx);
1725 phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1727 struct scmi_requested_dev *victim, *tmp;
1729 list_for_each_entry_safe(victim, tmp, phead, node) {
1730 if (!strcmp(victim->id_table->name, id_table->name)) {
1731 list_del(&victim->node);
1737 if (list_empty(phead)) {
1738 idr_remove(&scmi_requested_devices,
1739 id_table->protocol_id);
1743 mutex_unlock(&scmi_requested_devices_mtx);
1746 static int scmi_probe(struct platform_device *pdev)
1749 struct scmi_handle *handle;
1750 const struct scmi_desc *desc;
1751 struct scmi_info *info;
1752 struct device *dev = &pdev->dev;
1753 struct device_node *child, *np = dev->of_node;
1755 desc = of_device_get_match_data(dev);
1759 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1765 INIT_LIST_HEAD(&info->node);
1766 idr_init(&info->protocols);
1767 mutex_init(&info->protocols_mtx);
1768 idr_init(&info->active_protocols);
1770 platform_set_drvdata(pdev, info);
1771 idr_init(&info->tx_idr);
1772 idr_init(&info->rx_idr);
1774 handle = &info->handle;
1775 handle->dev = info->dev;
1776 handle->version = &info->version;
1777 handle->devm_protocol_get = scmi_devm_protocol_get;
1778 handle->devm_protocol_put = scmi_devm_protocol_put;
1780 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
1784 ret = scmi_xfer_info_init(info);
1788 if (scmi_notification_init(handle))
1789 dev_err(dev, "SCMI Notifications NOT available.\n");
1792 * Trigger SCMI Base protocol initialization.
1793 * It's mandatory and won't ever be released/de-initialized until the
1794 * SCMI stack is shut down/unloaded as a whole.
1796 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
1798 dev_err(dev, "unable to communicate with SCMI\n");
1802 mutex_lock(&scmi_list_mutex);
1803 list_add_tail(&info->node, &scmi_list);
1804 mutex_unlock(&scmi_list_mutex);
1806 for_each_available_child_of_node(np, child) {
1809 if (of_property_read_u32(child, "reg", &prot_id))
1812 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
1813 dev_err(dev, "Out of range protocol %d\n", prot_id);
1815 if (!scmi_is_protocol_implemented(handle, prot_id)) {
1816 dev_err(dev, "SCMI protocol %d not implemented\n",
1822 * Save this valid DT protocol descriptor amongst
1823 * @active_protocols for this SCMI instance.
1825 ret = idr_alloc(&info->active_protocols, child,
1826 prot_id, prot_id + 1, GFP_KERNEL);
1827 if (ret != prot_id) {
1828 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
1834 scmi_create_protocol_devices(child, info, prot_id);
1840 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
1842 idr_remove(idr, id);
1845 static int scmi_remove(struct platform_device *pdev)
1848 struct scmi_info *info = platform_get_drvdata(pdev);
1849 struct idr *idr = &info->tx_idr;
1850 struct device_node *child;
1852 mutex_lock(&scmi_list_mutex);
1856 list_del(&info->node);
1857 mutex_unlock(&scmi_list_mutex);
1862 scmi_notification_exit(&info->handle);
1864 mutex_lock(&info->protocols_mtx);
1865 idr_destroy(&info->protocols);
1866 mutex_unlock(&info->protocols_mtx);
1868 idr_for_each_entry(&info->active_protocols, child, id)
1870 idr_destroy(&info->active_protocols);
1872 /* Safe to free channels since no more users */
1873 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1874 idr_destroy(&info->tx_idr);
1876 idr = &info->rx_idr;
1877 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1878 idr_destroy(&info->rx_idr);
1883 static ssize_t protocol_version_show(struct device *dev,
1884 struct device_attribute *attr, char *buf)
1886 struct scmi_info *info = dev_get_drvdata(dev);
1888 return sprintf(buf, "%u.%u\n", info->version.major_ver,
1889 info->version.minor_ver);
1891 static DEVICE_ATTR_RO(protocol_version);
1893 static ssize_t firmware_version_show(struct device *dev,
1894 struct device_attribute *attr, char *buf)
1896 struct scmi_info *info = dev_get_drvdata(dev);
1898 return sprintf(buf, "0x%x\n", info->version.impl_ver);
1900 static DEVICE_ATTR_RO(firmware_version);
1902 static ssize_t vendor_id_show(struct device *dev,
1903 struct device_attribute *attr, char *buf)
1905 struct scmi_info *info = dev_get_drvdata(dev);
1907 return sprintf(buf, "%s\n", info->version.vendor_id);
1909 static DEVICE_ATTR_RO(vendor_id);
1911 static ssize_t sub_vendor_id_show(struct device *dev,
1912 struct device_attribute *attr, char *buf)
1914 struct scmi_info *info = dev_get_drvdata(dev);
1916 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
1918 static DEVICE_ATTR_RO(sub_vendor_id);
1920 static struct attribute *versions_attrs[] = {
1921 &dev_attr_firmware_version.attr,
1922 &dev_attr_protocol_version.attr,
1923 &dev_attr_vendor_id.attr,
1924 &dev_attr_sub_vendor_id.attr,
1927 ATTRIBUTE_GROUPS(versions);
1929 /* Each compatible listed below must have a descriptor associated with it */
1930 static const struct of_device_id scmi_of_match[] = {
1931 #ifdef CONFIG_MAILBOX
1932 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
1934 #ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
1935 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
1940 MODULE_DEVICE_TABLE(of, scmi_of_match);
1942 static struct platform_driver scmi_driver = {
1945 .of_match_table = scmi_of_match,
1946 .dev_groups = versions_groups,
1948 .probe = scmi_probe,
1949 .remove = scmi_remove,
1953 * __scmi_transports_setup - Common helper to call transport-specific
1954 * .init/.exit code if provided.
1956 * @init: A flag to distinguish between init and exit.
1958 * Note that, if provided, we invoke .init/.exit functions for all the
1959 * transports currently compiled in.
1961 * Return: 0 on Success.
1963 static inline int __scmi_transports_setup(bool init)
1966 const struct of_device_id *trans;
1968 for (trans = scmi_of_match; trans->data; trans++) {
1969 const struct scmi_desc *tdesc = trans->data;
1971 if ((init && !tdesc->transport_init) ||
1972 (!init && !tdesc->transport_exit))
1976 ret = tdesc->transport_init();
1978 tdesc->transport_exit();
1981 pr_err("SCMI transport %s FAILED initialization!\n",
1990 static int __init scmi_transports_init(void)
1992 return __scmi_transports_setup(true);
1995 static void __exit scmi_transports_exit(void)
1997 __scmi_transports_setup(false);
2000 static int __init scmi_driver_init(void)
2006 /* Initialize any compiled-in transport which provided an init/exit */
2007 ret = scmi_transports_init();
2011 scmi_base_register();
2013 scmi_clock_register();
2014 scmi_perf_register();
2015 scmi_power_register();
2016 scmi_reset_register();
2017 scmi_sensors_register();
2018 scmi_voltage_register();
2019 scmi_system_register();
2021 return platform_driver_register(&scmi_driver);
2023 subsys_initcall(scmi_driver_init);
2025 static void __exit scmi_driver_exit(void)
2027 scmi_base_unregister();
2029 scmi_clock_unregister();
2030 scmi_perf_unregister();
2031 scmi_power_unregister();
2032 scmi_reset_unregister();
2033 scmi_sensors_unregister();
2034 scmi_voltage_unregister();
2035 scmi_system_unregister();
2039 scmi_transports_exit();
2041 platform_driver_unregister(&scmi_driver);
2043 module_exit(scmi_driver_exit);
2045 MODULE_ALIAS("platform:arm-scmi");
2046 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2047 MODULE_DESCRIPTION("ARM SCMI protocol driver");
2048 MODULE_LICENSE("GPL v2");