// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
        SCMI_SUCCESS = 0,       /* Success */
        SCMI_ERR_SUPPORT = -1,  /* Not supported */
        SCMI_ERR_PARAMS = -2,   /* Invalid Parameters */
        SCMI_ERR_ACCESS = -3,   /* Invalid access/permission denied */
        SCMI_ERR_ENTRY = -4,    /* Not found */
        SCMI_ERR_RANGE = -5,    /* Value out of range */
        SCMI_ERR_BUSY = -6,     /* Device busy */
        SCMI_ERR_COMMS = -7,    /* Communication Error */
        SCMI_ERR_GENERIC = -8,  /* Generic Error */
        SCMI_ERR_HARDWARE = -9, /* Hardware Error */
        SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
        const struct scmi_device_id *id_table;
        struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *                    Index of this bitmap table is also used for message
 *                    sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *              a number of xfers equal to the maximum allowed in-flight
 *              messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *                 currently in-flight messages.
 */
struct scmi_xfers_info {
        unsigned long *xfer_alloc_table;
        spinlock_t xfer_lock;
        int max_msg;
        struct hlist_head free_xfers;
        DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *      initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance in which it is defined by DT and implemented by the SCMI server fw.
 */
struct scmi_protocol_instance {
        const struct scmi_handle *handle;
        const struct scmi_protocol *proto;
        void *gid;
        refcount_t users;
        void *priv;
        struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)     container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *           implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *             this SCMI instance: populated on protocol's first attempted
 *             usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *                 scmi_revision_info.num_protocols elements allocated by the
 *                 base protocol.
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *                    in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *                    in microseconds, for atomic operations.
 *                    Only SCMI synchronous commands reported by the platform
 *                    to have an execution latency less than or equal to the
 *                    threshold should be considered for atomic mode operation:
 *                    such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
        struct device *dev;
        const struct scmi_desc *desc;
        struct scmi_revision_info version;
        struct scmi_handle handle;
        struct scmi_xfers_info tx_minfo;
        struct scmi_xfers_info rx_minfo;
        struct idr tx_idr;
        struct idr rx_idr;
        struct idr protocols;
        /* Ensure mutual exclusive access to protocols instance array */
        struct mutex protocols_mtx;
        u8 *protocols_imp;
        struct idr active_protocols;
        unsigned int atomic_threshold;
        void *notify_priv;
        struct list_head node;
        int users;
};

#define handle_to_scmi_info(h)  container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
        /* better than switch case as long as return value is continuous */
        0,                      /* SCMI_SUCCESS */
        -EOPNOTSUPP,            /* SCMI_ERR_SUPPORT */
        -EINVAL,                /* SCMI_ERR_PARAMS */
        -EACCES,                /* SCMI_ERR_ACCESS */
        -ENOENT,                /* SCMI_ERR_ENTRY */
        -ERANGE,                /* SCMI_ERR_RANGE */
        -EBUSY,                 /* SCMI_ERR_BUSY */
        -ECOMM,                 /* SCMI_ERR_COMMS */
        -EIO,                   /* SCMI_ERR_GENERIC */
        -EREMOTEIO,             /* SCMI_ERR_HARDWARE */
        -EPROTO,                /* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
        int err_idx = -errno;

        if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
                return scmi_linux_errmap[err_idx];
        return -EIO;
}

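/*
 * Illustrative sketch (not part of the driver logic): given the mapping
 * above, a platform status of SCMI_ERR_PARAMS (-2) is surfaced to callers
 * as -EINVAL:
 *
 *      ret = scmi_to_linux_errno(SCMI_ERR_PARAMS);     => ret == -EINVAL
 *
 * while any status outside the known range falls back to -EIO.
 */
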
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
                                         void *priv)
{
        struct scmi_info *info = handle_to_scmi_info(handle);

        info->notify_priv = priv;
        /* Ensure updated protocol private data is visible */
        smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
        struct scmi_info *info = handle_to_scmi_info(handle);

        /* Ensure protocols_private_data has been updated */
        smp_rmb();
        return info->notify_priv;
}

/**
 * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our request in-order we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at start, so try a second pass using
 *    find_next_zero_bit() and starting from 0.
 *
 *  X = used in-flight
 *
 * Normal
 * ------
 *
 *              |- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *              ^
 *              |- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *        |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *        |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *                                                                ^
 *                                                                |- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */

static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
                               struct scmi_xfer *xfer)
{
        unsigned long xfer_id, next_token;

        /*
         * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
         * using the pre-allocated transfer_id as a base.
         * Note that the global transfer_id is shared across all message types
         * so there could be holes in the allocated set of monotonic sequence
         * numbers, but that is going to limit the effectiveness of the
         * mitigation only in very rare limit conditions.
         */
        next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

        /* Pick the next available xfer_id >= next_token */
        xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                     MSG_TOKEN_MAX, next_token);
        if (xfer_id == MSG_TOKEN_MAX) {
                /*
                 * After heavily out-of-order responses, there are no free
                 * tokens ahead, but only at start of xfer_alloc_table so
                 * try again from the beginning.
                 */
                xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
                                             MSG_TOKEN_MAX, 0);
                /*
                 * Something is wrong if we got here since there can be a
                 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
                 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
                 */
                if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
                        return -ENOMEM;
        }

        /* Update +/- last_token accordingly if we skipped some hole */
        if (xfer_id != next_token)
                atomic_add((int)(xfer_id - next_token), &transfer_last_id);

        /* Set in-flight */
        set_bit(xfer_id, minfo->xfer_alloc_table);
        xfer->hdr.seq = (u16)xfer_id;

        return 0;
}

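/*
 * Worked example (hypothetical numbers, illustrative only): with a 10-bit
 * token space (MSG_TOKEN_MAX == 1024) and transfer_last_id at 1030, the
 * candidate next_token is 1030 & 1023 == 6; if bit 6 is still marked as
 * in-flight, find_next_zero_bit() yields the first free bit at or above 6,
 * and transfer_last_id is then advanced by the size of the skipped hole.
 */
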
/**
 * scmi_xfer_token_clear  - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
                                         struct scmi_xfer *xfer)
{
        clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *               the pending hash table.
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight xfer
 * into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *          @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else a corresponding ERR_PTR.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
                                       struct scmi_xfers_info *minfo,
                                       bool set_pending)
{
        int ret;
        unsigned long flags;
        struct scmi_xfer *xfer;

        spin_lock_irqsave(&minfo->xfer_lock, flags);
        if (hlist_empty(&minfo->free_xfers)) {
                spin_unlock_irqrestore(&minfo->xfer_lock, flags);
                return ERR_PTR(-ENOMEM);
        }

        /* grab an xfer from the free_list */
        xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
        hlist_del_init(&xfer->node);

        /*
         * Allocate transfer_id early so that can be used also as base for
         * monotonic sequence number generation if needed.
         */
        xfer->transfer_id = atomic_inc_return(&transfer_last_id);

        if (set_pending) {
                /* Pick and set monotonic token */
                ret = scmi_xfer_token_set(minfo, xfer);
                if (!ret) {
                        hash_add(minfo->pending_xfers, &xfer->node,
                                 xfer->hdr.seq);
                        xfer->pending = true;
                } else {
                        dev_err(handle->dev,
                                "Failed to get monotonic token %d\n", ret);
                        hlist_add_head(&xfer->node, &minfo->free_xfers);
                        xfer = ERR_PTR(ret);
                }
        }

        if (!IS_ERR(xfer)) {
                refcount_set(&xfer->users, 1);
                atomic_set(&xfer->busy, SCMI_XFER_FREE);
        }
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);

        return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&minfo->xfer_lock, flags);
        if (refcount_dec_and_test(&xfer->users)) {
                if (xfer->pending) {
                        scmi_xfer_token_clear(minfo, xfer);
                        hash_del(&xfer->node);
                        xfer->pending = false;
                }
                hlist_add_head(&xfer->node, &minfo->free_xfers);
        }
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_lookup_unlocked  - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
        struct scmi_xfer *xfer = NULL;

        if (test_bit(xfer_id, minfo->xfer_alloc_table))
                xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

        return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate  - Validate message type against state of related
 *                               xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
                                             u8 msg_type,
                                             struct scmi_xfer *xfer)
{
        /*
         * Even if a response was indeed expected on this slot at this point,
         * a buggy platform could wrongly reply feeding us an unexpected
         * delayed response we're not prepared to handle: bail-out safely
         * blaming firmware.
         */
        if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
                dev_err(cinfo->dev,
                        "Delayed Response for %d not expected! Buggy F/W ?\n",
                        xfer->hdr.seq);
                return -EINVAL;
        }

        switch (xfer->state) {
        case SCMI_XFER_SENT_OK:
                if (msg_type == MSG_TYPE_DELAYED_RESP) {
                        /*
                         * Delayed Response expected but delivered earlier.
                         * Assume message RESPONSE was OK and skip state.
                         */
                        xfer->hdr.status = SCMI_SUCCESS;
                        xfer->state = SCMI_XFER_RESP_OK;
                        complete(&xfer->done);
                        dev_warn(cinfo->dev,
                                 "Received valid OoO Delayed Response for %d\n",
                                 xfer->hdr.seq);
                }
                break;
        case SCMI_XFER_RESP_OK:
                if (msg_type != MSG_TYPE_DELAYED_RESP)
                        return -EINVAL;
                break;
        case SCMI_XFER_DRESP_OK:
                /* No further message expected once in SCMI_XFER_DRESP_OK */
                return -EINVAL;
        }

        return 0;
}

/**
 * scmi_xfer_state_update  - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully validated
 * by @scmi_msg_response_validate(), so here we just update the state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *          busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
        xfer->hdr.type = msg_type;

        /* Unknown command types were already discarded earlier */
        if (xfer->hdr.type == MSG_TYPE_COMMAND)
                xfer->state = SCMI_XFER_RESP_OK;
        else
                xfer->state = SCMI_XFER_DRESP_OK;
}

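/*
 * For reference, a sketch of the accepted state/type combinations as
 * derived from scmi_msg_response_validate() and the update above (not an
 * authoritative table):
 *
 *      SENT_OK  + MSG_TYPE_COMMAND       => RESP_OK
 *      SENT_OK  + MSG_TYPE_DELAYED_RESP  => RESP_OK assumed, then DRESP_OK
 *      RESP_OK  + MSG_TYPE_DELAYED_RESP  => DRESP_OK
 *      RESP_OK  + MSG_TYPE_COMMAND       => rejected (-EINVAL)
 *      DRESP_OK + any further message    => rejected (-EINVAL)
 */
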
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
        int ret;

        ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

        return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire  - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
        int ret;
        unsigned long flags;
        struct scmi_xfer *xfer;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_xfers_info *minfo = &info->tx_minfo;
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
        u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

        /* Are we even expecting this? */
        spin_lock_irqsave(&minfo->xfer_lock, flags);
        xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
        if (IS_ERR(xfer)) {
                dev_err(cinfo->dev,
                        "Message for %d type %d is not expected!\n",
                        xfer_id, msg_type);
                spin_unlock_irqrestore(&minfo->xfer_lock, flags);
                return xfer;
        }
        refcount_inc(&xfer->users);
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);

        spin_lock_irqsave(&xfer->lock, flags);
        ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
        /*
         * If a pending xfer was found which was also in a congruent state with
         * the received message, acquire exclusive access to it setting the busy
         * flag.
         * Spins only on the rare limit condition of concurrent reception of
         * RESP and DRESP for the same xfer.
         */
        if (!ret) {
                spin_until_cond(scmi_xfer_acquired(xfer));
                scmi_xfer_state_update(xfer, msg_type);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);

        if (ret) {
                dev_err(cinfo->dev,
                        "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
                        msg_type, xfer_id, msg_hdr, xfer->state);
                /* On error the refcount incremented above has to be dropped */
                __scmi_xfer_put(minfo, xfer);
                xfer = ERR_PTR(-EINVAL);
        }

        return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
                                             struct scmi_xfer *xfer)
{
        atomic_set(&xfer->busy, SCMI_XFER_FREE);
        __scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
                                      struct scmi_chan_info *cinfo)
{
        if (info->desc->ops->clear_channel)
                info->desc->ops->clear_channel(cinfo);
}

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
                                       struct scmi_info *info)
{
        return cinfo->no_completion_irq || info->desc->force_polling;
}

static inline bool is_transport_polling_capable(struct scmi_info *info)
{
        return info->desc->ops->poll_done ||
               info->desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
                                      struct scmi_info *info)
{
        return is_polling_required(cinfo, info) &&
               is_transport_polling_capable(info);
}

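/*
 * In short (an illustrative summary of the predicates above): polling is
 * actually used on a channel only when it is required (no completion IRQ,
 * or force_polling set) AND the transport can support it (a .poll_done()
 * hook or sync_cmds_completed_on_ret); a required-but-not-capable
 * combination is flagged later at channel setup and in do_xfer().
 */
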
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
                                     u32 msg_hdr, void *priv)
{
        struct scmi_xfer *xfer;
        struct device *dev = cinfo->dev;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_xfers_info *minfo = &info->rx_minfo;
        ktime_t ts;

        ts = ktime_get_boottime();
        xfer = scmi_xfer_get(cinfo->handle, minfo, false);
        if (IS_ERR(xfer)) {
                dev_err(dev, "failed to get free message slot (%ld)\n",
                        PTR_ERR(xfer));
                scmi_clear_channel(info, cinfo);
                return;
        }

        unpack_scmi_header(msg_hdr, &xfer->hdr);
        if (priv)
                /* Ensure order between xfer->priv store and following ops */
                smp_store_mb(xfer->priv, priv);
        info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
                                            xfer);
        scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
                    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
                           MSG_TYPE_NOTIFICATION);

        __scmi_xfer_put(minfo, xfer);

        scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
                                 u32 msg_hdr, void *priv)
{
        struct scmi_xfer *xfer;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
                if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
                        scmi_clear_channel(info, cinfo);
                return;
        }

        /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
        if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
                xfer->rx.len = info->desc->max_msg_size;

        if (priv)
                /* Ensure order between xfer->priv store and following ops */
                smp_store_mb(xfer->priv, priv);
        info->desc->ops->fetch_response(cinfo, xfer);

        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
                           xfer->hdr.type);

        if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
                scmi_clear_channel(info, cinfo);
                complete(xfer->async_done);
        } else {
                complete(&xfer->done);
        }

        scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

        switch (msg_type) {
        case MSG_TYPE_NOTIFICATION:
                scmi_handle_notification(cinfo, msg_hdr, priv);
                break;
        case MSG_TYPE_COMMAND:
        case MSG_TYPE_DELAYED_RESP:
                scmi_handle_response(cinfo, msg_hdr, priv);
                break;
        default:
                WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
                break;
        }
}

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
                     struct scmi_xfer *xfer)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        __scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer, ktime_t stop)
{
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

        /*
         * Poll also on xfer->done so that polling can be forcibly terminated
         * in case of out-of-order receptions of delayed responses
         */
        return info->desc->ops->poll_done(cinfo, xfer) ||
               try_wait_for_completion(&xfer->done) ||
               ktime_after(ktime_get(), stop);
}

/**
 * scmi_wait_for_message_response  - A helper to group all the possible ways of
 *                                   waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
                                          struct scmi_xfer *xfer)
{
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct device *dev = info->dev;
        int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;

        trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
                                      xfer->hdr.protocol_id, xfer->hdr.seq,
                                      timeout_ms,
                                      xfer->hdr.poll_completion);

        if (xfer->hdr.poll_completion) {
                /*
                 * Real polling is needed only if transport has NOT declared
                 * itself to support synchronous commands replies.
                 */
                if (!info->desc->sync_cmds_completed_on_ret) {
                        /*
                         * Poll on xfer using transport provided .poll_done();
                         * assumes no completion interrupt was available.
                         */
                        ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

                        spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
                                                                  xfer, stop));
                        if (ktime_after(ktime_get(), stop)) {
                                dev_err(dev,
                                        "timed out in resp(caller: %pS) - polling\n",
                                        (void *)_RET_IP_);
                                ret = -ETIMEDOUT;
                        }
                }

                if (!ret) {
                        unsigned long flags;

                        /*
                         * Do not fetch_response if an out-of-order delayed
                         * response is being processed.
                         */
                        spin_lock_irqsave(&xfer->lock, flags);
                        if (xfer->state == SCMI_XFER_SENT_OK) {
                                info->desc->ops->fetch_response(cinfo, xfer);
                                xfer->state = SCMI_XFER_RESP_OK;
                        }
                        spin_unlock_irqrestore(&xfer->lock, flags);
                }
        } else {
                /* And we wait for the response. */
                if (!wait_for_completion_timeout(&xfer->done,
                                                 msecs_to_jiffies(timeout_ms))) {
                        dev_err(dev, "timed out in resp(caller: %pS)\n",
                                (void *)_RET_IP_);
                        ret = -ETIMEDOUT;
                }
        }

        return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response; if transmit error, return
 *      corresponding error; else if all goes well, return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
                   struct scmi_xfer *xfer)
{
        int ret;
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);
        struct device *dev = info->dev;
        struct scmi_chan_info *cinfo;

        /* Check for polling request on custom command xfers at first */
        if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
                dev_warn_once(dev,
                              "Polling mode is not supported by transport.\n");
                return -EINVAL;
        }

        cinfo = idr_find(&info->tx_idr, pi->proto->id);
        if (unlikely(!cinfo))
                return -EINVAL;

        /* True ONLY if also supported by transport. */
        if (is_polling_enabled(cinfo, info))
                xfer->hdr.poll_completion = true;

        /*
         * Initialise protocol id now from protocol handle to avoid it being
         * overridden by mistake (or malice) by the protocol code mangling with
         * the scmi_xfer structure prior to this.
         */
        xfer->hdr.protocol_id = pi->proto->id;
        reinit_completion(&xfer->done);

        trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);

        xfer->state = SCMI_XFER_SENT_OK;
        /*
         * Even though spinlocking is not needed here since no race is possible
         * on xfer->state due to the monotonically increasing tokens allocation,
         * we must anyway ensure xfer->state initialization is not re-ordered
         * after the .send_message() to be sure that on the RX path an early
         * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
         */
        smp_mb();

        ret = info->desc->ops->send_message(cinfo, xfer);
        if (ret < 0) {
                dev_dbg(dev, "Failed to send message %d\n", ret);
                return ret;
        }

        ret = scmi_wait_for_message_response(cinfo, xfer);
        if (!ret && xfer->hdr.status)
                ret = scmi_to_linux_errno(xfer->hdr.status);

        if (info->desc->ops->mark_txdone)
                info->desc->ops->mark_txdone(cinfo, ret, xfer);

        trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
                            xfer->hdr.protocol_id, xfer->hdr.seq, ret);

        return ret;
}

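/*
 * Typical synchronous usage from protocol code (a minimal sketch under the
 * xfer_ops API defined in this file; HYPOTHETICAL_CMD and its payload
 * layout are made up for illustration, not a real protocol command):
 *
 *      static int hypothetical_attr_get(const struct scmi_protocol_handle *ph,
 *                                       u32 domain, u32 *attr)
 *      {
 *              int ret;
 *              struct scmi_xfer *t;
 *
 *              ret = ph->xops->xfer_get_init(ph, HYPOTHETICAL_CMD,
 *                                            sizeof(__le32), sizeof(__le32),
 *                                            &t);
 *              if (ret)
 *                      return ret;
 *
 *              put_unaligned_le32(domain, t->tx.buf);
 *              ret = ph->xops->do_xfer(ph, t);
 *              if (!ret)
 *                      *attr = get_unaligned_le32(t->rx.buf);
 *
 *              ph->xops->xfer_put(ph, t);
 *              return ret;
 *      }
 */
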
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
                              struct scmi_xfer *xfer)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT       (2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *      response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (something that can easily be
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (In other words there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode.)
 *
 * Return: -ETIMEDOUT in case of no delayed response; if transmit error,
 *      return corresponding error; else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
                                 struct scmi_xfer *xfer)
{
        int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
        DECLARE_COMPLETION_ONSTACK(async_response);

        xfer->async_done = &async_response;

        /*
         * Delayed responses should not be polled, so an async command should
         * not have been used when requiring an atomic/poll context; WARN and
         * perform instead a sleeping wait.
         * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
         */
        WARN_ON_ONCE(xfer->hdr.poll_completion);

        ret = do_xfer(ph, xfer);
        if (!ret) {
                if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
                        dev_err(ph->dev,
                                "timed out in delayed resp(caller: %pS)\n",
                                (void *)_RET_IP_);
                        ret = -ETIMEDOUT;
                } else if (xfer->hdr.status) {
                        ret = scmi_to_linux_errno(xfer->hdr.status);
                }
        }

        xfer->async_done = NULL;
        return ret;
}

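/*
 * Async variant sketch (same hypothetical command as in the earlier
 * example): protocol code simply swaps in do_xfer_with_response(); the call
 * returns only once the delayed response has arrived or
 * SCMI_MAX_RESPONSE_TIMEOUT has elapsed:
 *
 *      ret = ph->xops->do_xfer_with_response(ph, t);
 */
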
/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *      corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
                         u8 msg_id, size_t tx_size, size_t rx_size,
                         struct scmi_xfer **p)
{
        int ret;
        struct scmi_xfer *xfer;
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);
        struct scmi_xfers_info *minfo = &info->tx_minfo;
        struct device *dev = info->dev;

        /* Ensure we have sane transfer sizes */
        if (rx_size > info->desc->max_msg_size ||
            tx_size > info->desc->max_msg_size)
                return -ERANGE;

        xfer = scmi_xfer_get(pi->handle, minfo, true);
        if (IS_ERR(xfer)) {
                ret = PTR_ERR(xfer);
                dev_err(dev, "failed to get free message slot(%d)\n", ret);
                return ret;
        }

        xfer->tx.len = tx_size;
        xfer->rx.len = rx_size ? : info->desc->max_msg_size;
        xfer->hdr.type = MSG_TYPE_COMMAND;
        xfer->hdr.id = msg_id;
        xfer->hdr.poll_completion = false;

        *p = xfer;

        return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
        int ret;
        __le32 *rev_info;
        struct scmi_xfer *t;

        ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
        if (ret)
                return ret;

        ret = do_xfer(ph, t);
        if (!ret) {
                rev_info = t->rx.buf;
                *version = le32_to_cpu(*rev_info);
        }

        xfer_put(ph, t);
        return ret;
}

/**
 * scmi_set_protocol_priv  - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
                                  void *priv)
{
        struct scmi_protocol_instance *pi = ph_to_pi(ph);

        pi->priv = priv;

        return 0;
}

/**
 * scmi_get_protocol_priv  - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);

        return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
        .version_get = version_get,
        .xfer_get_init = xfer_get_init,
        .reset_rx_to_maxsz = reset_rx_to_maxsz,
        .do_xfer = do_xfer,
        .do_xfer_with_response = do_xfer_with_response,
        .xfer_put = xfer_put,
};

struct scmi_msg_resp_domain_name_get {
        __le32 flags;
        u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get  - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *        stored as a NULL terminated string.
 * @len: The len in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
                                         u8 cmd_id, u32 res_id, char *name,
                                         size_t len)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_domain_name_get *resp;

        ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
                                      sizeof(*resp), &t);
        if (ret)
                goto out;

        put_unaligned_le32(res_id, t->tx.buf);
        resp = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
                strscpy(name, resp->name, len);

        ph->xops->xfer_put(ph, t);
out:
        if (ret)
                dev_err(ph->dev,
                        "Failed to get extended name - id:%u (ret:%d). Using %s\n",
                        res_id, ret, name);
        return ret;
}

/**
 * struct scmi_iterator - Iterator descriptor
 * @msg: A reference to the message TX buffer; filled by @prepare_message with
 *       a proper custom command payload for each multi-part command request.
 * @resp: A reference to the response RX buffer; used by @update_state and
 *        @process_response to parse the multi-part replies.
 * @t: A reference to the underlying xfer initialized and used transparently by
 *     the iterator internal routines.
 * @ph: A reference to the associated protocol handle to be used.
 * @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the
 *         iterator's internal routines and by the caller-provided
 *         @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 *        passed back to the @scmi_iterator_ops.
 */
struct scmi_iterator {
        void *msg;
        void *resp;
        struct scmi_xfer *t;
        const struct scmi_protocol_handle *ph;
        struct scmi_iterator_ops *ops;
        struct scmi_iterator_state state;
        void *priv;
};

static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
                                struct scmi_iterator_ops *ops,
                                unsigned int max_resources, u8 msg_id,
                                size_t tx_size, void *priv)
{
        int ret;
        struct scmi_iterator *i;

        i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
        if (!i)
                return ERR_PTR(-ENOMEM);

        i->ph = ph;
        i->ops = ops;
        i->priv = priv;

        ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
        if (ret) {
                devm_kfree(ph->dev, i);
                return ERR_PTR(ret);
        }

        i->state.max_resources = max_resources;
        i->msg = i->t->tx.buf;
        i->resp = i->t->rx.buf;

        return i;
}

static int scmi_iterator_run(void *iter)
{
        int ret = -EINVAL;
        struct scmi_iterator_ops *iops;
        const struct scmi_protocol_handle *ph;
        struct scmi_iterator_state *st;
        struct scmi_iterator *i = iter;

        if (!i || !i->ops || !i->ph)
                return ret;

        iops = i->ops;
        ph = i->ph;
        st = &i->state;

        do {
                iops->prepare_message(i->msg, st->desc_index, i->priv);
                ret = ph->xops->do_xfer(ph, i->t);
                if (ret)
                        break;

                st->rx_len = i->t->rx.len;
                ret = iops->update_state(st, i->resp, i->priv);
                if (ret)
                        break;

                if (st->num_returned > st->max_resources - st->desc_index) {
                        dev_err(ph->dev,
                                "No. of resources can't exceed %d\n",
                                st->max_resources);
                        ret = -EINVAL;
                        break;
                }

                for (st->loop_idx = 0; st->loop_idx < st->num_returned;
                     st->loop_idx++) {
                        ret = iops->process_response(ph, i->resp, st, i->priv);
                        if (ret)
                                goto out;
                }

                st->desc_index += st->num_returned;
                ph->xops->reset_rx_to_maxsz(ph, i->t);
                /*
                 * check for both returned and remaining to avoid infinite
                 * loop due to buggy firmware
                 */
        } while (st->num_returned && st->num_remaining);

out:
        /* Finalize and destroy iterator */
        ph->xops->xfer_put(ph, i->t);
        devm_kfree(ph->dev, i);

        return ret;
}

static const struct scmi_proto_helpers_ops helpers_ops = {
        .extended_name_get = scmi_common_extended_name_get,
        .iter_response_init = scmi_iterator_init,
        .iter_response_run = scmi_iterator_run,
};

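/*
 * Iterator usage sketch from a protocol's perspective (hypothetical
 * multi-part command and message layout, for illustration only): the
 * protocol supplies the three scmi_iterator_ops callbacks and lets the
 * helpers above drive the whole multi-part exchange:
 *
 *      static void iter_prepare(void *message, unsigned int desc_index,
 *                               const void *priv)
 *      {
 *              struct hypothetical_msg *msg = message;
 *
 *              msg->desc_index = cpu_to_le32(desc_index);
 *      }
 *
 *      (update_state and process_response along the same lines, then:)
 *
 *      iter = ph->hops->iter_response_init(ph, &ops, max_resources,
 *                                          HYPOTHETICAL_CMD, sizeof(*msg),
 *                                          priv);
 *      if (IS_ERR(iter))
 *              return PTR_ERR(iter);
 *      ret = ph->hops->iter_response_run(iter);
 */
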
/**
 * scmi_revision_area_get  - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *         instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);

        return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance  - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resources management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *         or ERR_PTR on failure. On failure the @proto reference is at first
 *         put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
                                  const struct scmi_protocol *proto)
{
        int ret = -ENOMEM;
        void *gid;
        struct scmi_protocol_instance *pi;
        const struct scmi_handle *handle = &info->handle;

        /* Protocol specific devres group */
        gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
        if (!gid) {
                scmi_protocol_put(proto->id);
                goto out;
        }

        pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
        if (!pi)
                goto clean;

        pi->gid = gid;
        pi->proto = proto;
        pi->handle = handle;
        pi->ph.dev = handle->dev;
        pi->ph.xops = &xfer_ops;
        pi->ph.hops = &helpers_ops;
        pi->ph.set_priv = scmi_set_protocol_priv;
        pi->ph.get_priv = scmi_get_protocol_priv;
        refcount_set(&pi->users, 1);
        /* proto->init is assured NON NULL by scmi_protocol_register */
        ret = pi->proto->instance_init(&pi->ph);
        if (ret)
                goto clean;

        ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
                        GFP_KERNEL);
        if (ret != proto->id)
                goto clean;

        /*
         * Warn but ignore events registration errors since we do not want
         * to skip whole protocols if their notifications are messed up.
         */
        if (pi->proto->events) {
                ret = scmi_register_protocol_events(handle, pi->proto->id,
                                                    &pi->ph,
                                                    pi->proto->events);
                if (ret)
                        dev_warn(handle->dev,
                                 "Protocol:%X - Events Registration Failed - err:%d\n",
                                 pi->proto->id, ret);
        }

        devres_close_group(handle->dev, pi->gid);
        dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

        return pi;

clean:
        /* Take care to put the protocol module's owner before releasing all */
        scmi_protocol_put(proto->id);
        devres_release_group(handle->dev, gid);
out:
        return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance  - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *         in particular returns -EPROBE_DEFER when the desired protocol could
 *         NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
        struct scmi_protocol_instance *pi;
        struct scmi_info *info = handle_to_scmi_info(handle);

        mutex_lock(&info->protocols_mtx);
        pi = idr_find(&info->protocols, protocol_id);

        if (pi) {
                refcount_inc(&pi->users);
        } else {
                const struct scmi_protocol *proto;

                /* Fails if protocol not registered on bus */
                proto = scmi_protocol_get(protocol_id);
                if (proto)
                        pi = scmi_alloc_init_protocol_instance(info, proto);
                else
                        pi = ERR_PTR(-EPROBE_DEFER);
        }
        mutex_unlock(&info->protocols_mtx);

        return pi;
}

/**
 * scmi_protocol_acquire  - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
        return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release  - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resources de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
        struct scmi_info *info = handle_to_scmi_info(handle);
        struct scmi_protocol_instance *pi;

        mutex_lock(&info->protocols_mtx);
        pi = idr_find(&info->protocols, protocol_id);
        if (WARN_ON(!pi))
                goto out;

        if (refcount_dec_and_test(&pi->users)) {
                void *gid = pi->gid;

                if (pi->proto->events)
                        scmi_deregister_protocol_events(handle, protocol_id);

                if (pi->proto->instance_deinit)
                        pi->proto->instance_deinit(&pi->ph);

                idr_remove(&info->protocols, protocol_id);

                scmi_protocol_put(protocol_id);

                devres_release_group(handle->dev, gid);
                dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
                        protocol_id);
        }

out:
        mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
                                     u8 *prot_imp)
{
        const struct scmi_protocol_instance *pi = ph_to_pi(ph);
        struct scmi_info *info = handle_to_scmi_info(pi->handle);

        info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
        int i;
        struct scmi_info *info = handle_to_scmi_info(handle);
        struct scmi_revision_info *rev = handle->version;

        if (!info->protocols_imp)
                return false;

        for (i = 0; i < rev->num_protocols; i++)
                if (info->protocols_imp[i] == prot_id)
                        return true;
        return false;
}

struct scmi_protocol_devres {
        const struct scmi_handle *handle;
        u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
        struct scmi_protocol_devres *dres = res;

        scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get  - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *        be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, eventually triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, protocol hold will be automatically
 * released, and possibly de-initialized on last user, once the SCMI driver
 * owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *         Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
                       struct scmi_protocol_handle **ph)
{
        struct scmi_protocol_instance *pi;
        struct scmi_protocol_devres *dres;
        struct scmi_handle *handle = sdev->handle;

        if (!ph)
                return ERR_PTR(-EINVAL);

        dres = devres_alloc(scmi_devm_release_protocol,
                            sizeof(*dres), GFP_KERNEL);
        if (!dres)
                return ERR_PTR(-ENOMEM);

        pi = scmi_get_protocol_instance(handle, protocol_id);
        if (IS_ERR(pi)) {
                devres_free(dres);
                return pi;
        }

        dres->handle = handle;
        dres->protocol_id = protocol_id;
        devres_add(&sdev->dev, dres);

        *ph = &pi->ph;

        return pi->proto->ops;
}

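/*
 * From an SCMI driver's perspective this is used as follows (a sketch with
 * a hypothetical probe; the Clock protocol is picked purely as an example):
 *
 *      static int hypothetical_probe(struct scmi_device *sdev)
 *      {
 *              const struct scmi_clk_proto_ops *clk_ops;
 *              struct scmi_protocol_handle *ph;
 *
 *              clk_ops = sdev->handle->devm_protocol_get(sdev,
 *                                                        SCMI_PROTOCOL_CLOCK,
 *                                                        &ph);
 *              if (IS_ERR(clk_ops))
 *                      return PTR_ERR(clk_ops);
 *              ...
 *      }
 *
 * with the protocol hold dropped automatically via devres on driver unbind.
 */
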
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
        struct scmi_protocol_devres *dres = res;

        if (WARN_ON(!dres || !data))
                return 0;

        return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put  - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *        be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
        int ret;

        ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
                             scmi_devm_protocol_match, &protocol_id);
        WARN_ON(ret);
}

/**
 * scmi_is_transport_atomic  - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the system wide currently
 *                    configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
                                     unsigned int *atomic_threshold)
{
        bool ret;
        struct scmi_info *info = handle_to_scmi_info(handle);

        ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
        if (ret && atomic_threshold)
                *atomic_threshold = info->atomic_threshold;

        return ret;
}

static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
        info->users++;
        return &info->handle;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
        struct list_head *p;
        struct scmi_info *info;
        struct scmi_handle *handle = NULL;

        mutex_lock(&scmi_list_mutex);
        list_for_each(p, &scmi_list) {
                info = list_entry(p, struct scmi_info, node);
                if (dev->parent == info->dev) {
                        handle = scmi_handle_get_from_info_unlocked(info);
                        break;
                }
        }
        mutex_unlock(&scmi_list_mutex);

        return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *         if NULL was passed, it returns -EINVAL.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
        struct scmi_info *info;

        if (!handle)
                return -EINVAL;

        info = handle_to_scmi_info(handle);
        mutex_lock(&scmi_list_mutex);
        if (!WARN_ON(!info->users))
                info->users--;
        mutex_unlock(&scmi_list_mutex);

        return 0;
}

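/*
 * Balanced usage sketch for this non-devres pairing (illustrative only):
 *
 *      handle = scmi_handle_get(&sdev->dev);
 *      if (handle) {
 *              ...
 *              scmi_handle_put(handle);
 *      }
 */
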
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
                                 struct scmi_xfers_info *info)
{
        int i;
        struct scmi_xfer *xfer;
        struct device *dev = sinfo->dev;
        const struct scmi_desc *desc = sinfo->desc;

        /* Pre-allocated messages, no more than what hdr.seq can support */
        if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
                dev_err(dev,
                        "Invalid maximum messages %d, not in range [1 - %lu]\n",
                        info->max_msg, MSG_TOKEN_MAX);
                return -EINVAL;
        }

        hash_init(info->pending_xfers);

        /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
        info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
                                              sizeof(long), GFP_KERNEL);
        if (!info->xfer_alloc_table)
                return -ENOMEM;

        /*
         * Preallocate a number of xfers equal to max inflight messages,
         * pre-initialize the buffer pointer to pre-allocated buffers and
         * attach all of them to the free list
         */
        INIT_HLIST_HEAD(&info->free_xfers);
        for (i = 0; i < info->max_msg; i++) {
                xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
                if (!xfer)
                        return -ENOMEM;

                xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
                                            GFP_KERNEL);
                if (!xfer->rx.buf)
                        return -ENOMEM;

                xfer->tx.buf = xfer->rx.buf;
                init_completion(&xfer->done);
                spin_lock_init(&xfer->lock);

                /* Add initialized xfer to the free list */
                hlist_add_head(&xfer->node, &info->free_xfers);
        }

        spin_lock_init(&info->xfer_lock);

        return 0;
}

static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
        const struct scmi_desc *desc = sinfo->desc;

        if (!desc->ops->get_max_msg) {
                sinfo->tx_minfo.max_msg = desc->max_msg;
                sinfo->rx_minfo.max_msg = desc->max_msg;
        } else {
                struct scmi_chan_info *base_cinfo;

                base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
                if (!base_cinfo)
                        return -EINVAL;
                sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

                /* RX channel is optional so can be skipped */
                base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
                if (base_cinfo)
                        sinfo->rx_minfo.max_msg =
                                desc->ops->get_max_msg(base_cinfo);
        }

        return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
        int ret;

        ret = scmi_channels_max_msg_configure(sinfo);
        if (ret)
                return ret;

        ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
        if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
                ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

        return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
                           int prot_id, bool tx)
{
        int ret, idx;
        struct scmi_chan_info *cinfo;
        struct idr *idr;

        /* Transmit channel is first entry i.e. index 0 */
        idx = tx ? 0 : 1;
        idr = tx ? &info->tx_idr : &info->rx_idr;

        /* check if already allocated, used for multiple device per protocol */
        cinfo = idr_find(idr, prot_id);
        if (cinfo)
                return 0;

        if (!info->desc->ops->chan_available(dev, idx)) {
                cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
                if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
                        return -EINVAL;
                goto idr_alloc;
        }

        cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
        if (!cinfo)
                return -ENOMEM;

        cinfo->dev = dev;

        ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
        if (ret)
                return ret;

        if (tx && is_polling_required(cinfo, info)) {
                if (is_transport_polling_capable(info))
                        dev_info(dev,
                                 "Enabled polling mode TX channel - prot_id:%d\n",
                                 prot_id);
                else
                        dev_warn(dev,
                                 "Polling mode NOT supported by transport.\n");
        }

idr_alloc:
        ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
        if (ret != prot_id) {
                dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
                return ret;
        }

        cinfo->handle = &info->handle;
        return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
        int ret = scmi_chan_setup(info, dev, prot_id, true);

        if (!ret) /* Rx is optional, hence no error check */
                scmi_chan_setup(info, dev, prot_id, false);

        return ret;
}

/**
 * scmi_get_protocol_device  - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocol for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @proto_id and @name: if the device did not exist yet it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi device, NULL otherwise.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
                         int prot_id, const char *name)
{
        struct scmi_device *sdev;

        /* Already created for this parent SCMI instance ? */
        sdev = scmi_child_dev_find(info->dev, prot_id, name);
        if (sdev)
                return sdev;

        pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

        sdev = scmi_device_create(np, info->dev, prot_id, name);
        if (!sdev) {
                dev_err(info->dev, "failed to create %d protocol device\n",
                        prot_id);
                return NULL;
        }

        if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
                dev_err(&sdev->dev, "failed to setup transport\n");
                scmi_device_destroy(sdev);
                return NULL;
        }

        return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
                            int prot_id, const char *name)
{
        struct scmi_device *sdev;

        sdev = scmi_get_protocol_device(np, info, prot_id, name);
        if (!sdev)
                return;

        /* setup handle now as the transport is ready */
        scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices  - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
                                         struct scmi_info *info, int prot_id)
{
        struct list_head *phead;

        mutex_lock(&scmi_requested_devices_mtx);
        phead = idr_find(&scmi_requested_devices, prot_id);
        if (phead) {
                struct scmi_requested_dev *rdev;

                list_for_each_entry(rdev, phead, node)
                        scmi_create_protocol_device(np, info, prot_id,
                                                    rdev->id_table->name);
        }
        mutex_unlock(&scmi_requested_devices_mtx);
}

/**
 * scmi_protocol_device_request  - Helper to request a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be created.
 *
 * This helper lets an SCMI driver request specific devices identified by the
 * @id_table to be created for each active SCMI instance.
 *
 * The requested device name MUST NOT be already existent for any protocol;
 * at first the freshly requested @id_table is annotated in the IDR table
 * @scmi_requested_devices, then a matching device is created for each already
 * active SCMI instance. (if any)
 *
 * This way the requested device is created straight-away for all the already
 * initialized (probed) SCMI instances (handles) and it remains also annotated
 * as pending creation if the requesting SCMI driver was loaded before some
 * SCMI instance and related transports were available: when such late instance
 * is probed, its probe will take care to scan the list of pending requested
 * devices and create those on its own (see @scmi_create_protocol_devices and
 * its enclosing loop)
 *
 * Return: 0 on Success
 */
int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
        int ret = 0;
        unsigned int id = 0;
        struct list_head *head, *phead = NULL;
        struct scmi_requested_dev *rdev;
        struct scmi_info *info;

        pr_debug("Requesting SCMI device (%s) for protocol %x\n",
                 id_table->name, id_table->protocol_id);

        /*
         * Search for the matching protocol rdev list and then search
         * for any existent equally named device...fails if any duplicate found.
         */
        mutex_lock(&scmi_requested_devices_mtx);
        idr_for_each_entry(&scmi_requested_devices, head, id) {
                if (!phead) {
                        /* A list found registered in the IDR is never empty */
                        rdev = list_first_entry(head, struct scmi_requested_dev,
                                                node);
                        if (rdev->id_table->protocol_id ==
                            id_table->protocol_id)
                                phead = head;
                }
                list_for_each_entry(rdev, head, node) {
                        if (!strcmp(rdev->id_table->name, id_table->name)) {
                                pr_err("Ignoring duplicate request [%d] %s\n",
                                       rdev->id_table->protocol_id,
                                       rdev->id_table->name);
                                ret = -EINVAL;
                                goto out;
                        }
                }
        }

        /*
         * No duplicate found for requested id_table, so let's create a new
         * requested device entry for this new valid request.
         */
        rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
        if (!rdev) {
                ret = -ENOMEM;
                goto out;
        }
        rdev->id_table = id_table;

        /*
         * Append the new requested device table descriptor to the head of the
         * related protocol list, eventually creating such head if not already
         * there.
         */
        if (!phead) {
                phead = kzalloc(sizeof(*phead), GFP_KERNEL);
                if (!phead) {
                        kfree(rdev);
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(phead);

                ret = idr_alloc(&scmi_requested_devices, (void *)phead,
                                id_table->protocol_id,
                                id_table->protocol_id + 1, GFP_KERNEL);
                if (ret != id_table->protocol_id) {
                        pr_err("Failed to save SCMI device - ret:%d\n", ret);
                        kfree(rdev);
                        kfree(phead);
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
        }
        list_add(&rdev->node, phead);

        /*
         * Now effectively create and initialize the requested device for every
         * already initialized SCMI instance which has registered the requested
         * protocol as a valid active one: i.e. defined in DT and supported by
         * current platform FW.
         */
        mutex_lock(&scmi_list_mutex);
        list_for_each_entry(info, &scmi_list, node) {
                struct device_node *child;

                child = idr_find(&info->active_protocols,
                                 id_table->protocol_id);
                if (child) {
                        struct scmi_device *sdev;

                        sdev = scmi_get_protocol_device(child, info,
                                                        id_table->protocol_id,
                                                        id_table->name);
                        /* Set handle if not already set: device existed */
                        if (sdev && !sdev->handle)
                                sdev->handle =
                                        scmi_handle_get_from_info_unlocked(info);
                } else {
                        dev_err(info->dev,
                                "Failed. SCMI protocol %d not active.\n",
                                id_table->protocol_id);
                }
        }
        mutex_unlock(&scmi_list_mutex);

out:
        mutex_unlock(&scmi_requested_devices_mtx);

        return ret;
}

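/*
 * Requesting drivers typically reach this helper through
 * scmi_driver_register() with an id_table along these lines (hypothetical
 * device name, shown for illustration only):
 *
 *      static const struct scmi_device_id scmi_id_table[] = {
 *              { SCMI_PROTOCOL_PERF, "hypothetical-perf-dev" },
 *              { },
 *      };
 */
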
/**
 * scmi_protocol_device_unrequest  - Helper to unrequest a device
 *
 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
 *
 * A helper to let an SCMI driver release its request about devices; note that
 * devices are created and initialized once the first SCMI driver requests
 * them, but they are destroyed only on SCMI core unloading/unbinding.
 *
 * The current SCMI transport layer uses such devices as internal references
 * and, as such, they could be shared between multiple drivers using the same
 * transport, so they cannot be safely destroyed till the whole SCMI stack is
 * removed (unless taking on the further burden of refcounting).
 */
void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
	struct list_head *phead;

	pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
		 id_table->name, id_table->protocol_id);

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
	if (phead) {
		struct scmi_requested_dev *victim, *tmp;

		list_for_each_entry_safe(victim, tmp, phead, node) {
			if (!strcmp(victim->id_table->name, id_table->name)) {
				list_del(&victim->node);
				kfree(victim);
				break;
			}
		}

		if (list_empty(phead)) {
			idr_remove(&scmi_requested_devices,
				   id_table->protocol_id);
			kfree(phead);
		}
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}
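/*
 * Illustrative counterpart to the request sketch above, reusing the same
 * hypothetical example_id_table: the request is dropped on module exit,
 * while the devices themselves persist until the SCMI core goes away.
 *
 *	static void __exit example_exit(void)
 *	{
 *		scmi_protocol_device_unrequest(&example_id_table);
 *	}
 */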
static int scmi_cleanup_txrx_channels(struct scmi_info *info)
{
	int ret;
	struct idr *idr = &info->tx_idr;

	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}
static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);
	idr_init(&info->protocols);
	mutex_init(&info->protocols_mtx);
	idr_init(&info->active_protocols);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;
	handle->devm_protocol_get = scmi_devm_protocol_get;
	handle->devm_protocol_put = scmi_devm_protocol_put;

	/* System-wide atomic threshold for atomic ops, if any */
	if (!of_property_read_u32(np, "atomic-threshold-us",
				  &info->atomic_threshold))
		dev_info(dev,
			 "SCMI System wide atomic threshold set to %d us\n",
			 info->atomic_threshold);
	handle->is_transport_atomic = scmi_is_transport_atomic;

	if (desc->ops->link_supplier) {
		ret = desc->ops->link_supplier(dev);
		if (ret)
			return ret;
	}

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		goto clear_txrx_setup;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
		dev_err(dev,
			"Transport is not polling capable. Atomic mode not supported.\n");

	/*
	 * Trigger SCMI Base protocol initialization.
	 * It's mandatory and won't ever be released/deinitialized until the
	 * SCMI stack is shutdown/unloaded as a whole.
	 */
	ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI\n");
		goto notification_exit;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		/*
		 * Save this valid DT protocol descriptor amongst
		 * @active_protocols for this SCMI instance.
		 */
		ret = idr_alloc(&info->active_protocols, child,
				prot_id, prot_id + 1, GFP_KERNEL);
		if (ret != prot_id) {
			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
				prot_id);
			continue;
		}

		of_node_get(child);
		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;

notification_exit:
	scmi_notification_exit(&info->handle);
clear_txrx_setup:
	scmi_cleanup_txrx_channels(info);
	return ret;
}
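/*
 * Illustrative DT fragment of what scmi_probe() parses (values are examples
 * only, transport-specific properties elided): the optional
 * "atomic-threshold-us" property on the SCMI node and a protocol child node
 * whose "reg" carries the protocol identifier:
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi-smc";
 *			atomic-threshold-us = <30>;
 *
 *			scmi_dvfs: protocol@13 {
 *				reg = <0x13>;
 *			};
 *		};
 *	};
 */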
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}
static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct device_node *child;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	mutex_lock(&info->protocols_mtx);
	idr_destroy(&info->protocols);
	mutex_unlock(&info->protocols_mtx);

	idr_for_each_entry(&info->active_protocols, child, id)
		of_node_put(child);
	idr_destroy(&info->active_protocols);

	/* Safe to free channels since no more users */
	return scmi_cleanup_txrx_channels(info);
}
static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
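/*
 * The attributes above expose the Base protocol revision information under
 * the platform device in sysfs; e.g. from userspace (the exact path is
 * platform dependent and shown here only as an illustration):
 *
 *	$ cat /sys/devices/platform/firmware:scmi/protocol_version
 *	2.0
 */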
/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
	{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
	{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc },
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);
static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
		.dev_groups = versions_groups,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};
/**
 * __scmi_transports_setup  - Common helper to call transport-specific
 * .init/.exit code if provided.
 *
 * @init: A flag to distinguish between init and exit.
 *
 * Note that, if provided, we invoke the .init/.exit functions for all the
 * transports currently compiled in.
 *
 * Return: 0 on Success.
 */
static inline int __scmi_transports_setup(bool init)
{
	int ret = 0;
	const struct of_device_id *trans;

	for (trans = scmi_of_match; trans->data; trans++) {
		const struct scmi_desc *tdesc = trans->data;

		if ((init && !tdesc->transport_init) ||
		    (!init && !tdesc->transport_exit))
			continue;

		if (init)
			ret = tdesc->transport_init();
		else
			tdesc->transport_exit();

		if (ret) {
			pr_err("SCMI transport %s FAILED initialization!\n",
			       trans->compatible);
			break;
		}
	}

	return ret;
}
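/*
 * A transport opts in to this mechanism by populating the optional
 * transport_init/transport_exit hooks of its descriptor; a minimal sketch
 * with hypothetical foo_* names:
 *
 *	static const struct scmi_desc scmi_foo_desc = {
 *		.transport_init = foo_transport_init,
 *		.transport_exit = foo_transport_exit,
 *		.ops = &scmi_foo_ops,
 *		.max_rx_timeout_ms = 30,
 *		.max_msg = 20,
 *		.max_msg_size = 128,
 *	};
 */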
static int __init scmi_transports_init(void)
{
	return __scmi_transports_setup(true);
}

static void __exit scmi_transports_exit(void)
{
	__scmi_transports_setup(false);
}
static int __init scmi_driver_init(void)
{
	int ret;

	/* Bail out if no SCMI transport was configured */
	if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
		return -EINVAL;

	scmi_bus_init();

	/* Initialize any compiled-in transport which provided an init/exit */
	ret = scmi_transports_init();
	if (ret)
		return ret;

	scmi_base_register();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_voltage_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
	scmi_base_unregister();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_voltage_unregister();
	scmi_system_unregister();

	scmi_bus_exit();

	scmi_transports_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);
MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");