// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2022 ARM Ltd.
 */
/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"
#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2

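/*
 * Each Tx message occupies two descriptors in the cmdq: one device-readable
 * holding the request SDU and one device-writable receiving the response SDU,
 * as set up by the scatterlists in virtio_send_message() below.
 */
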
/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

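/*
 * Note on lifetime: scmi_vdev is published (with a store barrier) only at the
 * end of a successful probe and reset to NULL on remove, so the NULL checks
 * scattered below double as "device not yet/no longer bound" tests.
 */
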
static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

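/*
 * Channel usage protocol: every path that wants to use a channel first takes
 * a reference with scmi_vio_channel_acquire() and drops it once done; the
 * initial reference set by scmi_vio_channel_ready() is dropped by the cleanup
 * path below, so the completion fires only after the last concurrent user has
 * gone away.
 */
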
static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
	void *deferred_wq = NULL;

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	virtio_break_device(vioch->vqueue->vdev);
	if (!vioch->is_rx && vioch->deferred_tx_wq) {
		deferred_wq = vioch->deferred_tx_wq;
		/* Cannot be kicked anymore after this...*/
		vioch->deferred_tx_wq = NULL;
	}
	spin_unlock_irqrestore(&vioch->lock, flags);

	if (deferred_wq)
		destroy_workqueue(deferred_wq);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

/* Assumes to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumes to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}

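/*
 * Message usage protocol: a message grabbed from the free list starts with a
 * single user (the TX path); virtio_send_message() takes one extra reference
 * for polled transfers so that the IRQ/RX path and the polling path can each
 * drop theirs without the buffer being recycled under the other's feet; the
 * last scmi_vio_msg_release() returns the message to the free list.
 */
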
static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assume to be called with channel already acquired or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge to process
	 * the valid non-expired messages and anyway finally free all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no more processed elsewhere so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

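/*
 * The two virtqueue callbacks below both point at scmi_vio_complete_cb(),
 * which recovers the right channel from vqueue->index.
 */
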
static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice(dev,
			   "Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

static bool virtio_chan_available(struct device *dev, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(cinfo->dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	scmi_vio_channel_cleanup_sync(vioch);

	scmi_free_channel(cinfo, data, id);

	return 0;
}

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		/* Drop the poll-path reference, if taken, then the TX one */
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they have been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that had been somehow replied (only if not by chance already processed on
 * the IRQ path - the initial scmi_vio_msg_release() takes care of this) and
 * also any timed-out polled message if that indeed appears to have been at
 * least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed
 * since such messages won't be freed elsewhere. Any other polled message is
 * marked as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages if still inflight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence it can happen to dequeue something different from the
 * buffer we were poll-waiting for: if that is the case such early fetched
 * buffers are then added to the @pending_cmds_list list for later processing
 * by a dedicated deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (possible due to inherent races
 * in virtqueues handling mechanisms), we similarly kick the deferred worker
 * and let it process those, to avoid indefinitely looping in the .poll_done
 * busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final free of any timed
 * out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification
 * mechanism, the message we are polling for could be alternatively delivered
 * via usual IRQ callbacks on another core which happened to have IRQs enabled
 * while we are actively polling for it here: in such a case it will be handled
 * as such by scmi_rx_callback() and the polling loop in the SCMI Core TX path
 * will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by other polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid to acquire poll_lock since polled_status can be changed
	 * in a relevant manner only later in this same thread of execution:
	 * any other possible changes made concurrently by other polling loops
	 * or by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in
		 * mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * When the polling loop has successfully terminated if something
	 * else was queued in the meantime, it will be served by a deferred
	 * worker OR by the normal IRQ/callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

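/*
 * Note: the SCMI core is expected to invoke .poll_done in a busy-waiting loop
 * on the TX path (possibly with IRQs disabled for atomic transfers), which is
 * why the helper above bails out quickly and hands any extra work it spots
 * over to the deferred TX worker instead of looping here.
 */
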
static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};

static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;
	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

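/*
 * Note that scmi_vdev is published above only after vdev->priv and the
 * channels array are fully initialized, so readers that observe a non-NULL
 * scmi_vdev (in virtio_link_supplier(), virtio_chan_available() and
 * virtio_chan_setup()) can safely dereference its priv.
 */
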
static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif

	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};