1 // SPDX-License-Identifier: GPL-2.0
3 * Virtio Transport driver for Arm System Control and Management Interface
6 * Copyright (C) 2020-2021 OpenSynergy.
7 * Copyright (C) 2021 ARM Ltd.
11 * DOC: Theory of Operation
13 * The scmi-virtio transport implements a driver for the virtio SCMI device.
15 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
16 * channel (virtio eventq, P2A channel). Each channel is implemented through a
17 * virtqueue. Access to each virtqueue is protected by spinlocks.
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/virtio.h>
23 #include <linux/virtio_config.h>
25 #include <uapi/linux/virtio_ids.h>
26 #include <uapi/linux/virtio_scmi.h>
/* Maximum size (bytes) of the SCMI message payload carried in one SDU. */
30 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
/* Full PDU = message payload plus the SCMI protocol framing overhead. */
31 #define VIRTIO_SCMI_MAX_PDU_SIZE \
32 (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
/* One Tx exchange uses two descriptors: one out (request), one in (response). */
33 #define DESCRIPTORS_PER_TX_MSG 2
36 * struct scmi_vio_channel - Transport channel information
38 * @vqueue: Associated virtqueue
39 * @cinfo: SCMI Tx or Rx channel
40 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
41 * @is_rx: Whether channel is an Rx channel
42 * @ready: Whether transport user is ready to hear about channel
43 * @max_msg: Maximum number of pending messages for this channel.
44 * @lock: Protects access to all members except ready.
45 * @ready_lock: Protects access to ready. If required, it must be taken before
48 struct scmi_vio_channel {
49 struct virtqueue *vqueue;
50 struct scmi_chan_info *cinfo;
51 struct list_head free_list;
55 /* lock to protect access to all members except ready. */
57 /* lock to protect access to ready flag. */
58 spinlock_t ready_lock;
62 * struct scmi_vio_msg - Transport PDU information
64 * @request: SDU used for commands
65 * @input: SDU used for (delayed) responses and notifications
66 * @list: List which scmi_vio_msg may be part of
67 * @rx_len: Input SDU size in bytes, once input has been received
/* NOTE(review): both SDU buffers are allocated VIRTIO_SCMI_MAX_PDU_SIZE
 * bytes each in virtio_chan_setup(); @list links the message onto a
 * channel's free_list when it is not in flight.
 */
70 struct scmi_msg_payld *request;
71 struct scmi_msg_payld *input;
72 struct list_head list;
76 /* Only one SCMI VirtIO device can possibly exist */
77 static struct virtio_device *scmi_vdev;
/* True when the device negotiated the optional P2A (Rx/eventq) channel. */
79 static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
81 return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
/*
 * Post @msg->input as a receive buffer on the Rx virtqueue and kick it.
 *
 * Takes vioch->lock around the virtqueue operations; safe to call from
 * atomic context (GFP_ATOMIC, dev_err_once on failure).
 */
84 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
85 struct scmi_vio_msg *msg)
87 struct scatterlist sg_in;
91 sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
93 spin_lock_irqsave(&vioch->lock, flags);
95 rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
97 dev_err_once(vioch->cinfo->dev,
98 "failed to add to virtqueue (%d)\n", rc);
100 virtqueue_kick(vioch->vqueue);
102 spin_unlock_irqrestore(&vioch->lock, flags);
/*
 * Recycle a processed message: on an Rx channel the buffer is re-queued
 * via scmi_vio_feed_vq_rx(); on a Tx channel the message is returned to
 * the channel's free_list. (NOTE(review): the branch selecting between
 * the two paths is on lines not visible in this chunk — presumably keyed
 * on vioch->is_rx; confirm against the full file.)
 */
107 static void scmi_finalize_message(struct scmi_vio_channel *vioch,
108 struct scmi_vio_msg *msg)
111 scmi_vio_feed_vq_rx(vioch, msg);
113 /* Here IRQs are assumed to be already disabled by the caller */
114 spin_lock(&vioch->lock);
115 list_add(&msg->list, &vioch->free_list);
116 spin_unlock(&vioch->lock);
/*
 * Virtqueue completion callback, shared by the cmdq and eventq.
 *
 * Used buffers are drained under vioch->ready_lock so that
 * virtio_chan_free() can clear vioch->ready and have outstanding
 * messages ignored while shutting down; each received message is handed
 * to the SCMI core via scmi_rx_callback() and then recycled through
 * scmi_finalize_message(). (NOTE(review): the enclosing loop and some
 * exit paths sit on lines not visible in this chunk.)
 */
120 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
122 unsigned long ready_flags;
124 struct scmi_vio_channel *vioch;
125 struct scmi_vio_msg *msg;
126 bool cb_enabled = true;
128 if (WARN_ON_ONCE(!vqueue->vdev->priv))
/* Per-vq channel context was stored in vdev->priv at probe time. */
130 vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
133 spin_lock_irqsave(&vioch->ready_lock, ready_flags);
/* Channel not ready: re-enable callbacks and bail out. */
137 (void)virtqueue_enable_cb(vqueue);
138 goto unlock_ready_out;
141 /* IRQs already disabled here no need to irqsave */
142 spin_lock(&vioch->lock);
144 virtqueue_disable_cb(vqueue);
147 msg = virtqueue_get_buf(vqueue, &length);
149 if (virtqueue_enable_cb(vqueue))
153 spin_unlock(&vioch->lock);
/* Record received length, then dispatch the PDU to the SCMI core. */
156 msg->rx_len = length;
157 scmi_rx_callback(vioch->cinfo,
158 msg_read_header(msg->input), msg);
160 scmi_finalize_message(vioch, msg);
164 * Release ready_lock and re-enable IRQs between loop iterations
165 * to allow virtio_chan_free() to possibly kick in and set the
166 * flag vioch->ready to false even in between processing of
167 * messages, so as to force outstanding messages to be ignored
168 * when system is shutting down.
170 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
174 spin_unlock(&vioch->lock);
176 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
/* Virtqueue names and completion callbacks, in vq order (tx first, rx second). */
179 static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
181 static vq_callback_t *scmi_vio_complete_callbacks[] = {
182 scmi_vio_complete_cb,
/* Report the per-channel pending-message limit computed at probe time. */
186 static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
188 struct scmi_vio_channel *vioch = base_cinfo->transport_info;
190 return vioch->max_msg;
/*
 * Defer SCMI probing until a scmi-virtio device has bound (scmi_vdev set),
 * then record the dependency with a device link so driver core ordering
 * (suspend/remove) is respected. Returns -EPROBE_DEFER while unbound.
 */
193 static int virtio_link_supplier(struct device *dev)
197 "Deferring probe after not finding a bound scmi-virtio device\n");
198 return -EPROBE_DEFER;
201 if (!device_link_add(dev, &scmi_vdev->dev,
202 DL_FLAG_AUTOREMOVE_CONSUMER)) {
203 dev_err(dev, "Adding link to supplier virtio device failed\n");
/*
 * A channel index maps onto a virtqueue: Tx always exists, Rx only when
 * the P2A feature was negotiated. The channel is available while no
 * scmi_chan_info has claimed it yet (vioch->cinfo still NULL).
 */
210 static bool virtio_chan_available(struct device *dev, int idx)
212 struct scmi_vio_channel *channels, *vioch = NULL;
214 if (WARN_ON_ONCE(!scmi_vdev))
217 channels = (struct scmi_vio_channel *)scmi_vdev->priv;
220 case VIRTIO_SCMI_VQ_TX:
221 vioch = &channels[VIRTIO_SCMI_VQ_TX];
223 case VIRTIO_SCMI_VQ_RX:
224 if (scmi_vio_have_vq_rx(scmi_vdev))
225 vioch = &channels[VIRTIO_SCMI_VQ_RX];
231 return vioch && !vioch->cinfo;
/*
 * Bind an SCMI channel to its virtio channel and pre-allocate max_msg
 * scmi_vio_msg PDUs (devm-managed): Tx messages are parked on the
 * free_list, Rx messages are immediately fed to the Rx virtqueue.
 * Setting vioch->cinfo marks the channel as claimed; finally the
 * channel is flagged ready under ready_lock.
 */
234 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
238 struct scmi_vio_channel *vioch;
239 int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
243 return -EPROBE_DEFER;
245 vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
247 for (i = 0; i < vioch->max_msg; i++) {
248 struct scmi_vio_msg *msg;
250 msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
/* Tx needs a request SDU; both directions need an input SDU. */
255 msg->request = devm_kzalloc(cinfo->dev,
256 VIRTIO_SCMI_MAX_PDU_SIZE,
262 msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
268 spin_lock_irqsave(&vioch->lock, flags);
269 list_add_tail(&msg->list, &vioch->free_list);
270 spin_unlock_irqrestore(&vioch->lock, flags);
272 scmi_vio_feed_vq_rx(vioch, msg);
276 spin_lock_irqsave(&vioch->lock, flags);
277 cinfo->transport_info = vioch;
278 /* Indirectly setting channel not available any more */
279 vioch->cinfo = cinfo;
280 spin_unlock_irqrestore(&vioch->lock, flags);
282 spin_lock_irqsave(&vioch->ready_lock, flags);
284 spin_unlock_irqrestore(&vioch->ready_lock, flags);
/*
 * Tear down one channel: clear ready under ready_lock first, so
 * scmi_vio_complete_cb() starts ignoring any outstanding messages,
 * then release the core channel and detach vioch state under lock.
 */
289 static int virtio_chan_free(int id, void *p, void *data)
292 struct scmi_chan_info *cinfo = p;
293 struct scmi_vio_channel *vioch = cinfo->transport_info;
295 spin_lock_irqsave(&vioch->ready_lock, flags);
296 vioch->ready = false;
297 spin_unlock_irqrestore(&vioch->ready_lock, flags);
299 scmi_free_channel(cinfo, data, id);
301 spin_lock_irqsave(&vioch->lock, flags);
303 spin_unlock_irqrestore(&vioch->lock, flags);
/*
 * Transmit one SCMI command on the Tx virtqueue.
 *
 * Under vioch->lock: pop a free scmi_vio_msg (fail if none are left),
 * serialize the xfer into msg->request, then queue one out descriptor
 * (request) plus one in descriptor (response) — DESCRIPTORS_PER_TX_MSG.
 * On virtqueue_add_sgs() failure the message is returned to free_list;
 * otherwise the queue is kicked.
 */
308 static int virtio_send_message(struct scmi_chan_info *cinfo,
309 struct scmi_xfer *xfer)
311 struct scmi_vio_channel *vioch = cinfo->transport_info;
312 struct scatterlist sg_out;
313 struct scatterlist sg_in;
314 struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
317 struct scmi_vio_msg *msg;
319 spin_lock_irqsave(&vioch->lock, flags);
321 if (list_empty(&vioch->free_list)) {
322 spin_unlock_irqrestore(&vioch->lock, flags);
326 msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
327 list_del(&msg->list);
329 msg_tx_prepare(msg->request, xfer);
331 sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
332 sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
334 rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
336 list_add(&msg->list, &vioch->free_list);
337 dev_err_once(vioch->cinfo->dev,
338 "%s() failed to add to virtqueue (%d)\n", __func__,
341 virtqueue_kick(vioch->vqueue);
344 spin_unlock_irqrestore(&vioch->lock, flags);
/* Deserialize a received response SDU (stashed in xfer->priv) into the xfer. */
349 static void virtio_fetch_response(struct scmi_chan_info *cinfo,
350 struct scmi_xfer *xfer)
352 struct scmi_vio_msg *msg = xfer->priv;
355 msg_fetch_response(msg->input, msg->rx_len, xfer);
/* Deserialize a received notification SDU, bounded by max_len, into the xfer. */
360 static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
361 size_t max_len, struct scmi_xfer *xfer)
363 struct scmi_vio_msg *msg = xfer->priv;
366 msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
/* SCMI core transport operations implemented by this virtio transport. */
371 static const struct scmi_transport_ops scmi_virtio_ops = {
372 .link_supplier = virtio_link_supplier,
373 .chan_available = virtio_chan_available,
374 .chan_setup = virtio_chan_setup,
375 .chan_free = virtio_chan_free,
376 .get_max_msg = virtio_get_max_msg,
377 .send_message = virtio_send_message,
378 .fetch_response = virtio_fetch_response,
379 .fetch_notification = virtio_fetch_notification,
/*
 * Probe the (single) SCMI virtio device: allocate one scmi_vio_channel
 * per virtqueue (Tx always; Rx only with VIRTIO_SCMI_F_P2A_CHANNELS),
 * find the vqueues, size each channel's max_msg from its vring (Tx
 * divided by DESCRIPTORS_PER_TX_MSG, clamped to MSG_TOKEN_MAX), and
 * finally publish the device through scmi_vdev with a full barrier.
 */
382 static int scmi_vio_probe(struct virtio_device *vdev)
384 struct device *dev = &vdev->dev;
385 struct scmi_vio_channel *channels;
390 struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
392 /* Only one SCMI VirtIO device allowed */
395 "One SCMI Virtio device was already initialized: only one allowed.\n");
399 have_vq_rx = scmi_vio_have_vq_rx(vdev);
400 vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
402 channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
407 channels[VIRTIO_SCMI_VQ_RX].is_rx = true;
409 ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
410 scmi_vio_vqueue_names, NULL);
412 dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
416 for (i = 0; i < vq_cnt; i++) {
419 spin_lock_init(&channels[i].lock);
420 spin_lock_init(&channels[i].ready_lock);
421 INIT_LIST_HEAD(&channels[i].free_list);
422 channels[i].vqueue = vqs[i];
424 sz = virtqueue_get_vring_size(channels[i].vqueue);
425 /* Tx messages need multiple descriptors. */
426 if (!channels[i].is_rx)
427 sz /= DESCRIPTORS_PER_TX_MSG;
429 if (sz > MSG_TOKEN_MAX) {
431 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
432 channels[i].is_rx ? "rx" : "tx",
436 channels[i].max_msg = sz;
439 vdev->priv = channels;
440 /* Ensure initialized scmi_vdev is visible */
441 smp_store_mb(scmi_vdev, vdev);
446 static void scmi_vio_remove(struct virtio_device *vdev)
449 * Once we get here, virtio_chan_free() will have already been called by
450 * the SCMI core for any existing channel and, as a consequence, all the
451 * virtio channels will have been already marked NOT ready, causing any
452 * outstanding message on any vqueue to be ignored by complete_cb: now
453 * we can just stop processing buffers and destroy the vqueues.
/* Reset device first so no new buffers are used, then delete the vqs. */
455 vdev->config->reset(vdev);
456 vdev->config->del_vqs(vdev);
457 /* Ensure scmi_vdev is visible as NULL */
458 smp_store_mb(scmi_vdev, NULL);
/* Reject legacy (pre-1.0) virtio devices: VIRTIO_F_VERSION_1 is mandatory. */
461 static int scmi_vio_validate(struct virtio_device *vdev)
463 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
465 "device does not comply with spec version 1.x\n");
/* Feature bits this driver may negotiate with the device. */
472 static unsigned int features[] = {
473 VIRTIO_SCMI_F_P2A_CHANNELS,
/* Match any SCMI virtio device, regardless of vendor. */
476 static const struct virtio_device_id id_table[] = {
477 { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
481 static struct virtio_driver virtio_scmi_driver = {
482 .driver.name = "scmi-virtio",
483 .driver.owner = THIS_MODULE,
484 .feature_table = features,
485 .feature_table_size = ARRAY_SIZE(features),
486 .id_table = id_table,
487 .probe = scmi_vio_probe,
488 .remove = scmi_vio_remove,
489 .validate = scmi_vio_validate,
/* Transport init hook: register the virtio driver with the virtio core. */
492 static int __init virtio_scmi_init(void)
494 return register_virtio_driver(&virtio_scmi_driver);
/* Transport exit hook: unregister the virtio driver. */
497 static void virtio_scmi_exit(void)
499 unregister_virtio_driver(&virtio_scmi_driver);
/* Transport descriptor exported to the SCMI core for the virtio transport. */
502 const struct scmi_desc scmi_virtio_desc = {
503 .transport_init = virtio_scmi_init,
504 .transport_exit = virtio_scmi_exit,
505 .ops = &scmi_virtio_ops,
506 .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
507 .max_msg = 0, /* overridden by virtio_get_max_msg() */
508 .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,