// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
#define VP_VDPA_NAME_SIZE 256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

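/*
 * Tear down all interrupts: each in-use virtqueue vector is first
 * detached from the device (set to VIRTIO_MSI_NO_VECTOR) before its
 * Linux irq is freed, the config vector gets the same treatment, and
 * finally the MSI-X vectors themselves are released.
 */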
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

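/*
 * Allocate one MSI-X vector per virtqueue plus one extra vector for
 * config change notifications, wire each vector to the corresponding
 * queue in the device, and roll everything back on failure.
 */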
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to allocate irq vectors: want %d, got %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: failed to request irq for config interrupt\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

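/*
 * Interrupts are set up lazily on the status transition into
 * DRIVER_OK (when the queue configuration is final) and torn down
 * again when DRIVER_OK is cleared, e.g. on reset.
 */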
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		vp_vdpa_free_irq(vp_vdpa);
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

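/*
 * Setting the ring state is likewise not supported by the virtio
 * specification; the two helpers below only accept a state that
 * matches the initial state of a freshly reset queue (avail index 0
 * for split rings; wrap counters 1 and indices 0 for packed rings),
 * in which case there is nothing to restore.
 */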
static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by virtio specification.
	 * But if the state is by chance equal to the device initial
	 * state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

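/*
 * Read the device config space byte by byte, retrying until the
 * device's config_generation is the same before and after the copy.
 * This guarantees the caller never sees a torn read when the device
 * changes its config concurrently.
 */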
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new, *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);
		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

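/*
 * Report the physical address and size of this queue's notification
 * (doorbell) register, allowing upper layers (e.g. vhost-vDPA) to map
 * the doorbell directly instead of going through kick_vq().
 */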
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_features = vp_vdpa_get_features,
	.set_features = vp_vdpa_set_features,
	.get_status = vp_vdpa_get_status,
	.set_status = vp_vdpa_set_status,
	.get_vq_num_max = vp_vdpa_get_vq_num_max,
	.get_vq_state = vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state = vp_vdpa_set_vq_state,
	.set_vq_cb = vp_vdpa_set_vq_cb,
	.set_vq_ready = vp_vdpa_set_vq_ready,
	.get_vq_ready = vp_vdpa_get_vq_ready,
	.set_vq_num = vp_vdpa_set_vq_num,
	.set_vq_address = vp_vdpa_set_vq_address,
	.kick_vq = vp_vdpa_kick_vq,
	.get_generation = vp_vdpa_get_generation,
	.get_device_id = vp_vdpa_get_device_id,
	.get_vendor_id = vp_vdpa_get_vendor_id,
	.get_vq_align = vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config = vp_vdpa_get_config,
	.set_config = vp_vdpa_set_config,
	.set_config_cb = vp_vdpa_set_config_cb,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

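/*
 * Resources in probe() are managed: pcim_enable_device() and the
 * devm_* allocations are released automatically on driver detach, and
 * vp_vdpa_free_irq_vectors() is registered as a devres action so any
 * remaining MSI-X vectors are freed on the same path.
 */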
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, NULL);
	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	mdev = &vp_vdpa->mdev;
	mdev->pci_dev = pdev;

	ret = vp_modern_probe(mdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa);

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Failed to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name		= VP_VDPA_DRIVER_NAME,
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
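
/*
 * Usage sketch (not part of the driver): because .id_table is NULL,
 * the driver binds only through dynamic IDs. Assuming a modern
 * virtio-net PCI device (vendor 0x1af4, device 0x1041), something
 * like the following would rebind it to vp_vdpa and expose it on the
 * vdpa bus:
 *
 *   modprobe vp_vdpa
 *   echo 0000:00:05.0 > /sys/bus/pci/drivers/virtio-pci/unbind
 *   echo "1af4 1041" > /sys/bus/pci/drivers/vp_vdpa/new_id
 *
 * The PCI address and IDs above are illustrative; substitute the
 * actual device to be bridged.
 */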