// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/virtio_ids.h>
19 static LIST_HEAD(mdev_head);
20 /* A global mutex that protects vdpa management device and device level operations. */
21 static DECLARE_RWSEM(vdpa_dev_lock);
22 static DEFINE_IDA(vdpa_index_ida);
24 void vdpa_set_status(struct vdpa_device *vdev, u8 status)
26 down_write(&vdev->cf_lock);
27 vdev->config->set_status(vdev, status);
28 up_write(&vdev->cf_lock);
30 EXPORT_SYMBOL(vdpa_set_status);
32 static struct genl_family vdpa_nl_family;
34 static int vdpa_dev_probe(struct device *d)
36 struct vdpa_device *vdev = dev_to_vdpa(d);
37 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
38 const struct vdpa_config_ops *ops = vdev->config;
39 u32 max_num, min_num = 1;
42 max_num = ops->get_vq_num_max(vdev);
43 if (ops->get_vq_num_min)
44 min_num = ops->get_vq_num_min(vdev);
45 if (max_num < min_num)
48 if (drv && drv->probe)
49 ret = drv->probe(vdev);
54 static void vdpa_dev_remove(struct device *d)
56 struct vdpa_device *vdev = dev_to_vdpa(d);
57 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
59 if (drv && drv->remove)
63 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
65 struct vdpa_device *vdev = dev_to_vdpa(dev);
67 /* Check override first, and if set, only use the named driver */
68 if (vdev->driver_override)
69 return strcmp(vdev->driver_override, drv->name) == 0;
71 /* Currently devices must be supported by all vDPA bus drivers */
75 static ssize_t driver_override_store(struct device *dev,
76 struct device_attribute *attr,
77 const char *buf, size_t count)
79 struct vdpa_device *vdev = dev_to_vdpa(dev);
80 const char *driver_override, *old;
83 /* We need to keep extra room for a newline */
84 if (count >= (PAGE_SIZE - 1))
87 driver_override = kstrndup(buf, count, GFP_KERNEL);
91 cp = strchr(driver_override, '\n');
96 old = vdev->driver_override;
97 if (strlen(driver_override)) {
98 vdev->driver_override = driver_override;
100 kfree(driver_override);
101 vdev->driver_override = NULL;
110 static ssize_t driver_override_show(struct device *dev,
111 struct device_attribute *attr, char *buf)
113 struct vdpa_device *vdev = dev_to_vdpa(dev);
117 len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
122 static DEVICE_ATTR_RW(driver_override);
124 static struct attribute *vdpa_dev_attrs[] = {
125 &dev_attr_driver_override.attr,
129 static const struct attribute_group vdpa_dev_group = {
130 .attrs = vdpa_dev_attrs,
132 __ATTRIBUTE_GROUPS(vdpa_dev);
134 static struct bus_type vdpa_bus = {
136 .dev_groups = vdpa_dev_groups,
137 .match = vdpa_dev_match,
138 .probe = vdpa_dev_probe,
139 .remove = vdpa_dev_remove,
142 static void vdpa_release_dev(struct device *d)
144 struct vdpa_device *vdev = dev_to_vdpa(d);
145 const struct vdpa_config_ops *ops = vdev->config;
150 ida_simple_remove(&vdpa_index_ida, vdev->index);
151 kfree(vdev->driver_override);
156 * __vdpa_alloc_device - allocate and initilaize a vDPA device
157 * This allows driver to some prepartion after device is
158 * initialized but before registered.
159 * @parent: the parent device
160 * @config: the bus operations that is supported by this device
161 * @size: size of the parent structure that contains private data
162 * @name: name of the vdpa device; optional.
163 * @use_va: indicate whether virtual address must be used by this device
165 * Driver should use vdpa_alloc_device() wrapper macro instead of
166 * using this directly.
168 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
171 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
172 const struct vdpa_config_ops *config,
173 size_t size, const char *name,
176 struct vdpa_device *vdev;
182 if (!!config->dma_map != !!config->dma_unmap)
185 /* It should only work for the device that use on-chip IOMMU */
186 if (use_va && !(config->dma_map || config->set_map))
190 vdev = kzalloc(size, GFP_KERNEL);
194 err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
198 vdev->dev.bus = &vdpa_bus;
199 vdev->dev.parent = parent;
200 vdev->dev.release = vdpa_release_dev;
202 vdev->config = config;
203 vdev->features_valid = false;
204 vdev->use_va = use_va;
207 err = dev_set_name(&vdev->dev, "%s", name);
209 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
213 init_rwsem(&vdev->cf_lock);
214 device_initialize(&vdev->dev);
219 ida_simple_remove(&vdpa_index_ida, vdev->index);
225 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
227 static int vdpa_name_match(struct device *dev, const void *data)
229 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
231 return (strcmp(dev_name(&vdev->dev), data) == 0);
234 static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
240 lockdep_assert_held(&vdpa_dev_lock);
241 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
246 return device_add(&vdev->dev);
250 * _vdpa_register_device - register a vDPA device with vdpa lock held
251 * Caller must have a succeed call of vdpa_alloc_device() before.
252 * Caller must invoke this routine in the management device dev_add()
253 * callback after setting up valid mgmtdev for this vdpa device.
254 * @vdev: the vdpa device to be registered to vDPA bus
255 * @nvqs: number of virtqueues supported by this device
257 * Return: Returns an error when fail to add device to vDPA bus
259 int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
264 return __vdpa_register_device(vdev, nvqs);
266 EXPORT_SYMBOL_GPL(_vdpa_register_device);
269 * vdpa_register_device - register a vDPA device
270 * Callers must have a succeed call of vdpa_alloc_device() before.
271 * @vdev: the vdpa device to be registered to vDPA bus
272 * @nvqs: number of virtqueues supported by this device
274 * Return: Returns an error when fail to add to vDPA bus
276 int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
280 down_write(&vdpa_dev_lock);
281 err = __vdpa_register_device(vdev, nvqs);
282 up_write(&vdpa_dev_lock);
285 EXPORT_SYMBOL_GPL(vdpa_register_device);
288 * _vdpa_unregister_device - unregister a vDPA device
289 * Caller must invoke this routine as part of management device dev_del()
291 * @vdev: the vdpa device to be unregisted from vDPA bus
293 void _vdpa_unregister_device(struct vdpa_device *vdev)
295 lockdep_assert_held(&vdpa_dev_lock);
296 WARN_ON(!vdev->mdev);
297 device_unregister(&vdev->dev);
299 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
302 * vdpa_unregister_device - unregister a vDPA device
303 * @vdev: the vdpa device to be unregisted from vDPA bus
305 void vdpa_unregister_device(struct vdpa_device *vdev)
307 down_write(&vdpa_dev_lock);
308 device_unregister(&vdev->dev);
309 up_write(&vdpa_dev_lock);
311 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
314 * __vdpa_register_driver - register a vDPA device driver
315 * @drv: the vdpa device driver to be registered
316 * @owner: module owner of the driver
318 * Return: Returns an err when fail to do the registration
320 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
322 drv->driver.bus = &vdpa_bus;
323 drv->driver.owner = owner;
325 return driver_register(&drv->driver);
327 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
330 * vdpa_unregister_driver - unregister a vDPA device driver
331 * @drv: the vdpa device driver to be unregistered
333 void vdpa_unregister_driver(struct vdpa_driver *drv)
335 driver_unregister(&drv->driver);
337 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
340 * vdpa_mgmtdev_register - register a vdpa management device
342 * @mdev: Pointer to vdpa management device
343 * vdpa_mgmtdev_register() register a vdpa management device which supports
344 * vdpa device management.
345 * Return: Returns 0 on success or failure when required callback ops are not
348 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
350 if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
353 INIT_LIST_HEAD(&mdev->list);
354 down_write(&vdpa_dev_lock);
355 list_add_tail(&mdev->list, &mdev_head);
356 up_write(&vdpa_dev_lock);
359 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
361 static int vdpa_match_remove(struct device *dev, void *data)
363 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
364 struct vdpa_mgmt_dev *mdev = vdev->mdev;
367 mdev->ops->dev_del(mdev, vdev);
371 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
373 down_write(&vdpa_dev_lock);
375 list_del(&mdev->list);
377 /* Filter out all the entries belong to this management device and delete it. */
378 bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
380 up_write(&vdpa_dev_lock);
382 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
384 static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
386 void *buf, unsigned int len)
388 const struct vdpa_config_ops *ops = vdev->config;
391 * Config accesses aren't supposed to trigger before features are set.
392 * If it does happen we assume a legacy guest.
394 if (!vdev->features_valid)
395 vdpa_set_features_unlocked(vdev, 0);
396 ops->get_config(vdev, offset, buf, len);
400 * vdpa_get_config - Get one or more device configuration fields.
401 * @vdev: vdpa device to operate on
402 * @offset: starting byte offset of the field
403 * @buf: buffer pointer to read to
404 * @len: length of the configuration fields in bytes
406 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
407 void *buf, unsigned int len)
409 down_read(&vdev->cf_lock);
410 vdpa_get_config_unlocked(vdev, offset, buf, len);
411 up_read(&vdev->cf_lock);
413 EXPORT_SYMBOL_GPL(vdpa_get_config);
416 * vdpa_set_config - Set one or more device configuration fields.
417 * @vdev: vdpa device to operate on
418 * @offset: starting byte offset of the field
419 * @buf: buffer pointer to read from
420 * @length: length of the configuration fields in bytes
422 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
423 const void *buf, unsigned int length)
425 down_write(&vdev->cf_lock);
426 vdev->config->set_config(vdev, offset, buf, length);
427 up_write(&vdev->cf_lock);
429 EXPORT_SYMBOL_GPL(vdpa_set_config);
431 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
432 const char *busname, const char *devname)
434 /* Bus name is optional for simulated management device, so ignore the
435 * device with bus if bus attribute is provided.
437 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
440 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
443 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
444 (strcmp(dev_name(mdev->device), devname) == 0))
450 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
452 struct vdpa_mgmt_dev *mdev;
453 const char *busname = NULL;
456 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
457 return ERR_PTR(-EINVAL);
458 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
459 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
460 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
462 list_for_each_entry(mdev, &mdev_head, list) {
463 if (mgmtdev_handle_match(mdev, busname, devname))
466 return ERR_PTR(-ENODEV);
469 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
471 if (mdev->device->bus &&
472 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
474 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
479 static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
480 u32 portid, u32 seq, int flags)
482 u64 supported_classes = 0;
487 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
490 err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
494 while (mdev->id_table[i].device) {
495 if (mdev->id_table[i].device <= 63)
496 supported_classes |= BIT_ULL(mdev->id_table[i].device);
500 if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
501 supported_classes, VDPA_ATTR_UNSPEC)) {
505 if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
506 mdev->max_supported_vqs)) {
510 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
511 mdev->supported_features, VDPA_ATTR_PAD)) {
516 genlmsg_end(msg, hdr);
520 genlmsg_cancel(msg, hdr);
524 static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
526 struct vdpa_mgmt_dev *mdev;
530 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
534 down_read(&vdpa_dev_lock);
535 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
537 up_read(&vdpa_dev_lock);
538 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
543 err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
544 up_read(&vdpa_dev_lock);
547 err = genlmsg_reply(msg, info);
556 vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
558 struct vdpa_mgmt_dev *mdev;
559 int start = cb->args[0];
563 down_read(&vdpa_dev_lock);
564 list_for_each_entry(mdev, &mdev_head, list) {
569 err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
570 cb->nlh->nlmsg_seq, NLM_F_MULTI);
576 up_read(&vdpa_dev_lock);
581 #define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
582 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
583 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
585 static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
587 struct vdpa_dev_set_config config = {};
588 struct nlattr **nl_attrs = info->attrs;
589 struct vdpa_mgmt_dev *mdev;
594 if (!info->attrs[VDPA_ATTR_DEV_NAME])
597 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
599 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
600 macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
601 memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
602 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
604 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
606 nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
607 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
609 if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
610 config.net.max_vq_pairs =
611 nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
612 if (!config.net.max_vq_pairs) {
613 NL_SET_ERR_MSG_MOD(info->extack,
614 "At least one pair of VQs is required");
617 config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
620 /* Skip checking capability if user didn't prefer to configure any
621 * device networking attributes. It is likely that user might have used
622 * a device specific method to configure such attributes or using device
623 * default attributes.
625 if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
626 !netlink_capable(skb, CAP_NET_ADMIN))
629 down_write(&vdpa_dev_lock);
630 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
632 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
636 if ((config.mask & mdev->config_attr_mask) != config.mask) {
637 NL_SET_ERR_MSG_MOD(info->extack,
638 "All provided attributes are not supported");
643 err = mdev->ops->dev_add(mdev, name, &config);
645 up_write(&vdpa_dev_lock);
649 static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
651 struct vdpa_mgmt_dev *mdev;
652 struct vdpa_device *vdev;
657 if (!info->attrs[VDPA_ATTR_DEV_NAME])
659 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
661 down_write(&vdpa_dev_lock);
662 dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
664 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
668 vdev = container_of(dev, struct vdpa_device, dev);
670 NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
675 mdev->ops->dev_del(mdev, vdev);
679 up_write(&vdpa_dev_lock);
684 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
685 int flags, struct netlink_ext_ack *extack)
694 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
698 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
702 device_id = vdev->config->get_device_id(vdev);
703 vendor_id = vdev->config->get_vendor_id(vdev);
704 max_vq_size = vdev->config->get_vq_num_max(vdev);
705 if (vdev->config->get_vq_num_min)
706 min_vq_size = vdev->config->get_vq_num_min(vdev);
709 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
711 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
713 if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
715 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
717 if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
719 if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
722 genlmsg_end(msg, hdr);
726 genlmsg_cancel(msg, hdr);
730 static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
732 struct vdpa_device *vdev;
738 if (!info->attrs[VDPA_ATTR_DEV_NAME])
740 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
741 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
745 down_read(&vdpa_dev_lock);
746 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
748 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
752 vdev = container_of(dev, struct vdpa_device, dev);
757 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
761 err = genlmsg_reply(msg, info);
763 up_read(&vdpa_dev_lock);
769 up_read(&vdpa_dev_lock);
/* State threaded through bus_for_each_dev() callbacks during a dump. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* dump message under construction */
	struct netlink_callback *cb;	/* netlink dump context */
	int start_idx;			/* resume point from cb->args[0] */
	int idx;			/* current device index */
};
781 static int vdpa_dev_dump(struct device *dev, void *data)
783 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
784 struct vdpa_dev_dump_info *info = data;
789 if (info->idx < info->start_idx) {
793 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
794 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
802 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
804 struct vdpa_dev_dump_info info;
808 info.start_idx = cb->args[0];
811 down_read(&vdpa_dev_lock);
812 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
813 up_read(&vdpa_dev_lock);
814 cb->args[0] = info.idx;
818 static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
819 struct sk_buff *msg, u64 features,
820 const struct virtio_net_config *config)
824 if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
827 val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
828 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
831 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
833 struct virtio_net_config config = {};
837 vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
839 if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
843 val_u16 = le16_to_cpu(config.status);
844 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
847 val_u16 = le16_to_cpu(config.mtu);
848 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
851 features = vdev->config->get_driver_features(vdev);
852 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
856 return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
860 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
861 int flags, struct netlink_ext_ack *extack)
868 down_read(&vdev->cf_lock);
869 status = vdev->config->get_status(vdev);
870 if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
871 NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
876 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
877 VDPA_CMD_DEV_CONFIG_GET);
883 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
888 device_id = vdev->config->get_device_id(vdev);
889 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
896 err = vdpa_dev_net_config_fill(vdev, msg);
905 up_read(&vdev->cf_lock);
906 genlmsg_end(msg, hdr);
910 genlmsg_cancel(msg, hdr);
912 up_read(&vdev->cf_lock);
916 static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
917 struct genl_info *info, u32 index)
919 struct virtio_net_config config = {};
925 status = vdev->config->get_status(vdev);
926 if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
927 NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
930 vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
932 max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
933 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
936 features = vdev->config->get_driver_features(vdev);
937 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
938 features, VDPA_ATTR_PAD))
941 if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
944 err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
951 static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
952 struct genl_info *info, u32 index)
956 down_read(&vdev->cf_lock);
957 if (!vdev->config->get_vendor_vq_stats) {
962 err = vdpa_fill_stats_rec(vdev, msg, info, index);
964 up_read(&vdev->cf_lock);
968 static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
970 struct genl_info *info, u32 index)
975 u32 portid = info->snd_portid;
976 u32 seq = info->snd_seq;
979 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
980 VDPA_CMD_DEV_VSTATS_GET);
984 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
989 device_id = vdev->config->get_device_id(vdev);
990 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
997 if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
998 NL_SET_ERR_MSG_MOD(info->extack, "queue index excceeds max value");
1003 err = vendor_stats_fill(vdev, msg, info, index);
1009 genlmsg_end(msg, hdr);
1014 genlmsg_cancel(msg, hdr);
1018 static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
1020 struct vdpa_device *vdev;
1021 struct sk_buff *msg;
1022 const char *devname;
1026 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1028 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1029 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1033 down_read(&vdpa_dev_lock);
1034 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1036 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1040 vdev = container_of(dev, struct vdpa_device, dev);
1042 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1046 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
1049 err = genlmsg_reply(msg, info);
1054 up_read(&vdpa_dev_lock);
1060 static int vdpa_dev_config_dump(struct device *dev, void *data)
1062 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
1063 struct vdpa_dev_dump_info *info = data;
1068 if (info->idx < info->start_idx) {
1072 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
1073 info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1083 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
1085 struct vdpa_dev_dump_info info;
1089 info.start_idx = cb->args[0];
1092 down_read(&vdpa_dev_lock);
1093 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
1094 up_read(&vdpa_dev_lock);
1095 cb->args[0] = info.idx;
1099 static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
1100 struct genl_info *info)
1102 struct vdpa_device *vdev;
1103 struct sk_buff *msg;
1104 const char *devname;
1109 if (!info->attrs[VDPA_ATTR_DEV_NAME])
1112 if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
1115 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
1116 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1120 index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
1121 down_read(&vdpa_dev_lock);
1122 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
1124 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
1128 vdev = container_of(dev, struct vdpa_device, dev);
1130 NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
1134 err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
1138 err = genlmsg_reply(msg, info);
1141 up_read(&vdpa_dev_lock);
1149 up_read(&vdpa_dev_lock);
1153 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
1154 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
1155 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
1156 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
1157 [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1158 /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
1159 [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
1162 static const struct genl_ops vdpa_nl_ops[] = {
1164 .cmd = VDPA_CMD_MGMTDEV_GET,
1165 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1166 .doit = vdpa_nl_cmd_mgmtdev_get_doit,
1167 .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
1170 .cmd = VDPA_CMD_DEV_NEW,
1171 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1172 .doit = vdpa_nl_cmd_dev_add_set_doit,
1173 .flags = GENL_ADMIN_PERM,
1176 .cmd = VDPA_CMD_DEV_DEL,
1177 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1178 .doit = vdpa_nl_cmd_dev_del_set_doit,
1179 .flags = GENL_ADMIN_PERM,
1182 .cmd = VDPA_CMD_DEV_GET,
1183 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1184 .doit = vdpa_nl_cmd_dev_get_doit,
1185 .dumpit = vdpa_nl_cmd_dev_get_dumpit,
1188 .cmd = VDPA_CMD_DEV_CONFIG_GET,
1189 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1190 .doit = vdpa_nl_cmd_dev_config_get_doit,
1191 .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
1194 .cmd = VDPA_CMD_DEV_VSTATS_GET,
1195 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1196 .doit = vdpa_nl_cmd_dev_stats_get_doit,
1197 .flags = GENL_ADMIN_PERM,
1201 static struct genl_family vdpa_nl_family __ro_after_init = {
1202 .name = VDPA_GENL_NAME,
1203 .version = VDPA_GENL_VERSION,
1204 .maxattr = VDPA_ATTR_MAX,
1205 .policy = vdpa_nl_policy,
1207 .module = THIS_MODULE,
1209 .n_ops = ARRAY_SIZE(vdpa_nl_ops),
1212 static int vdpa_init(void)
1216 err = bus_register(&vdpa_bus);
1219 err = genl_register_family(&vdpa_nl_family);
1225 bus_unregister(&vdpa_bus);
1229 static void __exit vdpa_exit(void)
1231 genl_unregister_family(&vdpa_nl_family);
1232 bus_unregister(&vdpa_bus);
1233 ida_destroy(&vdpa_index_ida);
1235 core_initcall(vdpa_init);
1236 module_exit(vdpa_exit);
1238 MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
1239 MODULE_LICENSE("GPL v2");