1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2020, Red Hat. All rights reserved.
6 * Author: Jason Wang <jasowang@redhat.com>
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/virtio_ids.h>
/* All registered vdpa management devices; protected by vdpa_dev_mutex. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
/* Allocator of unique device indexes, used for the default "vdpa%u" names. */
static DEFINE_IDA(vdpa_index_ida);
24 void vdpa_set_status(struct vdpa_device *vdev, u8 status)
26 mutex_lock(&vdev->cf_mutex);
27 vdev->config->set_status(vdev, status);
28 mutex_unlock(&vdev->cf_mutex);
30 EXPORT_SYMBOL(vdpa_set_status);
/* Forward declaration; the family itself is defined near the end of the file. */
static struct genl_family vdpa_nl_family;
34 static int vdpa_dev_probe(struct device *d)
36 struct vdpa_device *vdev = dev_to_vdpa(d);
37 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
38 const struct vdpa_config_ops *ops = vdev->config;
39 u32 max_num, min_num = 1;
42 max_num = ops->get_vq_num_max(vdev);
43 if (ops->get_vq_num_min)
44 min_num = ops->get_vq_num_min(vdev);
45 if (max_num < min_num)
48 if (drv && drv->probe)
49 ret = drv->probe(vdev);
54 static void vdpa_dev_remove(struct device *d)
56 struct vdpa_device *vdev = dev_to_vdpa(d);
57 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
59 if (drv && drv->remove)
63 static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
65 struct vdpa_device *vdev = dev_to_vdpa(dev);
67 /* Check override first, and if set, only use the named driver */
68 if (vdev->driver_override)
69 return strcmp(vdev->driver_override, drv->name) == 0;
71 /* Currently devices must be supported by all vDPA bus drivers */
75 static ssize_t driver_override_store(struct device *dev,
76 struct device_attribute *attr,
77 const char *buf, size_t count)
79 struct vdpa_device *vdev = dev_to_vdpa(dev);
80 const char *driver_override, *old;
83 /* We need to keep extra room for a newline */
84 if (count >= (PAGE_SIZE - 1))
87 driver_override = kstrndup(buf, count, GFP_KERNEL);
91 cp = strchr(driver_override, '\n');
96 old = vdev->driver_override;
97 if (strlen(driver_override)) {
98 vdev->driver_override = driver_override;
100 kfree(driver_override);
101 vdev->driver_override = NULL;
110 static ssize_t driver_override_show(struct device *dev,
111 struct device_attribute *attr, char *buf)
113 struct vdpa_device *vdev = dev_to_vdpa(dev);
117 len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
static DEVICE_ATTR_RW(driver_override);

/* sysfs attributes exposed by every device on the vDPA bus. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
__ATTRIBUTE_GROUPS(vdpa_dev);
/* The vDPA bus: pairs devices with drivers and routes probe/remove. */
static struct bus_type vdpa_bus = {
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
142 static void vdpa_release_dev(struct device *d)
144 struct vdpa_device *vdev = dev_to_vdpa(d);
145 const struct vdpa_config_ops *ops = vdev->config;
150 ida_simple_remove(&vdpa_index_ida, vdev->index);
151 mutex_destroy(&vdev->cf_mutex);
152 kfree(vdev->driver_override);
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
160 * @parent: the parent device
161 * @config: the bus operations that is supported by this device
162 * @size: size of the parent structure that contains private data
163 * @name: name of the vdpa device; optional.
164 * @use_va: indicate whether virtual address must be used by this device
166 * Driver should use vdpa_alloc_device() wrapper macro instead of
167 * using this directly.
169 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name,
	struct vdpa_device *vdev;

	/* dma_map and dma_unmap must be provided as a pair, or not at all. */
	if (!!config->dma_map != !!config->dma_unmap)

	/* It should only work for the device that use on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))

	/* @size covers the caller's containing structure; zero-initialize it. */
	vdev = kzalloc(size, GFP_KERNEL);

	/* Reserve a unique index, also used for the default "vdpa%u" name. */
	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;	/* frees vdev on last put */
	vdev->config = config;
	vdev->features_valid = false;	/* flipped once features are negotiated */
	vdev->use_va = use_va;

	/* Prefer the caller-supplied name; fall back to an index-based one. */
	err = dev_set_name(&vdev->dev, "%s", name);
	err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);

	mutex_init(&vdev->cf_mutex);	/* serializes config-space accesses */
	device_initialize(&vdev->dev);

	/* Error unwind: return the reserved index before freeing. */
	ida_simple_remove(&vdpa_index_ida, vdev->index);
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
228 static int vdpa_name_match(struct device *dev, const void *data)
230 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
232 return (strcmp(dev_name(&vdev->dev), data) == 0);
235 static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
241 lockdep_assert_held(&vdpa_dev_mutex);
242 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
247 return device_add(&vdev->dev);
251 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have made a successful call to vdpa_alloc_device() before.
253 * Caller must invoke this routine in the management device dev_add()
254 * callback after setting up valid mgmtdev for this vdpa device.
255 * @vdev: the vdpa device to be registered to vDPA bus
256 * @nvqs: number of virtqueues supported by this device
258 * Return: Returns an error when fail to add device to vDPA bus
260 int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
265 return __vdpa_register_device(vdev, nvqs);
267 EXPORT_SYMBOL_GPL(_vdpa_register_device);
270 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
272 * @vdev: the vdpa device to be registered to vDPA bus
273 * @nvqs: number of virtqueues supported by this device
275 * Return: Returns an error when fail to add to vDPA bus
277 int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
281 mutex_lock(&vdpa_dev_mutex);
282 err = __vdpa_register_device(vdev, nvqs);
283 mutex_unlock(&vdpa_dev_mutex);
286 EXPORT_SYMBOL_GPL(vdpa_register_device);
289 * _vdpa_unregister_device - unregister a vDPA device
290 * Caller must invoke this routine as part of management device dev_del()
 * @vdev: the vdpa device to be unregistered from the vDPA bus
294 void _vdpa_unregister_device(struct vdpa_device *vdev)
296 lockdep_assert_held(&vdpa_dev_mutex);
297 WARN_ON(!vdev->mdev);
298 device_unregister(&vdev->dev);
300 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
303 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
306 void vdpa_unregister_device(struct vdpa_device *vdev)
308 mutex_lock(&vdpa_dev_mutex);
309 device_unregister(&vdev->dev);
310 mutex_unlock(&vdpa_dev_mutex);
312 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
315 * __vdpa_register_driver - register a vDPA device driver
316 * @drv: the vdpa device driver to be registered
317 * @owner: module owner of the driver
319 * Return: Returns an err when fail to do the registration
321 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
323 drv->driver.bus = &vdpa_bus;
324 drv->driver.owner = owner;
326 return driver_register(&drv->driver);
328 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
331 * vdpa_unregister_driver - unregister a vDPA device driver
332 * @drv: the vdpa device driver to be unregistered
334 void vdpa_unregister_driver(struct vdpa_driver *drv)
336 driver_unregister(&drv->driver);
338 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
341 * vdpa_mgmtdev_register - register a vdpa management device
343 * @mdev: Pointer to vdpa management device
344 * vdpa_mgmtdev_register() register a vdpa management device which supports
345 * vdpa device management.
346 * Return: Returns 0 on success or failure when required callback ops are not
349 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
351 if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
354 INIT_LIST_HEAD(&mdev->list);
355 mutex_lock(&vdpa_dev_mutex);
356 list_add_tail(&mdev->list, &mdev_head);
357 mutex_unlock(&vdpa_dev_mutex);
360 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
362 static int vdpa_match_remove(struct device *dev, void *data)
364 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
365 struct vdpa_mgmt_dev *mdev = vdev->mdev;
368 mdev->ops->dev_del(mdev, vdev);
372 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
374 mutex_lock(&vdpa_dev_mutex);
376 list_del(&mdev->list);
378 /* Filter out all the entries belong to this management device and delete it. */
379 bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
381 mutex_unlock(&vdpa_dev_mutex);
383 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
385 static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
387 void *buf, unsigned int len)
389 const struct vdpa_config_ops *ops = vdev->config;
392 * Config accesses aren't supposed to trigger before features are set.
393 * If it does happen we assume a legacy guest.
395 if (!vdev->features_valid)
396 vdpa_set_features(vdev, 0, true);
397 ops->get_config(vdev, offset, buf, len);
401 * vdpa_get_config - Get one or more device configuration fields.
402 * @vdev: vdpa device to operate on
403 * @offset: starting byte offset of the field
404 * @buf: buffer pointer to read to
405 * @len: length of the configuration fields in bytes
407 void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
408 void *buf, unsigned int len)
410 mutex_lock(&vdev->cf_mutex);
411 vdpa_get_config_unlocked(vdev, offset, buf, len);
412 mutex_unlock(&vdev->cf_mutex);
414 EXPORT_SYMBOL_GPL(vdpa_get_config);
417 * vdpa_set_config - Set one or more device configuration fields.
418 * @vdev: vdpa device to operate on
419 * @offset: starting byte offset of the field
420 * @buf: buffer pointer to read from
421 * @length: length of the configuration fields in bytes
423 void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
424 const void *buf, unsigned int length)
426 mutex_lock(&vdev->cf_mutex);
427 vdev->config->set_config(vdev, offset, buf, length);
428 mutex_unlock(&vdev->cf_mutex);
430 EXPORT_SYMBOL_GPL(vdpa_set_config);
432 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
433 const char *busname, const char *devname)
435 /* Bus name is optional for simulated management device, so ignore the
436 * device with bus if bus attribute is provided.
438 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
441 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
444 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
445 (strcmp(dev_name(mdev->device), devname) == 0))
451 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
453 struct vdpa_mgmt_dev *mdev;
454 const char *busname = NULL;
457 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
458 return ERR_PTR(-EINVAL);
459 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
460 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
461 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
463 list_for_each_entry(mdev, &mdev_head, list) {
464 if (mgmtdev_handle_match(mdev, busname, devname))
467 return ERR_PTR(-ENODEV);
470 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
472 if (mdev->device->bus &&
473 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
475 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
/* Emit one VDPA_CMD_MGMTDEV_NEW message describing @mdev into @msg. */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);

	/* Fold the supported virtio device IDs into a 64-bit class bitmap;
	 * IDs above 63 cannot be represented and are skipped.
	 */
	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {

	genlmsg_end(msg, hdr);

	/* Error path: drop the partially-built message header. */
	genlmsg_cancel(msg, hdr);
/* Netlink doit: reply with the description of one management device. */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	/* Resolve the handle attributes under the global vdpa mutex. */
	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
		/* Lookup failure path. */
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);

	err = genlmsg_reply(msg, info);
/* Netlink dumpit: stream all management devices, resuming at cb->args[0]. */
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];	/* index of the first entry not yet sent */

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
	mutex_unlock(&vdpa_dev_mutex);
/* Netlink attributes that configure virtio-net device properties. */
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
/* Netlink doit: create a new vdpa device through its management device. */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	/* Collect the optional virtio-net attributes into @config and record
	 * each one that was supplied in config.mask.
	 */
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);

	/* Skip checking capability if user didn't prefer to configure any
	 * device networking attributes. It is likely that user might have used
	 * a device specific method to configure such attributes or using device
	 * default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");

	/* Every requested attribute must be supported by this mgmt device. */
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "All provided attributes are not supported");

	/* Delegate the actual device creation to the management device. */
	err = mdev->ops->dev_add(mdev, name, &config);

	mutex_unlock(&vdpa_dev_mutex);
/* Netlink doit: delete a user-created vdpa device by name. */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");

	vdev = container_of(dev, struct vdpa_device, dev);
		/* A device without an owning mgmtdev was not user-created. */
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");

	/* Deletion is delegated to the owning management device. */
	mdev->ops->dev_del(mdev, vdev);

	mutex_unlock(&vdpa_dev_mutex);
/* Emit one VDPA_CMD_DEV_NEW message describing @vdev into @msg. */
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);

	/* The owning management-device handle comes first. */
	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);

	/* Query identity and queue-size limits through the config ops. */
	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))

	genlmsg_end(msg, hdr);

	/* Error path: drop the partially-built message header. */
	genlmsg_cancel(msg, hdr);
/* Netlink doit: reply with the description of one vdpa device. */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	/* Resolve the device name while holding the global vdpa mutex. */
	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");

	vdev = container_of(dev, struct vdpa_device, dev);

	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	err = genlmsg_reply(msg, info);

	mutex_unlock(&vdpa_dev_mutex);
/* Cursor state shared between the dumpit entry points and bus_for_each_dev(). */
struct vdpa_dev_dump_info {
	struct netlink_callback *cb;	/* netlink dump context */
/* bus_for_each_dev() callback: dump one device, honoring the resume index. */
static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;

	/* Skip entries already delivered by a previous dump pass. */
	if (info->idx < info->start_idx) {

	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
798 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
800 struct vdpa_dev_dump_info info;
804 info.start_idx = cb->args[0];
807 mutex_lock(&vdpa_dev_mutex);
808 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
809 mutex_unlock(&vdpa_dev_mutex);
810 cb->args[0] = info.idx;
814 static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
815 struct sk_buff *msg, u64 features,
816 const struct virtio_net_config *config)
820 if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
823 val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
824 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
827 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
829 struct virtio_net_config config = {};
833 vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
835 if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
839 val_u16 = le16_to_cpu(config.status);
840 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
843 val_u16 = le16_to_cpu(config.mtu);
844 if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
847 features = vdev->config->get_driver_features(vdev);
848 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
852 return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
/* Emit one VDPA_CMD_DEV_CONFIG_GET message; requires FEATURES_OK status. */
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{

	/* Hold cf_mutex so status and config are read consistently. */
	mutex_lock(&vdev->cf_mutex);
	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {

	/* Per-class payload; only virtio-net config is filled in here. */
	err = vdpa_dev_net_config_fill(vdev, msg);

	mutex_unlock(&vdev->cf_mutex);
	genlmsg_end(msg, hdr);

	/* Error path: cancel the message before releasing the mutex. */
	genlmsg_cancel(msg, hdr);
	mutex_unlock(&vdev->cf_mutex);
/* Netlink doit: reply with the live configuration of one vdpa device. */
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	/* Resolve the device name while holding the global vdpa mutex. */
	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");

	vdev = container_of(dev, struct vdpa_device, dev);
		/* Devices without an owning mgmtdev are not exposed here. */
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");

	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
	err = genlmsg_reply(msg, info);

	mutex_unlock(&vdpa_dev_mutex);
/* bus_for_each_dev() callback: dump one device's config, honoring resume index. */
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;

	/* Skip entries already delivered by a previous dump pass. */
	if (info->idx < info->start_idx) {

	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
977 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
979 struct vdpa_dev_dump_info info;
983 info.start_idx = cb->args[0];
986 mutex_lock(&vdpa_dev_mutex);
987 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
988 mutex_unlock(&vdpa_dev_mutex);
989 cb->args[0] = info.idx;
/* Attribute validation policy shared by all vdpa netlink commands. */
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
/* Command table: device creation/deletion require CAP_NET_ADMIN. */
static const struct genl_ops vdpa_nl_ops[] = {
	.cmd = VDPA_CMD_MGMTDEV_GET,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.doit = vdpa_nl_cmd_mgmtdev_get_doit,
	.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	.cmd = VDPA_CMD_DEV_NEW,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.doit = vdpa_nl_cmd_dev_add_set_doit,
	.flags = GENL_ADMIN_PERM,
	.cmd = VDPA_CMD_DEV_DEL,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.doit = vdpa_nl_cmd_dev_del_set_doit,
	.flags = GENL_ADMIN_PERM,
	.cmd = VDPA_CMD_DEV_GET,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.doit = vdpa_nl_cmd_dev_get_doit,
	.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	.cmd = VDPA_CMD_DEV_CONFIG_GET,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.doit = vdpa_nl_cmd_dev_config_get_doit,
	.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
/* Generic netlink family definition for the vdpa management API. */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.module = THIS_MODULE,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
1046 static int vdpa_init(void)
1050 err = bus_register(&vdpa_bus);
1053 err = genl_register_family(&vdpa_nl_family);
1059 bus_unregister(&vdpa_bus);
1063 static void __exit vdpa_exit(void)
1065 genl_unregister_family(&vdpa_nl_family);
1066 bus_unregister(&vdpa_bus);
1067 ida_destroy(&vdpa_index_ida);
/* Registered via core_initcall so the bus exists before drivers initialize. */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");