1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
7 #include <linux/device.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <uapi/linux/idxd.h>
10 #include "registers.h"
/*
 * String names for each idxd work-queue type, indexed by enum idxd_wq_type.
 * Used by the wq "type" sysfs attribute to translate between the enum and
 * its user-visible name. (Closing brace of the array is not visible in this
 * fragment.)
 */
13 static char *idxd_wq_type_names[] = {
14 [IDXD_WQT_NONE] = "none",
15 [IDXD_WQT_KERNEL] = "kernel",
16 [IDXD_WQT_USER] = "user",
/*
 * Bus ->match callback for the dsa bus.
 *
 * A device matches only when it is ready to be configured:
 *  - an idxd device must be exactly in the CONF_READY state;
 *  - a wq device requires its parent idxd device to be at least CONF_READY
 *    and the wq itself to be disabled.
 * NOTE(review): several lines (return statements, braces) are elided in this
 * fragment; the success/failure return values are not visible here.
 */
19 static int idxd_config_bus_match(struct device *dev,
20 struct device_driver *drv)
24 if (is_idxd_dev(dev)) {
25 struct idxd_device *idxd = confdev_to_idxd(dev);
27 if (idxd->state != IDXD_DEV_CONF_READY)
30 } else if (is_idxd_wq_dev(dev)) {
31 struct idxd_wq *wq = confdev_to_wq(dev);
32 struct idxd_device *idxd = wq->idxd;
34 if (idxd->state < IDXD_DEV_CONF_READY)
/* A wq that is already enabled cannot be re-matched/re-configured. */
37 if (wq->state != IDXD_WQ_DISABLED) {
38 dev_dbg(dev, "%s not disabled\n", dev_name(dev));
45 dev_dbg(dev, "%s matched\n", dev_name(dev));
/*
 * Validate and enable a single work queue, entirely under wq->wq_lock.
 *
 * Checks performed before enabling (each failure unlocks and warns):
 *  - parent device must be enabled;
 *  - wq must currently be disabled;
 *  - wq must be attached to a group and have a non-empty name;
 *  - shared wqs additionally require shared-wq support and a non-zero
 *    threshold.
 * Then: allocate wq resources, write the device config (under dev_lock when
 * the device is configurable), enable the wq in hardware, map its portal,
 * and register either a dmaengine channel (kernel type) or a cdev (user
 * type).
 * NOTE(review): the return statements and some braces are elided in this
 * fragment, so the exact error codes returned on each path are not visible.
 */
50 static int enable_wq(struct idxd_wq *wq)
52 struct idxd_device *idxd = wq->idxd;
53 struct device *dev = &idxd->pdev->dev;
57 mutex_lock(&wq->wq_lock);
59 if (idxd->state != IDXD_DEV_ENABLED) {
60 mutex_unlock(&wq->wq_lock);
61 dev_warn(dev, "Enabling while device not enabled.\n");
65 if (wq->state != IDXD_WQ_DISABLED) {
66 mutex_unlock(&wq->wq_lock);
67 dev_warn(dev, "WQ %d already enabled.\n", wq->id);
72 mutex_unlock(&wq->wq_lock);
73 dev_warn(dev, "WQ not attached to group.\n");
77 if (strlen(wq->name) == 0) {
78 mutex_unlock(&wq->wq_lock);
79 dev_warn(dev, "WQ name not set.\n");
83 /* Shared WQ checks */
85 if (!device_swq_supported(idxd)) {
86 dev_warn(dev, "PASID not enabled and shared WQ.\n");
87 mutex_unlock(&wq->wq_lock);
91 * Shared wq with the threshold set to 0 means the user
92 * did not set the threshold or transitioned from a
93 * dedicated wq but did not set threshold. A value
94 * of 0 would effectively disable the shared wq. The
95 * driver does not allow a value of 0 to be set for
96 * threshold via sysfs.
98 if (wq->threshold == 0) {
99 dev_warn(dev, "Shared WQ and threshold 0.\n");
100 mutex_unlock(&wq->wq_lock);
105 rc = idxd_wq_alloc_resources(wq);
107 mutex_unlock(&wq->wq_lock);
108 dev_warn(dev, "WQ resource alloc failed\n");
/* Device config registers are written under the device spinlock. */
112 spin_lock_irqsave(&idxd->dev_lock, flags);
113 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
114 rc = idxd_device_config(idxd);
115 spin_unlock_irqrestore(&idxd->dev_lock, flags);
117 mutex_unlock(&wq->wq_lock);
118 dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
122 rc = idxd_wq_enable(wq);
124 mutex_unlock(&wq->wq_lock);
125 dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
129 rc = idxd_wq_map_portal(wq);
131 dev_warn(dev, "wq portal mapping failed: %d\n", rc);
/* Roll back the hardware enable if the portal cannot be mapped. */
132 rc = idxd_wq_disable(wq);
134 dev_warn(dev, "IDXD wq disable failed\n");
135 mutex_unlock(&wq->wq_lock);
139 wq->client_count = 0;
141 if (wq->type == IDXD_WQT_KERNEL) {
142 rc = idxd_wq_init_percpu_ref(wq);
144 dev_dbg(dev, "percpu_ref setup failed\n");
145 mutex_unlock(&wq->wq_lock);
150 if (is_idxd_wq_dmaengine(wq)) {
151 rc = idxd_register_dma_channel(wq);
153 dev_dbg(dev, "DMA channel register failed\n");
154 mutex_unlock(&wq->wq_lock);
157 } else if (is_idxd_wq_cdev(wq)) {
158 rc = idxd_wq_add_cdev(wq);
160 dev_dbg(dev, "Cdev creation failed\n");
161 mutex_unlock(&wq->wq_lock);
166 mutex_unlock(&wq->wq_lock);
167 dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
/*
 * Bus ->probe callback for the dsa bus.
 *
 * For an idxd device: require CONF_READY state, pin this module, write the
 * device configuration (under dev_lock when configurable), enable the
 * device, then register its dmaengine provider. Each failure drops the
 * module reference.
 * For a wq device: delegate to enable_wq().
 * NOTE(review): return statements are elided in this fragment; error codes
 * on the failure paths are not visible here.
 */
172 static int idxd_config_bus_probe(struct device *dev)
177 dev_dbg(dev, "%s called\n", __func__);
179 if (is_idxd_dev(dev)) {
180 struct idxd_device *idxd = confdev_to_idxd(dev);
182 if (idxd->state != IDXD_DEV_CONF_READY) {
183 dev_warn(dev, "Device not ready for config\n");
/* Hold a module reference while the device is bound. */
187 if (!try_module_get(THIS_MODULE))
190 /* Perform IDXD configuration and enabling */
191 spin_lock_irqsave(&idxd->dev_lock, flags);
192 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
193 rc = idxd_device_config(idxd);
194 spin_unlock_irqrestore(&idxd->dev_lock, flags);
196 module_put(THIS_MODULE);
197 dev_warn(dev, "Device config failed: %d\n", rc);
202 rc = idxd_device_enable(idxd);
204 module_put(THIS_MODULE);
205 dev_warn(dev, "Device enable failed: %d\n", rc);
209 dev_info(dev, "Device %s enabled\n", dev_name(dev));
211 rc = idxd_register_dma_device(idxd);
213 module_put(THIS_MODULE);
214 dev_dbg(dev, "Failed to register dmaengine device\n");
218 } else if (is_idxd_wq_dev(dev)) {
219 struct idxd_wq *wq = confdev_to_wq(dev);
221 return enable_wq(wq);
/*
 * Disable a single work queue under wq->wq_lock (inverse of enable_wq()).
 *
 * No-op if the wq is already disabled. Otherwise tears down the kernel-type
 * percpu ref (elided line after 239, presumably), unregisters the dmaengine
 * channel or cdev, warns if clients still hold references, unmaps the
 * portal, frees wq resources, and resets the client count.
 */
227 static void disable_wq(struct idxd_wq *wq)
229 struct idxd_device *idxd = wq->idxd;
230 struct device *dev = &idxd->pdev->dev;
232 mutex_lock(&wq->wq_lock);
233 dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
234 if (wq->state == IDXD_WQ_DISABLED) {
235 mutex_unlock(&wq->wq_lock);
239 if (wq->type == IDXD_WQT_KERNEL)
242 if (is_idxd_wq_dmaengine(wq))
243 idxd_unregister_dma_channel(wq);
244 else if (is_idxd_wq_cdev(wq))
245 idxd_wq_del_cdev(wq);
/* Disabling proceeds even with outstanding client references; warn only. */
247 if (idxd_wq_refcount(wq))
248 dev_warn(dev, "Clients has claim on wq %d: %d\n",
249 wq->id, idxd_wq_refcount(wq));
251 idxd_wq_unmap_portal(wq);
256 idxd_wq_free_resources(wq);
257 wq->client_count = 0;
258 mutex_unlock(&wq->wq_lock);
260 dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
/*
 * Bus ->remove callback for the dsa bus.
 *
 * For a wq device: disable it (the disable_wq() call itself is elided in
 * this fragment). For an idxd device: release the driver from every still
 * active wq, unregister the dmaengine provider, disable the device, run
 * idxd_wq_disable_cleanup() on each wq when the device is configurable,
 * and drop the module reference taken at probe.
 */
263 static void idxd_config_bus_remove(struct device *dev)
267 dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
269 /* disable workqueue here */
270 if (is_idxd_wq_dev(dev)) {
271 struct idxd_wq *wq = confdev_to_wq(dev);
274 } else if (is_idxd_dev(dev)) {
275 struct idxd_device *idxd = confdev_to_idxd(dev);
278 dev_dbg(dev, "%s removing dev %s\n", __func__,
279 dev_name(&idxd->conf_dev));
/* Force-unbind any wq that is still enabled before disabling the device. */
280 for (i = 0; i < idxd->max_wqs; i++) {
281 struct idxd_wq *wq = idxd->wqs[i];
283 if (wq->state == IDXD_WQ_DISABLED)
285 dev_warn(dev, "Active wq %d on disable %s.\n", i,
286 dev_name(&idxd->conf_dev));
287 device_release_driver(&wq->conf_dev);
290 idxd_unregister_dma_device(idxd);
291 rc = idxd_device_disable(idxd);
292 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
293 for (i = 0; i < idxd->max_wqs; i++) {
294 struct idxd_wq *wq = idxd->wqs[i];
296 mutex_lock(&wq->wq_lock);
297 idxd_wq_disable_cleanup(wq);
298 mutex_unlock(&wq->wq_lock);
/* Balances the try_module_get() in idxd_config_bus_probe(). */
301 module_put(THIS_MODULE);
303 dev_warn(dev, "Device disable failed\n");
305 dev_info(dev, "Device %s disabled\n", dev_name(dev));
/* Bus ->shutdown callback; currently only logs the call. */
310 static void idxd_config_bus_shutdown(struct device *dev)
312 dev_dbg(dev, "%s called\n", __func__);
/*
 * The dsa bus type: binds idxd devices and their wqs to the config driver
 * via the match/probe/remove/shutdown callbacks defined above. (The .name
 * initializer line is not visible in this fragment.)
 */
315 struct bus_type dsa_bus_type = {
317 .match = idxd_config_bus_match,
318 .probe = idxd_config_bus_probe,
319 .remove = idxd_config_bus_remove,
320 .shutdown = idxd_config_bus_shutdown,
/*
 * The generic idxd device driver registered on dsa_bus_type. (The .name
 * initializer and closing brace are not visible in this fragment.)
 */
323 static struct idxd_device_driver dsa_drv = {
326 .bus = &dsa_bus_type,
327 .owner = THIS_MODULE,
328 .mod_name = KBUILD_MODNAME,
332 /* IDXD generic driver setup */
/* Register the generic dsa driver with the driver core; returns 0 or -errno. */
333 int idxd_register_driver(void)
335 return driver_register(&dsa_drv.drv);
/* Unregister the generic dsa driver (inverse of idxd_register_driver()). */
338 void idxd_unregister_driver(void)
340 driver_unregister(&dsa_drv.drv);
343 /* IDXD engine attributes */
/*
 * sysfs "group_id" show for an engine: prints the engine's group id, or -1
 * when the engine has no group (the NULL check line is elided here).
 */
344 static ssize_t engine_group_id_show(struct device *dev,
345 struct device_attribute *attr, char *buf)
347 struct idxd_engine *engine =
348 container_of(dev, struct idxd_engine, conf_dev);
351 return sysfs_emit(buf, "%d\n", engine->group->id);
353 return sysfs_emit(buf, "%d\n", -1);
/*
 * sysfs "group_id" store for an engine: parses a group id (-1 detaches the
 * engine), rejects writes when the device is not configurable or the id is
 * out of range, and moves the engine between groups while keeping each
 * group's num_engines count consistent.
 */
356 static ssize_t engine_group_id_store(struct device *dev,
357 struct device_attribute *attr,
358 const char *buf, size_t count)
360 struct idxd_engine *engine =
361 container_of(dev, struct idxd_engine, conf_dev);
362 struct idxd_device *idxd = engine->idxd;
365 struct idxd_group *prevg;
367 rc = kstrtol(buf, 10, &id);
371 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
374 if (id > idxd->max_groups - 1 || id < -1)
/* id == -1 path (condition elided): detach from the current group. */
379 engine->group->num_engines--;
380 engine->group = NULL;
385 prevg = engine->group;
388 prevg->num_engines--;
389 engine->group = idxd->groups[id];
390 engine->group->num_engines++;
395 static struct device_attribute dev_attr_engine_group =
396 __ATTR(group_id, 0644, engine_group_id_show,
397 engine_group_id_store);
/* Attribute table and group exported for each engine's conf device. */
399 static struct attribute *idxd_engine_attributes[] = {
400 &dev_attr_engine_group.attr,
404 static const struct attribute_group idxd_engine_attribute_group = {
405 .attrs = idxd_engine_attributes,
408 static const struct attribute_group *idxd_engine_attribute_groups[] = {
409 &idxd_engine_attribute_group,
/* Device release callback: frees the engine object (kfree line elided). */
413 static void idxd_conf_engine_release(struct device *dev)
415 struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
420 struct device_type idxd_engine_device_type = {
422 .release = idxd_conf_engine_release,
423 .groups = idxd_engine_attribute_groups,
426 /* Group attributes */
/*
 * Recompute idxd->nr_tokens: the number of bandwidth tokens not reserved by
 * any group (max_tokens minus the sum of every group's tokens_reserved).
 */
428 static void idxd_set_free_tokens(struct idxd_device *idxd)
432 for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
433 struct idxd_group *g = idxd->groups[i];
435 tokens += g->tokens_reserved;
438 idxd->nr_tokens = idxd->max_tokens - tokens;
/* sysfs "tokens_reserved" show: prints the group's reserved token count. */
441 static ssize_t group_tokens_reserved_show(struct device *dev,
442 struct device_attribute *attr,
445 struct idxd_group *group =
446 container_of(dev, struct idxd_group, conf_dev);
448 return sysfs_emit(buf, "%u\n", group->tokens_reserved);
/*
 * sysfs "tokens_reserved" store: rejected for IAX devices (no token
 * concept), for non-configurable or enabled devices, and for values that
 * exceed max_tokens or the free tokens available to this group. On success
 * updates the group's reservation and recomputes the device free pool.
 */
451 static ssize_t group_tokens_reserved_store(struct device *dev,
452 struct device_attribute *attr,
453 const char *buf, size_t count)
455 struct idxd_group *group =
456 container_of(dev, struct idxd_group, conf_dev);
457 struct idxd_device *idxd = group->idxd;
461 rc = kstrtoul(buf, 10, &val);
465 if (idxd->data->type == IDXD_TYPE_IAX)
468 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
471 if (idxd->state == IDXD_DEV_ENABLED)
474 if (val > idxd->max_tokens)
477 if (val > idxd->nr_tokens + group->tokens_reserved)
480 group->tokens_reserved = val;
481 idxd_set_free_tokens(idxd);
485 static struct device_attribute dev_attr_group_tokens_reserved =
486 __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
487 group_tokens_reserved_store);
/* sysfs "tokens_allowed" show: prints the group's allowed token count. */
489 static ssize_t group_tokens_allowed_show(struct device *dev,
490 struct device_attribute *attr,
493 struct idxd_group *group =
494 container_of(dev, struct idxd_group, conf_dev);
496 return sysfs_emit(buf, "%u\n", group->tokens_allowed);
/*
 * sysfs "tokens_allowed" store: same IAX/configurable/enabled gating as
 * tokens_reserved; additionally requires at least 4 tokens per engine in
 * the group and no more than the group's reservation plus the free pool.
 */
499 static ssize_t group_tokens_allowed_store(struct device *dev,
500 struct device_attribute *attr,
501 const char *buf, size_t count)
503 struct idxd_group *group =
504 container_of(dev, struct idxd_group, conf_dev);
505 struct idxd_device *idxd = group->idxd;
509 rc = kstrtoul(buf, 10, &val);
513 if (idxd->data->type == IDXD_TYPE_IAX)
516 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
519 if (idxd->state == IDXD_DEV_ENABLED)
522 if (val < 4 * group->num_engines ||
523 val > group->tokens_reserved + idxd->nr_tokens)
526 group->tokens_allowed = val;
530 static struct device_attribute dev_attr_group_tokens_allowed =
531 __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
532 group_tokens_allowed_store);
/* sysfs "use_token_limit" show: prints whether the group enforces the limit. */
534 static ssize_t group_use_token_limit_show(struct device *dev,
535 struct device_attribute *attr,
538 struct idxd_group *group =
539 container_of(dev, struct idxd_group, conf_dev);
541 return sysfs_emit(buf, "%u\n", group->use_token_limit);
/*
 * sysfs "use_token_limit" store: gated like the other token attributes and
 * additionally rejected while the device-wide token_limit is 0; stores the
 * value as a boolean flag.
 */
544 static ssize_t group_use_token_limit_store(struct device *dev,
545 struct device_attribute *attr,
546 const char *buf, size_t count)
548 struct idxd_group *group =
549 container_of(dev, struct idxd_group, conf_dev);
550 struct idxd_device *idxd = group->idxd;
554 rc = kstrtoul(buf, 10, &val);
558 if (idxd->data->type == IDXD_TYPE_IAX)
561 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
564 if (idxd->state == IDXD_DEV_ENABLED)
567 if (idxd->token_limit == 0)
570 group->use_token_limit = !!val;
574 static struct device_attribute dev_attr_group_use_token_limit =
575 __ATTR(use_token_limit, 0644, group_use_token_limit_show,
576 group_use_token_limit_store);
/*
 * sysfs "engines" show: space-separated list of "engine<dev>.<id>" entries
 * for every engine belonging to this group. (A NULL-group skip is presumably
 * in the elided lines before the id comparison.)
 */
578 static ssize_t group_engines_show(struct device *dev,
579 struct device_attribute *attr, char *buf)
581 struct idxd_group *group =
582 container_of(dev, struct idxd_group, conf_dev);
584 struct idxd_device *idxd = group->idxd;
586 for (i = 0; i < idxd->max_engines; i++) {
587 struct idxd_engine *engine = idxd->engines[i];
592 if (engine->group->id == group->id)
593 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
599 rc += sysfs_emit_at(buf, rc, "\n");
604 static struct device_attribute dev_attr_group_engines =
605 __ATTR(engines, 0444, group_engines_show, NULL);
/*
 * sysfs "work_queues" show: space-separated list of "wq<dev>.<id>" entries
 * for every wq belonging to this group (same structure as engines above).
 */
607 static ssize_t group_work_queues_show(struct device *dev,
608 struct device_attribute *attr, char *buf)
610 struct idxd_group *group =
611 container_of(dev, struct idxd_group, conf_dev);
613 struct idxd_device *idxd = group->idxd;
615 for (i = 0; i < idxd->max_wqs; i++) {
616 struct idxd_wq *wq = idxd->wqs[i];
621 if (wq->group->id == group->id)
622 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
628 rc += sysfs_emit_at(buf, rc, "\n");
633 static struct device_attribute dev_attr_group_work_queues =
634 __ATTR(work_queues, 0444, group_work_queues_show, NULL);
/* sysfs "traffic_class_a" show: prints the group's TC-A value. */
636 static ssize_t group_traffic_class_a_show(struct device *dev,
637 struct device_attribute *attr,
640 struct idxd_group *group =
641 container_of(dev, struct idxd_group, conf_dev);
643 return sysfs_emit(buf, "%d\n", group->tc_a);
/*
 * sysfs "traffic_class_a" store: requires a configurable, not-enabled
 * device and a value in [0, 7]; the assignment to group->tc_a is in the
 * elided lines after the range check.
 */
646 static ssize_t group_traffic_class_a_store(struct device *dev,
647 struct device_attribute *attr,
648 const char *buf, size_t count)
650 struct idxd_group *group =
651 container_of(dev, struct idxd_group, conf_dev);
652 struct idxd_device *idxd = group->idxd;
656 rc = kstrtol(buf, 10, &val);
660 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
663 if (idxd->state == IDXD_DEV_ENABLED)
666 if (val < 0 || val > 7)
673 static struct device_attribute dev_attr_group_traffic_class_a =
674 __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
675 group_traffic_class_a_store);
/* sysfs "traffic_class_b" show: prints the group's TC-B value. */
677 static ssize_t group_traffic_class_b_show(struct device *dev,
678 struct device_attribute *attr,
681 struct idxd_group *group =
682 container_of(dev, struct idxd_group, conf_dev);
684 return sysfs_emit(buf, "%d\n", group->tc_b);
/*
 * sysfs "traffic_class_b" store: identical gating to traffic_class_a_store
 * (configurable device, not enabled, value in [0, 7]); the assignment is in
 * the elided lines.
 */
687 static ssize_t group_traffic_class_b_store(struct device *dev,
688 struct device_attribute *attr,
689 const char *buf, size_t count)
691 struct idxd_group *group =
692 container_of(dev, struct idxd_group, conf_dev);
693 struct idxd_device *idxd = group->idxd;
697 rc = kstrtol(buf, 10, &val);
701 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
704 if (idxd->state == IDXD_DEV_ENABLED)
707 if (val < 0 || val > 7)
714 static struct device_attribute dev_attr_group_traffic_class_b =
715 __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
716 group_traffic_class_b_store);
/* Attribute table and group exported for each group's conf device. */
718 static struct attribute *idxd_group_attributes[] = {
719 &dev_attr_group_work_queues.attr,
720 &dev_attr_group_engines.attr,
721 &dev_attr_group_use_token_limit.attr,
722 &dev_attr_group_tokens_allowed.attr,
723 &dev_attr_group_tokens_reserved.attr,
724 &dev_attr_group_traffic_class_a.attr,
725 &dev_attr_group_traffic_class_b.attr,
729 static const struct attribute_group idxd_group_attribute_group = {
730 .attrs = idxd_group_attributes,
733 static const struct attribute_group *idxd_group_attribute_groups[] = {
734 &idxd_group_attribute_group,
/* Device release callback: frees the group object (kfree line elided). */
738 static void idxd_conf_group_release(struct device *dev)
740 struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
745 struct device_type idxd_group_device_type = {
747 .release = idxd_conf_group_release,
748 .groups = idxd_group_attribute_groups,
751 /* IDXD work queue attribs */
/* sysfs "clients" show: prints the number of clients holding this wq. */
752 static ssize_t wq_clients_show(struct device *dev,
753 struct device_attribute *attr, char *buf)
755 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
757 return sysfs_emit(buf, "%d\n", wq->client_count);
760 static struct device_attribute dev_attr_wq_clients =
761 __ATTR(clients, 0444, wq_clients_show, NULL);
/*
 * sysfs "state" show: prints "disabled"/"enabled" per the wq state, falling
 * back to "unknown" (switch header and default case are elided here).
 */
763 static ssize_t wq_state_show(struct device *dev,
764 struct device_attribute *attr, char *buf)
766 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
769 case IDXD_WQ_DISABLED:
770 return sysfs_emit(buf, "disabled\n");
771 case IDXD_WQ_ENABLED:
772 return sysfs_emit(buf, "enabled\n");
775 return sysfs_emit(buf, "unknown\n");
778 static struct device_attribute dev_attr_wq_state =
779 __ATTR(state, 0444, wq_state_show, NULL);
/* sysfs "group_id" show for a wq: group id, or -1 when not attached. */
781 static ssize_t wq_group_id_show(struct device *dev,
782 struct device_attribute *attr, char *buf)
784 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
787 return sysfs_emit(buf, "%u\n", wq->group->id);
789 return sysfs_emit(buf, "-1\n");
/*
 * sysfs "group_id" store for a wq: requires a configurable device and a
 * disabled wq; -1 detaches the wq from its group, otherwise the wq is moved
 * to the selected group (the bookkeeping after line 823 is elided).
 */
792 static ssize_t wq_group_id_store(struct device *dev,
793 struct device_attribute *attr,
794 const char *buf, size_t count)
796 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
797 struct idxd_device *idxd = wq->idxd;
800 struct idxd_group *prevg, *group;
802 rc = kstrtol(buf, 10, &id);
806 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
809 if (wq->state != IDXD_WQ_DISABLED)
812 if (id > idxd->max_groups - 1 || id < -1)
817 wq->group->num_wqs--;
823 group = idxd->groups[id];
833 static struct device_attribute dev_attr_wq_group_id =
834 __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
/* sysfs "mode" show: "dedicated" or "shared" per the WQ_FLAG_DEDICATED bit. */
836 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
839 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
841 return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
/*
 * sysfs "mode" store: requires a configurable device and a disabled wq.
 * "dedicated" sets WQ_FLAG_DEDICATED; "shared" clears it but only when the
 * device supports shared wqs; other strings fall through to an (elided)
 * error return.
 */
844 static ssize_t wq_mode_store(struct device *dev,
845 struct device_attribute *attr, const char *buf,
848 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
849 struct idxd_device *idxd = wq->idxd;
851 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
854 if (wq->state != IDXD_WQ_DISABLED)
857 if (sysfs_streq(buf, "dedicated")) {
858 set_bit(WQ_FLAG_DEDICATED, &wq->flags);
860 } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
861 clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
869 static struct device_attribute dev_attr_wq_mode =
870 __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
/* sysfs "size" show: prints the wq's configured size. */
872 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
875 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
877 return sysfs_emit(buf, "%u\n", wq->size);
/*
 * Sum of the sizes claimed by all wqs on the device; used to verify a new
 * size still fits in max_wq_size. (The accumulation statement inside the
 * loop is elided in this fragment.)
 */
880 static int total_claimed_wq_size(struct idxd_device *idxd)
885 for (i = 0; i < idxd->max_wqs; i++) {
886 struct idxd_wq *wq = idxd->wqs[i];
/*
 * sysfs "size" store: requires a configurable, not-enabled device, and the
 * new total (replacing this wq's current size) must not exceed the device's
 * max_wq_size; the assignment is in the elided lines.
 */
894 static ssize_t wq_size_store(struct device *dev,
895 struct device_attribute *attr, const char *buf,
898 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
900 struct idxd_device *idxd = wq->idxd;
903 rc = kstrtoul(buf, 10, &size);
907 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
910 if (idxd->state == IDXD_DEV_ENABLED)
913 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
920 static struct device_attribute dev_attr_wq_size =
921 __ATTR(size, 0644, wq_size_show, wq_size_store);
/* sysfs "priority" show: prints the wq's priority. */
923 static ssize_t wq_priority_show(struct device *dev,
924 struct device_attribute *attr, char *buf)
926 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
928 return sysfs_emit(buf, "%u\n", wq->priority);
/*
 * sysfs "priority" store: requires a configurable device, a disabled wq,
 * and a priority no greater than IDXD_MAX_PRIORITY; the assignment is in
 * the elided lines.
 */
931 static ssize_t wq_priority_store(struct device *dev,
932 struct device_attribute *attr,
933 const char *buf, size_t count)
935 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
937 struct idxd_device *idxd = wq->idxd;
940 rc = kstrtoul(buf, 10, &prio);
944 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
947 if (wq->state != IDXD_WQ_DISABLED)
950 if (prio > IDXD_MAX_PRIORITY)
957 static struct device_attribute dev_attr_wq_priority =
958 __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
/* sysfs "block_on_fault" show: prints the WQ_FLAG_BLOCK_ON_FAULT bit. */
960 static ssize_t wq_block_on_fault_show(struct device *dev,
961 struct device_attribute *attr, char *buf)
963 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
965 return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
/*
 * sysfs "block_on_fault" store: requires a configurable device and a
 * disabled wq; parses a boolean and sets/clears WQ_FLAG_BLOCK_ON_FAULT
 * (the if/else around lines 988/990 is partially elided).
 */
968 static ssize_t wq_block_on_fault_store(struct device *dev,
969 struct device_attribute *attr,
970 const char *buf, size_t count)
972 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
973 struct idxd_device *idxd = wq->idxd;
977 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
980 if (wq->state != IDXD_WQ_DISABLED)
983 rc = kstrtobool(buf, &bof);
988 set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
990 clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
995 static struct device_attribute dev_attr_wq_block_on_fault =
996 __ATTR(block_on_fault, 0644, wq_block_on_fault_show,
997 wq_block_on_fault_store);
/* sysfs "threshold" show: prints the shared-wq threshold. */
999 static ssize_t wq_threshold_show(struct device *dev,
1000 struct device_attribute *attr, char *buf)
1002 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1004 return sysfs_emit(buf, "%u\n", wq->threshold);
/*
 * sysfs "threshold" store: value must be in (0, wq->size]; requires a
 * configurable device, a disabled wq, and a shared (not dedicated) wq —
 * the threshold only applies to shared wqs (see enable_wq()'s checks).
 */
1007 static ssize_t wq_threshold_store(struct device *dev,
1008 struct device_attribute *attr,
1009 const char *buf, size_t count)
1011 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1012 struct idxd_device *idxd = wq->idxd;
1016 rc = kstrtouint(buf, 0, &val);
1020 if (val > wq->size || val <= 0)
1023 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1026 if (wq->state != IDXD_WQ_DISABLED)
1029 if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
1032 wq->threshold = val;
1037 static struct device_attribute dev_attr_wq_threshold =
1038 __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
/*
 * sysfs "type" show: prints the wq type name from idxd_wq_type_names
 * (switch header and the IDXD_WQT_USER/NONE case labels are elided here).
 */
1040 static ssize_t wq_type_show(struct device *dev,
1041 struct device_attribute *attr, char *buf)
1043 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1046 case IDXD_WQT_KERNEL:
1047 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
1049 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
1052 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
/*
 * sysfs "type" store: only allowed while the wq is disabled; maps the input
 * string to an enum value (unknown strings hit an elided error return) and
 * clears the wq name whenever the type actually changes, since the name's
 * validity depends on the type (see wq_name_store()).
 */
1058 static ssize_t wq_type_store(struct device *dev,
1059 struct device_attribute *attr, const char *buf,
1062 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1063 enum idxd_wq_type old_type;
1065 if (wq->state != IDXD_WQ_DISABLED)
1068 old_type = wq->type;
1069 if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
1070 wq->type = IDXD_WQT_NONE;
1071 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
1072 wq->type = IDXD_WQT_KERNEL;
1073 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
1074 wq->type = IDXD_WQT_USER;
1078 /* If we are changing queue type, clear the name */
1079 if (wq->type != old_type)
1080 memset(wq->name, 0, WQ_NAME_SIZE + 1);
1085 static struct device_attribute dev_attr_wq_type =
1086 __ATTR(type, 0644, wq_type_show, wq_type_store);
/* sysfs "name" show: prints the wq's name string. */
1088 static ssize_t wq_name_show(struct device *dev,
1089 struct device_attribute *attr, char *buf)
1091 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1093 return sysfs_emit(buf, "%s\n", wq->name);
/*
 * sysfs "name" store: requires a disabled wq and a name of 1..WQ_NAME_SIZE
 * characters; kernel-type wqs on PASID-enabled devices are rejected (see
 * the in-code SVM note). Copies the buffer into the fixed-size name field
 * and strips any trailing newline from the sysfs write.
 */
1096 static ssize_t wq_name_store(struct device *dev,
1097 struct device_attribute *attr, const char *buf,
1100 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1102 if (wq->state != IDXD_WQ_DISABLED)
1105 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
1109 * This is temporarily placed here until we have SVM support for
1112 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
/* memset-then-strncpy: buffer is WQ_NAME_SIZE+1 so the result is always NUL-terminated. */
1115 memset(wq->name, 0, WQ_NAME_SIZE + 1);
1116 strncpy(wq->name, buf, WQ_NAME_SIZE);
1117 strreplace(wq->name, '\n', '\0');
1121 static struct device_attribute dev_attr_wq_name =
1122 __ATTR(name, 0644, wq_name_show, wq_name_store);
/*
 * sysfs "cdev_minor" show: reads the wq's cdev minor under wq_lock (a NULL
 * check on wq->idxd_cdev is presumably in the elided line before 1132) and
 * prints it.
 */
1124 static ssize_t wq_cdev_minor_show(struct device *dev,
1125 struct device_attribute *attr, char *buf)
1127 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1130 mutex_lock(&wq->wq_lock);
1132 minor = wq->idxd_cdev->minor;
1133 mutex_unlock(&wq->wq_lock);
1137 return sysfs_emit(buf, "%d\n", minor);
1140 static struct device_attribute dev_attr_wq_cdev_minor =
1141 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
/*
 * Parse a u64 from a sysfs buffer and round it up to a power of two.
 * Shared by the max_transfer_size and max_batch_size stores below.
 * (Error-handling lines between the parse and the rounding are elided.)
 */
1143 static int __get_sysfs_u64(const char *buf, u64 *val)
1147 rc = kstrtou64(buf, 0, val);
1154 *val = roundup_pow_of_two(*val);
/* sysfs "max_transfer_size" show: prints the wq's max transfer bytes. */
1158 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
1161 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1163 return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
/*
 * sysfs "max_transfer_size" store: requires a disabled wq; the value is
 * parsed and rounded via __get_sysfs_u64() and must not exceed the device's
 * max_xfer_bytes capability.
 */
1166 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
1167 const char *buf, size_t count)
1169 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1170 struct idxd_device *idxd = wq->idxd;
1174 if (wq->state != IDXD_WQ_DISABLED)
1177 rc = __get_sysfs_u64(buf, &xfer_size);
1181 if (xfer_size > idxd->max_xfer_bytes)
1184 wq->max_xfer_bytes = xfer_size;
1189 static struct device_attribute dev_attr_wq_max_transfer_size =
1190 __ATTR(max_transfer_size, 0644,
1191 wq_max_transfer_size_show, wq_max_transfer_size_store);
/* sysfs "max_batch_size" show: prints the wq's max batch size. */
1193 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
1195 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1197 return sysfs_emit(buf, "%u\n", wq->max_batch_size);
/*
 * sysfs "max_batch_size" store: same pattern as max_transfer_size — wq must
 * be disabled, value parsed/rounded by __get_sysfs_u64() and capped by the
 * device capability; stored narrowed to u32.
 */
1200 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
1201 const char *buf, size_t count)
1203 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1204 struct idxd_device *idxd = wq->idxd;
1208 if (wq->state != IDXD_WQ_DISABLED)
1211 rc = __get_sysfs_u64(buf, &batch_size);
1215 if (batch_size > idxd->max_batch_size)
1218 wq->max_batch_size = (u32)batch_size;
1223 static struct device_attribute dev_attr_wq_max_batch_size =
1224 __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
/* sysfs "ats_disable" show: prints whether ATS is disabled for this wq. */
1226 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
1228 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1230 return sysfs_emit(buf, "%u\n", wq->ats_dis);
/*
 * sysfs "ats_disable" store: requires a disabled wq and hardware support
 * for per-wq ATS control (wq_cap.wq_ats_support); parses a boolean into
 * wq->ats_dis.
 */
1233 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
1234 const char *buf, size_t count)
1236 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1237 struct idxd_device *idxd = wq->idxd;
1241 if (wq->state != IDXD_WQ_DISABLED)
1244 if (!idxd->hw.wq_cap.wq_ats_support)
1247 rc = kstrtobool(buf, &ats_dis);
1251 wq->ats_dis = ats_dis;
1256 static struct device_attribute dev_attr_wq_ats_disable =
1257 __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
/* Attribute table and group exported for each wq's conf device. */
1259 static struct attribute *idxd_wq_attributes[] = {
1260 &dev_attr_wq_clients.attr,
1261 &dev_attr_wq_state.attr,
1262 &dev_attr_wq_group_id.attr,
1263 &dev_attr_wq_mode.attr,
1264 &dev_attr_wq_size.attr,
1265 &dev_attr_wq_priority.attr,
1266 &dev_attr_wq_block_on_fault.attr,
1267 &dev_attr_wq_threshold.attr,
1268 &dev_attr_wq_type.attr,
1269 &dev_attr_wq_name.attr,
1270 &dev_attr_wq_cdev_minor.attr,
1271 &dev_attr_wq_max_transfer_size.attr,
1272 &dev_attr_wq_max_batch_size.attr,
1273 &dev_attr_wq_ats_disable.attr,
1277 static const struct attribute_group idxd_wq_attribute_group = {
1278 .attrs = idxd_wq_attributes,
1281 static const struct attribute_group *idxd_wq_attribute_groups[] = {
1282 &idxd_wq_attribute_group,
/* Device release callback: frees the wq object (kfree lines elided). */
1286 static void idxd_conf_wq_release(struct device *dev)
1288 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1294 struct device_type idxd_wq_device_type = {
1296 .release = idxd_conf_wq_release,
1297 .groups = idxd_wq_attribute_groups,
1300 /* IDXD device attribs */
/* sysfs "version" show: prints the hardware version register in hex. */
1301 static ssize_t version_show(struct device *dev, struct device_attribute *attr,
1304 struct idxd_device *idxd =
1305 container_of(dev, struct idxd_device, conf_dev);
1307 return sysfs_emit(buf, "%#x\n", idxd->hw.version);
1309 static DEVICE_ATTR_RO(version);
/* sysfs "max_work_queues_size" show: prints the device's max wq size. */
1311 static ssize_t max_work_queues_size_show(struct device *dev,
1312 struct device_attribute *attr,
1315 struct idxd_device *idxd =
1316 container_of(dev, struct idxd_device, conf_dev);
1318 return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
1320 static DEVICE_ATTR_RO(max_work_queues_size);
/* sysfs "max_groups" show: prints the device's group count capability. */
1322 static ssize_t max_groups_show(struct device *dev,
1323 struct device_attribute *attr, char *buf)
1325 struct idxd_device *idxd =
1326 container_of(dev, struct idxd_device, conf_dev);
1328 return sysfs_emit(buf, "%u\n", idxd->max_groups);
1330 static DEVICE_ATTR_RO(max_groups);
/* sysfs "max_work_queues" show: prints the device's wq count capability. */
1332 static ssize_t max_work_queues_show(struct device *dev,
1333 struct device_attribute *attr, char *buf)
1335 struct idxd_device *idxd =
1336 container_of(dev, struct idxd_device, conf_dev);
1338 return sysfs_emit(buf, "%u\n", idxd->max_wqs);
1340 static DEVICE_ATTR_RO(max_work_queues);
/* sysfs "max_engines" show: prints the device's engine count capability. */
1342 static ssize_t max_engines_show(struct device *dev,
1343 struct device_attribute *attr, char *buf)
1345 struct idxd_device *idxd =
1346 container_of(dev, struct idxd_device, conf_dev);
1348 return sysfs_emit(buf, "%u\n", idxd->max_engines);
1350 static DEVICE_ATTR_RO(max_engines);
/* sysfs "numa_node" show: prints the NUMA node of the underlying PCI dev. */
1352 static ssize_t numa_node_show(struct device *dev,
1353 struct device_attribute *attr, char *buf)
1355 struct idxd_device *idxd =
1356 container_of(dev, struct idxd_device, conf_dev);
1358 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1360 static DEVICE_ATTR_RO(numa_node);
/* sysfs "max_batch_size" show: prints the device-level batch capability. */
1362 static ssize_t max_batch_size_show(struct device *dev,
1363 struct device_attribute *attr, char *buf)
1365 struct idxd_device *idxd =
1366 container_of(dev, struct idxd_device, conf_dev);
1368 return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
1370 static DEVICE_ATTR_RO(max_batch_size);
/* sysfs "max_transfer_size" show: prints the device-level transfer capability. */
1372 static ssize_t max_transfer_size_show(struct device *dev,
1373 struct device_attribute *attr,
1376 struct idxd_device *idxd =
1377 container_of(dev, struct idxd_device, conf_dev);
1379 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
1381 static DEVICE_ATTR_RO(max_transfer_size);
/*
 * sysfs "op_cap" show: prints the four 64-bit operation-capability words in
 * hex, space-separated, newline-terminated.
 */
1383 static ssize_t op_cap_show(struct device *dev,
1384 struct device_attribute *attr, char *buf)
1386 struct idxd_device *idxd =
1387 container_of(dev, struct idxd_device, conf_dev);
1390 for (i = 0; i < 4; i++)
1391 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
1394 rc += sysfs_emit_at(buf, rc, "\n");
1397 static DEVICE_ATTR_RO(op_cap);
/* sysfs "gen_cap" show: prints the general-capability register in hex. */
1399 static ssize_t gen_cap_show(struct device *dev,
1400 struct device_attribute *attr, char *buf)
1402 struct idxd_device *idxd =
1403 container_of(dev, struct idxd_device, conf_dev);
1405 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1407 static DEVICE_ATTR_RO(gen_cap);
/* sysfs "configurable" show: prints whether the device is user-configurable. */
1409 static ssize_t configurable_show(struct device *dev,
1410 struct device_attribute *attr, char *buf)
1412 struct idxd_device *idxd =
1413 container_of(dev, struct idxd_device, conf_dev);
1415 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1417 static DEVICE_ATTR_RO(configurable);
/*
 * sysfs "clients" show (device level): sums client_count across all wqs
 * under the device spinlock so the snapshot is consistent.
 */
1419 static ssize_t clients_show(struct device *dev,
1420 struct device_attribute *attr, char *buf)
1422 struct idxd_device *idxd =
1423 container_of(dev, struct idxd_device, conf_dev);
1424 unsigned long flags;
1427 spin_lock_irqsave(&idxd->dev_lock, flags);
1428 for (i = 0; i < idxd->max_wqs; i++) {
1429 struct idxd_wq *wq = idxd->wqs[i];
1431 count += wq->client_count;
1433 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1435 return sysfs_emit(buf, "%d\n", count);
1437 static DEVICE_ATTR_RO(clients);
/* sysfs "pasid_enabled" show: prints whether PASID is enabled on the device. */
1439 static ssize_t pasid_enabled_show(struct device *dev,
1440 struct device_attribute *attr, char *buf)
1442 struct idxd_device *idxd =
1443 container_of(dev, struct idxd_device, conf_dev);
1445 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
1447 static DEVICE_ATTR_RO(pasid_enabled);
/*
 * sysfs "state" show: maps the device state to a string. Note DISABLED and
 * CONF_READY both report "disabled" — userspace cannot distinguish them.
 */
1449 static ssize_t state_show(struct device *dev,
1450 struct device_attribute *attr, char *buf)
1452 struct idxd_device *idxd =
1453 container_of(dev, struct idxd_device, conf_dev);
1455 switch (idxd->state) {
1456 case IDXD_DEV_DISABLED:
1457 case IDXD_DEV_CONF_READY:
1458 return sysfs_emit(buf, "disabled\n");
1459 case IDXD_DEV_ENABLED:
1460 return sysfs_emit(buf, "enabled\n");
1461 case IDXD_DEV_HALTED:
1462 return sysfs_emit(buf, "halted\n");
1465 return sysfs_emit(buf, "unknown\n");
1467 static DEVICE_ATTR_RO(state);
/*
 * sysfs "errors" show: prints the four software-error register words in
 * zero-padded hex, snapshotted under the device spinlock.
 */
1469 static ssize_t errors_show(struct device *dev,
1470 struct device_attribute *attr, char *buf)
1472 struct idxd_device *idxd =
1473 container_of(dev, struct idxd_device, conf_dev);
1475 unsigned long flags;
1477 spin_lock_irqsave(&idxd->dev_lock, flags);
1478 for (i = 0; i < 4; i++)
1479 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
1480 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1482 out += sysfs_emit_at(buf, out, "\n");
1485 static DEVICE_ATTR_RO(errors);
/* sysfs "max_tokens" show: prints the device's total token capability. */
1487 static ssize_t max_tokens_show(struct device *dev,
1488 struct device_attribute *attr, char *buf)
1490 struct idxd_device *idxd =
1491 container_of(dev, struct idxd_device, conf_dev);
1493 return sysfs_emit(buf, "%u\n", idxd->max_tokens);
1495 static DEVICE_ATTR_RO(max_tokens);
1497 static ssize_t token_limit_show(struct device *dev,
1498 struct device_attribute *attr, char *buf)
1500 struct idxd_device *idxd =
1501 container_of(dev, struct idxd_device, conf_dev);
1503 return sysfs_emit(buf, "%u\n", idxd->token_limit);
1506 static ssize_t token_limit_store(struct device *dev,
1507 struct device_attribute *attr,
1508 const char *buf, size_t count)
1510 struct idxd_device *idxd =
1511 container_of(dev, struct idxd_device, conf_dev);
1515 rc = kstrtoul(buf, 10, &val);
1519 if (idxd->state == IDXD_DEV_ENABLED)
1522 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1525 if (!idxd->hw.group_cap.token_limit)
1528 if (val > idxd->hw.group_cap.total_tokens)
1531 idxd->token_limit = val;
1534 static DEVICE_ATTR_RW(token_limit);
1536 static ssize_t cdev_major_show(struct device *dev,
1537 struct device_attribute *attr, char *buf)
1539 struct idxd_device *idxd =
1540 container_of(dev, struct idxd_device, conf_dev);
1542 return sysfs_emit(buf, "%u\n", idxd->major);
1544 static DEVICE_ATTR_RO(cdev_major);
1546 static ssize_t cmd_status_show(struct device *dev,
1547 struct device_attribute *attr, char *buf)
1549 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1551 return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1553 static DEVICE_ATTR_RO(cmd_status);
1555 static struct attribute *idxd_device_attributes[] = {
1556 &dev_attr_version.attr,
1557 &dev_attr_max_groups.attr,
1558 &dev_attr_max_work_queues.attr,
1559 &dev_attr_max_work_queues_size.attr,
1560 &dev_attr_max_engines.attr,
1561 &dev_attr_numa_node.attr,
1562 &dev_attr_max_batch_size.attr,
1563 &dev_attr_max_transfer_size.attr,
1564 &dev_attr_op_cap.attr,
1565 &dev_attr_gen_cap.attr,
1566 &dev_attr_configurable.attr,
1567 &dev_attr_clients.attr,
1568 &dev_attr_pasid_enabled.attr,
1569 &dev_attr_state.attr,
1570 &dev_attr_errors.attr,
1571 &dev_attr_max_tokens.attr,
1572 &dev_attr_token_limit.attr,
1573 &dev_attr_cdev_major.attr,
1574 &dev_attr_cmd_status.attr,
1578 static const struct attribute_group idxd_device_attribute_group = {
1579 .attrs = idxd_device_attributes,
1582 static const struct attribute_group *idxd_attribute_groups[] = {
1583 &idxd_device_attribute_group,
1587 static void idxd_conf_device_release(struct device *dev)
1589 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1591 kfree(idxd->groups);
1593 kfree(idxd->engines);
1594 kfree(idxd->irq_entries);
1595 kfree(idxd->int_handles);
1596 ida_free(&idxd_ida, idxd->id);
1600 struct device_type dsa_device_type = {
1602 .release = idxd_conf_device_release,
1603 .groups = idxd_attribute_groups,
1606 struct device_type iax_device_type = {
1608 .release = idxd_conf_device_release,
1609 .groups = idxd_attribute_groups,
1612 static int idxd_register_engine_devices(struct idxd_device *idxd)
1616 for (i = 0; i < idxd->max_engines; i++) {
1617 struct idxd_engine *engine = idxd->engines[i];
1619 rc = device_add(&engine->conf_dev);
1628 for (; i < idxd->max_engines; i++)
1629 put_device(&idxd->engines[i]->conf_dev);
1632 device_unregister(&idxd->engines[j]->conf_dev);
1636 static int idxd_register_group_devices(struct idxd_device *idxd)
1640 for (i = 0; i < idxd->max_groups; i++) {
1641 struct idxd_group *group = idxd->groups[i];
1643 rc = device_add(&group->conf_dev);
1652 for (; i < idxd->max_groups; i++)
1653 put_device(&idxd->groups[i]->conf_dev);
1656 device_unregister(&idxd->groups[j]->conf_dev);
1660 static int idxd_register_wq_devices(struct idxd_device *idxd)
1664 for (i = 0; i < idxd->max_wqs; i++) {
1665 struct idxd_wq *wq = idxd->wqs[i];
1667 rc = device_add(&wq->conf_dev);
1676 for (; i < idxd->max_wqs; i++)
1677 put_device(&idxd->wqs[i]->conf_dev);
1680 device_unregister(&idxd->wqs[j]->conf_dev);
1684 int idxd_register_devices(struct idxd_device *idxd)
1686 struct device *dev = &idxd->pdev->dev;
1689 rc = device_add(&idxd->conf_dev);
1693 rc = idxd_register_wq_devices(idxd);
1695 dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
1699 rc = idxd_register_engine_devices(idxd);
1701 dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
1705 rc = idxd_register_group_devices(idxd);
1707 dev_dbg(dev, "Group device registering failed: %d\n", rc);
1714 for (i = 0; i < idxd->max_engines; i++)
1715 device_unregister(&idxd->engines[i]->conf_dev);
1717 for (i = 0; i < idxd->max_wqs; i++)
1718 device_unregister(&idxd->wqs[i]->conf_dev);
1720 device_del(&idxd->conf_dev);
1724 void idxd_unregister_devices(struct idxd_device *idxd)
1728 for (i = 0; i < idxd->max_wqs; i++) {
1729 struct idxd_wq *wq = idxd->wqs[i];
1731 device_unregister(&wq->conf_dev);
1734 for (i = 0; i < idxd->max_engines; i++) {
1735 struct idxd_engine *engine = idxd->engines[i];
1737 device_unregister(&engine->conf_dev);
1740 for (i = 0; i < idxd->max_groups; i++) {
1741 struct idxd_group *group = idxd->groups[i];
1743 device_unregister(&group->conf_dev);
1747 int idxd_register_bus_type(void)
1749 return bus_register(&dsa_bus_type);
1752 void idxd_unregister_bus_type(void)
1754 bus_unregister(&dsa_bus_type);