1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
7 #include <linux/device.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <uapi/linux/idxd.h>
10 #include "registers.h"
/*
 * Human-readable names for the wq "type" sysfs attribute, indexed by
 * enum idxd_wq_type (see wq_type_show()/wq_type_store() below).
 */
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
/*
 * Bus ->match callback for the dsa bus (see dsa_bus_type below): decide
 * whether the driver may bind @dev.  An idxd device matches only once it
 * has reached CONF_READY; a wq matches only if its parent device is at
 * least CONF_READY and the wq itself is still disabled.
 * NOTE(review): listing gaps — braces/returns elided in this extract.
 */
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		/* device not yet ready for configuration -> no match */
		if (idxd->state != IDXD_DEV_CONF_READY)
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		/* parent device must be at least CONF_READY */
		if (idxd->state < IDXD_DEV_CONF_READY)

		/* an enabled wq cannot be re-bound */
		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));

	dev_dbg(dev, "%s matched\n", dev_name(dev));
/*
 * Bring a workqueue online: validate its configuration (device enabled,
 * wq disabled, group attached, name set, shared-wq constraints),
 * allocate resources, program the device config if the hardware is
 * configurable, enable the wq, map its portal, and register it with
 * dmaengine or the char-dev interface depending on its type.
 * Holds wq->wq_lock for the duration; dev_lock only around the config
 * register write.
 * NOTE(review): listing gaps — error returns/braces elided in this extract.
 */
static int enable_wq(struct idxd_wq *wq)
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);

	/* parent device must already be enabled */
	if (idxd->state != IDXD_DEV_ENABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Enabling while device not enabled.\n");

	if (wq->state != IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d already enabled.\n", wq->id);

	/* NOTE(review): group-membership test elided in this listing */
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ not attached to group.\n");

	if (strlen(wq->name) == 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ name not set.\n");

	/* Shared WQ checks */
		if (!device_swq_supported(idxd)) {
			dev_warn(dev, "PASID not enabled and shared WQ.\n");
			mutex_unlock(&wq->wq_lock);

		/*
		 * Shared wq with the threshold set to 0 means the user
		 * did not set the threshold or transitioned from a
		 * dedicated wq but did not set threshold. A value
		 * of 0 would effectively disable the shared wq. The
		 * driver does not allow a value of 0 to be set for
		 * threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			dev_warn(dev, "Shared WQ and threshold 0.\n");
			mutex_unlock(&wq->wq_lock);

	rc = idxd_wq_alloc_resources(wq);
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ resource alloc failed\n");

	/* write the configuration to hardware under the device lock */
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);

	rc = idxd_wq_enable(wq);
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);

	rc = idxd_wq_map_portal(wq);
		dev_warn(dev, "wq portal mapping failed: %d\n", rc);
		/* roll back the enable if the portal cannot be mapped */
		rc = idxd_wq_disable(wq);
			dev_warn(dev, "IDXD wq disable failed\n");
		mutex_unlock(&wq->wq_lock);

	wq->client_count = 0;

	/* kernel-type wqs track in-flight work via a percpu ref */
	if (wq->type == IDXD_WQT_KERNEL) {
		rc = idxd_wq_init_percpu_ref(wq);
			dev_dbg(dev, "percpu_ref setup failed\n");
			mutex_unlock(&wq->wq_lock);

	if (is_idxd_wq_dmaengine(wq)) {
		rc = idxd_register_dma_channel(wq);
			dev_dbg(dev, "DMA channel register failed\n");
			mutex_unlock(&wq->wq_lock);
	} else if (is_idxd_wq_cdev(wq)) {
		rc = idxd_wq_add_cdev(wq);
			dev_dbg(dev, "Cdev creation failed\n");
			mutex_unlock(&wq->wq_lock);

	mutex_unlock(&wq->wq_lock);
	dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
/*
 * Bus ->probe callback: enable either a whole idxd device (config write,
 * device enable, dmaengine registration) or a single wq (delegated to
 * enable_wq()).  Holds a module reference for as long as the device
 * stays enabled; dropped again on any failure path.
 * NOTE(review): listing gaps — error returns/braces elided in this extract.
 */
static int idxd_config_bus_probe(struct device *dev)
	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");

		/* pin this module while the device is enabled */
		if (!try_module_get(THIS_MODULE))

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);

		rc = idxd_device_enable(idxd);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		return enable_wq(wq);
/*
 * Take a workqueue offline: unregister it from dmaengine/cdev, unmap the
 * portal, free resources and reset the client count.  Warns (but does
 * not fail) if clients still hold references.  No-op if the wq is
 * already disabled.  Holds wq->wq_lock throughout.
 * NOTE(review): listing gaps — some statements elided in this extract.
 */
static void disable_wq(struct idxd_wq *wq)
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);

	/* kernel wq: quiesce in-flight work first (listing gap here) */
	if (wq->type == IDXD_WQT_KERNEL)

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	/* best-effort teardown even with outstanding client refs */
	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients has claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
/*
 * Bus ->remove callback: disable a wq (via disable_wq()) or tear down a
 * whole idxd device — release every still-active wq's driver, unregister
 * the dmaengine device, disable the hardware, clean up per-wq config
 * state, and drop the module reference taken at probe time.
 * NOTE(review): listing gaps — returns/braces elided in this extract.
 */
static int idxd_config_bus_remove(struct device *dev)
	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		/* force-release any wq that is still enabled */
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				mutex_lock(&wq->wq_lock);
				idxd_wq_disable_cleanup(wq);
				mutex_unlock(&wq->wq_lock);

		/* balances try_module_get() in idxd_config_bus_probe() */
		module_put(THIS_MODULE);
			dev_warn(dev, "Device disable failed\n");
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
/* Bus ->shutdown callback: nothing to do beyond a debug trace. */
static void idxd_config_bus_shutdown(struct device *dev)
	dev_dbg(dev, "%s called\n", __func__);
/* The "dsa" virtual bus: devices/wqs are bound and configured over sysfs. */
struct bus_type dsa_bus_type = {
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,

/* The single generic driver that binds idxd devices/wqs on the dsa bus. */
static struct idxd_device_driver dsa_drv = {
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
/* IDXD generic driver setup */

/* Register the generic dsa driver with the driver core. */
int idxd_register_driver(void)
	return driver_register(&dsa_drv.drv);

/* Unregister the generic dsa driver. */
void idxd_unregister_driver(void)
	driver_unregister(&dsa_drv.drv);
/* IDXD engine attributes */

/*
 * sysfs engine "group_id": shows the id of the group this engine belongs
 * to, or -1 when unassigned.
 */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

		return sysfs_emit(buf, "%d\n", engine->group->id);

		return sysfs_emit(buf, "%d\n", -1);

/*
 * Store a new group id (-1 detaches the engine).  Only permitted while
 * the device is configurable; keeps per-group engine counts in sync.
 * NOTE(review): listing gaps — error returns elided in this extract.
 */
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	/* valid ids are -1 (detach) .. max_groups-1 */
	if (id > idxd->max_groups - 1 || id < -1)

		/* id == -1: detach from the current group */
		engine->group->num_engines--;
		engine->group = NULL;

	prevg = engine->group;

		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);
/* sysfs attribute tables for engine conf_dev devices. */
static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,

/* conf_dev ->release: frees the engine allocation (body elided in listing). */
static void idxd_conf_engine_release(struct device *dev)
	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);

struct device_type idxd_engine_device_type = {
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
/* Group attributes */

/*
 * Recompute idxd->nr_tokens (free bandwidth tokens) as max_tokens minus
 * the sum of every group's reserved tokens.
 */
static void idxd_set_free_tokens(struct idxd_device *idxd)
	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;

	idxd->nr_tokens = idxd->max_tokens - tokens;
/*
 * sysfs group "tokens_reserved": bandwidth tokens reserved exclusively
 * for this group.  Writable only while the device is configurable and
 * disabled; not applicable to IAX devices.
 */
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	rc = kstrtoul(buf, 10, &val);

	/* IAX has no token configuration */
	if (idxd->data->type == IDXD_TYPE_IAX)

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	if (val > idxd->max_tokens)

	/* cannot reserve more than the free pool plus our own reservation */
	if (val > idxd->nr_tokens + group->tokens_reserved)

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
/*
 * sysfs group "tokens_allowed": upper bound of tokens this group may
 * consume.  Must be at least 4 per engine and no more than reserved +
 * free tokens; same configurability restrictions as tokens_reserved.
 */
static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	rc = kstrtoul(buf, 10, &val);

	/* IAX has no token configuration */
	if (idxd->data->type == IDXD_TYPE_IAX)

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	/* lower bound: 4 tokens per engine in the group */
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)

	group->tokens_allowed = val;

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
/*
 * sysfs group "use_token_limit": boolean — whether this group honors the
 * device-wide token limit.  Rejected while enabled, on IAX, or when no
 * device token limit has been configured yet.
 */
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	rc = kstrtoul(buf, 10, &val);

	/* IAX has no token configuration */
	if (idxd->data->type == IDXD_TYPE_IAX)

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	/* meaningless until a device token_limit is set */
	if (idxd->token_limit == 0)

	group->use_token_limit = !!val;

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
/*
 * sysfs group "engines" (RO): space-separated list of engines
 * ("engine<dev>.<id>") currently assigned to this group.
 */
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		/* NOTE(review): null-group skip likely elided by listing gap */
		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);

	rc += sysfs_emit_at(buf, rc, "\n");

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

/*
 * sysfs group "work_queues" (RO): space-separated list of wqs
 * ("wq<dev>.<id>") currently assigned to this group.
 */
static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		/* NOTE(review): null-group skip likely elided by listing gap */
		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);

	rc += sysfs_emit_at(buf, rc, "\n");

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
/*
 * sysfs group "traffic_class_a"/"traffic_class_b": traffic class values
 * (0..7).  Writable only while the device is configurable and disabled.
 */
static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	rc = kstrtol(buf, 10, &val);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	/* hardware traffic classes span 0..7 */
	if (val < 0 || val > 7)

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;

	rc = kstrtol(buf, 10, &val);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	/* hardware traffic classes span 0..7 */
	if (val < 0 || val > 7)

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
/* sysfs attribute tables for group conf_dev devices. */
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,

/* conf_dev ->release: frees the group allocation (body elided in listing). */
static void idxd_conf_group_release(struct device *dev)
	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);

struct device_type idxd_group_device_type = {
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
/* IDXD work queue attribs */

/* sysfs wq "clients" (RO): number of clients currently using this wq. */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

/* sysfs wq "state" (RO): "disabled", "enabled", or "unknown". */
static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");

	return sysfs_emit(buf, "unknown\n");

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
/*
 * sysfs wq "group_id": group this wq belongs to, or -1 when unassigned.
 * Store requires a configurable device and a disabled wq, and maintains
 * per-group wq counts.
 * NOTE(review): listing gaps — error returns elided in this extract.
 */
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

		return sysfs_emit(buf, "%u\n", wq->group->id);

	return sysfs_emit(buf, "-1\n");

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (wq->state != IDXD_WQ_DISABLED)

	/* valid ids are -1 (detach) .. max_groups-1 */
	if (id > idxd->max_groups - 1 || id < -1)

		/* id == -1: detach from the current group */
		wq->group->num_wqs--;

	group = idxd->groups[id];

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
/*
 * sysfs wq "mode": "dedicated" or "shared" (WQ_FLAG_DEDICATED).  Shared
 * mode is only accepted when the device supports shared wqs
 * (device_swq_supported()).  Store requires a disabled wq on a
 * configurable device.
 */
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (wq->state != IDXD_WQ_DISABLED)

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
/*
 * sysfs wq "size": number of wq entries.  The store rejects values that
 * would push the total claimed size across all wqs past max_wq_size.
 */
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->size);

/* Sum of sizes currently claimed by all wqs (summation elided in listing). */
static int total_claimed_wq_size(struct idxd_device *idxd)
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	rc = kstrtoul(buf, 10, &size);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (idxd->state == IDXD_DEV_ENABLED)

	/* subtract this wq's old size before checking the device cap */
	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
/*
 * sysfs wq "priority": scheduling priority, capped at IDXD_MAX_PRIORITY.
 * Store requires a configurable device and a disabled wq.
 */
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->priority);

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	rc = kstrtoul(buf, 10, &prio);

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (wq->state != IDXD_WQ_DISABLED)

	if (prio > IDXD_MAX_PRIORITY)

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
/*
 * sysfs wq "block_on_fault": boolean flag (WQ_FLAG_BLOCK_ON_FAULT).
 * Store requires a configurable device and a disabled wq.
 */
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (wq->state != IDXD_WQ_DISABLED)

	rc = kstrtobool(buf, &bof);

		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
/*
 * sysfs wq "threshold": shared-wq occupancy threshold.  Must be in
 * 1..wq->size, only settable on a disabled, non-dedicated wq of a
 * configurable device (a dedicated wq has no threshold).
 */
static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	rc = kstrtouint(buf, 0, &val);

	/* val is unsigned: "val <= 0" rejects exactly val == 0 */
	if (val > wq->size || val <= 0)

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))

	if (wq->state != IDXD_WQ_DISABLED)

	/* threshold is meaningless on a dedicated wq */
	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))

	wq->threshold = val;

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
/*
 * sysfs wq "type": "none", "kernel", or "user" (idxd_wq_type_names).
 * Changing type on a disabled wq clears the wq name so a stale name
 * can't carry over to the new usage.
 */
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);

	return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
/*
 * sysfs wq "name": user-visible wq name, 1..WQ_NAME_SIZE chars.  Only
 * settable while the wq is disabled.
 */
static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq->name);

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))

	/*
	 * strncpy alone would not guarantee NUL-termination; the memset
	 * of WQ_NAME_SIZE + 1 bytes beforehand provides it.
	 */
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
/*
 * sysfs wq "cdev_minor" (RO): minor number of the wq's char device,
 * read under wq_lock.
 */
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	mutex_lock(&wq->wq_lock);
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	return sysfs_emit(buf, "%d\n", minor);

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

/*
 * Parse a u64 from sysfs input and round it up to the next power of two.
 * Shared by the max_transfer_size/max_batch_size stores below.
 */
static int __get_sysfs_u64(const char *buf, u64 *val)
	rc = kstrtou64(buf, 0, val);

	*val = roundup_pow_of_two(*val);
/*
 * sysfs wq "max_transfer_size": per-wq transfer-size cap, rounded to a
 * power of two by __get_sysfs_u64() and bounded by the device cap.
 */
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (wq->state != IDXD_WQ_DISABLED)

	rc = __get_sysfs_u64(buf, &xfer_size);

	/* cannot exceed the device-wide maximum */
	if (xfer_size > idxd->max_xfer_bytes)

	wq->max_xfer_bytes = xfer_size;

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);
/*
 * sysfs wq "max_batch_size": per-wq batch-size cap, rounded to a power
 * of two by __get_sysfs_u64() and bounded by the device cap.
 */
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (wq->state != IDXD_WQ_DISABLED)

	rc = __get_sysfs_u64(buf, &batch_size);

	/* cannot exceed the device-wide maximum */
	if (batch_size > idxd->max_batch_size)

	/* value already validated against the u32-ranged device cap */
	wq->max_batch_size = (u32)batch_size;

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
/*
 * sysfs wq "ats_disable": boolean — disable ATS for this wq.  Only
 * settable on a disabled wq when the hardware advertises per-wq ATS
 * control (wq_cap.wq_ats_support).
 */
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (wq->state != IDXD_WQ_DISABLED)

	if (!idxd->hw.wq_cap.wq_ats_support)

	rc = kstrtobool(buf, &ats_dis);

	wq->ats_dis = ats_dis;

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
/* sysfs attribute tables for wq conf_dev devices. */
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,

/* conf_dev ->release: frees the wq allocation (body elided in listing). */
static void idxd_conf_wq_release(struct device *dev)
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

struct device_type idxd_wq_device_type = {
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
/* IDXD device attribs */

/* sysfs device "version" (RO): hardware version register. */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
static DEVICE_ATTR_RO(version);

/* sysfs device "max_work_queues_size" (RO): total wq entries available. */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
static DEVICE_ATTR_RO(max_work_queues_size);

/* sysfs device "max_groups" (RO). */
static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
static DEVICE_ATTR_RO(max_groups);

/* sysfs device "max_work_queues" (RO). */
static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
static DEVICE_ATTR_RO(max_work_queues);

/* sysfs device "max_engines" (RO). */
static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
static DEVICE_ATTR_RO(max_engines);

/* sysfs device "numa_node" (RO): NUMA node of the backing PCI device. */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
static DEVICE_ATTR_RO(numa_node);

/* sysfs device "max_batch_size" (RO). */
static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
static DEVICE_ATTR_RO(max_batch_size);

/* sysfs device "max_transfer_size" (RO). */
static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
static DEVICE_ATTR_RO(max_transfer_size);
/* sysfs device "op_cap" (RO): four 64-bit operation-capability words. */
static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc += sysfs_emit_at(buf, rc, "\n");
static DEVICE_ATTR_RO(op_cap);

/* sysfs device "gen_cap" (RO): general-capability register bits. */
static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
static DEVICE_ATTR_RO(gen_cap);

/* sysfs device "configurable" (RO): 1 if software may write device config. */
static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
static DEVICE_ATTR_RO(configurable);

/* sysfs device "clients" (RO): total client count across all wqs. */
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;

	/* dev_lock keeps the per-wq counts stable while summing */
	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
static DEVICE_ATTR_RO(clients);

/* sysfs device "pasid_enabled" (RO). */
static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
static DEVICE_ATTR_RO(pasid_enabled);
/*
 * sysfs device "state" (RO): "disabled" (covers CONF_READY too),
 * "enabled", "halted", or "unknown".
 */
static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");

	return sysfs_emit(buf, "unknown\n");
static DEVICE_ATTR_RO(state);

/*
 * sysfs device "errors" (RO): the four SWERR register words, snapshotted
 * under dev_lock so a concurrent error update can't tear the read.
 */
static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	out += sysfs_emit_at(buf, out, "\n");
static DEVICE_ATTR_RO(errors);
1489 static ssize_t max_tokens_show(struct device *dev,
1490 struct device_attribute *attr, char *buf)
1492 struct idxd_device *idxd =
1493 container_of(dev, struct idxd_device, conf_dev);
1495 return sysfs_emit(buf, "%u\n", idxd->max_tokens);
1497 static DEVICE_ATTR_RO(max_tokens);
1499 static ssize_t token_limit_show(struct device *dev,
1500 struct device_attribute *attr, char *buf)
1502 struct idxd_device *idxd =
1503 container_of(dev, struct idxd_device, conf_dev);
1505 return sysfs_emit(buf, "%u\n", idxd->token_limit);
1508 static ssize_t token_limit_store(struct device *dev,
1509 struct device_attribute *attr,
1510 const char *buf, size_t count)
1512 struct idxd_device *idxd =
1513 container_of(dev, struct idxd_device, conf_dev);
1517 rc = kstrtoul(buf, 10, &val);
1521 if (idxd->state == IDXD_DEV_ENABLED)
1524 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1527 if (!idxd->hw.group_cap.token_limit)
1530 if (val > idxd->hw.group_cap.total_tokens)
1533 idxd->token_limit = val;
1536 static DEVICE_ATTR_RW(token_limit);
1538 static ssize_t cdev_major_show(struct device *dev,
1539 struct device_attribute *attr, char *buf)
1541 struct idxd_device *idxd =
1542 container_of(dev, struct idxd_device, conf_dev);
1544 return sysfs_emit(buf, "%u\n", idxd->major);
1546 static DEVICE_ATTR_RO(cdev_major);
1548 static ssize_t cmd_status_show(struct device *dev,
1549 struct device_attribute *attr, char *buf)
1551 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1553 return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1555 static DEVICE_ATTR_RO(cmd_status);
1557 static struct attribute *idxd_device_attributes[] = {
1558 &dev_attr_version.attr,
1559 &dev_attr_max_groups.attr,
1560 &dev_attr_max_work_queues.attr,
1561 &dev_attr_max_work_queues_size.attr,
1562 &dev_attr_max_engines.attr,
1563 &dev_attr_numa_node.attr,
1564 &dev_attr_max_batch_size.attr,
1565 &dev_attr_max_transfer_size.attr,
1566 &dev_attr_op_cap.attr,
1567 &dev_attr_gen_cap.attr,
1568 &dev_attr_configurable.attr,
1569 &dev_attr_clients.attr,
1570 &dev_attr_pasid_enabled.attr,
1571 &dev_attr_state.attr,
1572 &dev_attr_errors.attr,
1573 &dev_attr_max_tokens.attr,
1574 &dev_attr_token_limit.attr,
1575 &dev_attr_cdev_major.attr,
1576 &dev_attr_cmd_status.attr,
1580 static const struct attribute_group idxd_device_attribute_group = {
1581 .attrs = idxd_device_attributes,
1584 static const struct attribute_group *idxd_attribute_groups[] = {
1585 &idxd_device_attribute_group,
1589 static void idxd_conf_device_release(struct device *dev)
1591 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1593 kfree(idxd->groups);
1595 kfree(idxd->engines);
1596 kfree(idxd->irq_entries);
1597 kfree(idxd->int_handles);
1598 ida_free(&idxd_ida, idxd->id);
1602 struct device_type dsa_device_type = {
1604 .release = idxd_conf_device_release,
1605 .groups = idxd_attribute_groups,
1608 struct device_type iax_device_type = {
1610 .release = idxd_conf_device_release,
1611 .groups = idxd_attribute_groups,
1614 static int idxd_register_engine_devices(struct idxd_device *idxd)
1618 for (i = 0; i < idxd->max_engines; i++) {
1619 struct idxd_engine *engine = idxd->engines[i];
1621 rc = device_add(&engine->conf_dev);
1630 for (; i < idxd->max_engines; i++)
1631 put_device(&idxd->engines[i]->conf_dev);
1634 device_unregister(&idxd->engines[j]->conf_dev);
1638 static int idxd_register_group_devices(struct idxd_device *idxd)
1642 for (i = 0; i < idxd->max_groups; i++) {
1643 struct idxd_group *group = idxd->groups[i];
1645 rc = device_add(&group->conf_dev);
1654 for (; i < idxd->max_groups; i++)
1655 put_device(&idxd->groups[i]->conf_dev);
1658 device_unregister(&idxd->groups[j]->conf_dev);
1662 static int idxd_register_wq_devices(struct idxd_device *idxd)
1666 for (i = 0; i < idxd->max_wqs; i++) {
1667 struct idxd_wq *wq = idxd->wqs[i];
1669 rc = device_add(&wq->conf_dev);
1678 for (; i < idxd->max_wqs; i++)
1679 put_device(&idxd->wqs[i]->conf_dev);
1682 device_unregister(&idxd->wqs[j]->conf_dev);
1686 int idxd_register_devices(struct idxd_device *idxd)
1688 struct device *dev = &idxd->pdev->dev;
1691 rc = device_add(&idxd->conf_dev);
1695 rc = idxd_register_wq_devices(idxd);
1697 dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
1701 rc = idxd_register_engine_devices(idxd);
1703 dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
1707 rc = idxd_register_group_devices(idxd);
1709 dev_dbg(dev, "Group device registering failed: %d\n", rc);
1716 for (i = 0; i < idxd->max_engines; i++)
1717 device_unregister(&idxd->engines[i]->conf_dev);
1719 for (i = 0; i < idxd->max_wqs; i++)
1720 device_unregister(&idxd->wqs[i]->conf_dev);
1722 device_del(&idxd->conf_dev);
1726 void idxd_unregister_devices(struct idxd_device *idxd)
1730 for (i = 0; i < idxd->max_wqs; i++) {
1731 struct idxd_wq *wq = idxd->wqs[i];
1733 device_unregister(&wq->conf_dev);
1736 for (i = 0; i < idxd->max_engines; i++) {
1737 struct idxd_engine *engine = idxd->engines[i];
1739 device_unregister(&engine->conf_dev);
1742 for (i = 0; i < idxd->max_groups; i++) {
1743 struct idxd_group *group = idxd->groups[i];
1745 device_unregister(&group->conf_dev);
1748 device_unregister(&idxd->conf_dev);
1751 int idxd_register_bus_type(void)
1753 return bus_register(&dsa_bus_type);
1756 void idxd_unregister_bus_type(void)
1758 bus_unregister(&dsa_bus_type);