// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}
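
/*
 * Example usage (a sketch; device and engine names depend on enumeration
 * order, here assumed to be dsa0/engine0.0):
 *
 *   echo 0  > /sys/bus/dsa/devices/dsa0/engine0.0/group_id   # join group 0
 *   echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id   # leave any group
 */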

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

const struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
	int i, rdbufs;

	for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		rdbufs += g->rdbufs_reserved;
	}

	idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
}
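
/*
 * Illustrative accounting (numbers are made up): with max_rdbufs = 96 and
 * two groups reserving 16 read buffers each, nr_rdbufs becomes 64, i.e. the
 * pool left over for non-reserved use.
 */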

static ssize_t group_read_buffers_reserved_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_show(dev, attr, buf);
}

static ssize_t group_read_buffers_reserved_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_rdbufs)
		return -EINVAL;

	if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
		return -EINVAL;

	group->rdbufs_reserved = val;
	idxd_set_free_rdbufs(idxd);
	return count;
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
	return group_read_buffers_reserved_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static struct device_attribute dev_attr_group_read_buffers_reserved =
		__ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
		       group_read_buffers_reserved_store);

static ssize_t group_read_buffers_allowed_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
}

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_show(dev, attr, buf);
}

static ssize_t group_read_buffers_allowed_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->rdbufs_reserved + idxd->nr_rdbufs)
		return -EINVAL;

	group->rdbufs_allowed = val;
	return count;
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
	return group_read_buffers_allowed_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static struct device_attribute dev_attr_group_read_buffers_allowed =
		__ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
		       group_read_buffers_allowed_store);

static ssize_t group_use_read_buffer_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
}

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_show(dev, attr, buf);
}

static ssize_t group_use_read_buffer_limit_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->rdbuf_limit == 0)
		return -EPERM;

	group->use_rdbuf_limit = !!val;
	return count;
}
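
/*
 * Note: use_read_buffer_limit only takes effect when the device-level
 * read_buffer_limit attribute is non-zero; the store above rejects the
 * write with -EPERM otherwise.
 */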

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
	return group_use_read_buffer_limit_store(dev, attr, buf, count);
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static struct device_attribute dev_attr_group_use_read_buffer_limit =
		__ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
		       group_use_read_buffer_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;

	/* Replace the trailing space with a newline */
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;

	/* Replace the trailing space with a newline */
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static ssize_t group_desc_progress_limit_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
}

static ssize_t group_desc_progress_limit_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->desc_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_desc_progress_limit =
		__ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
		       group_desc_progress_limit_store);

static ssize_t group_batch_progress_limit_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
}

static ssize_t group_batch_progress_limit_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	int val, rc;

	rc = kstrtoint(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (val & ~GENMASK(1, 0))
		return -EINVAL;

	group->batch_progress_limit = val;
	return count;
}

static struct device_attribute dev_attr_group_batch_progress_limit =
		__ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
		       group_batch_progress_limit_store);
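
/*
 * Both progress limit fields are two-bit values in the group configuration,
 * so the stores above accept only 0-3 (anything outside GENMASK(1, 0) is
 * rejected).
 */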

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_use_read_buffer_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_read_buffers_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_read_buffers_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	&dev_attr_group_desc_progress_limit.attr,
	&dev_attr_group_batch_progress_limit.attr,
	NULL,
};

static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
						     struct idxd_device *idxd)
{
	return (attr == &dev_attr_group_desc_progress_limit.attr ||
		attr == &dev_attr_group_batch_progress_limit.attr) &&
		!idxd->hw.group_cap.progress_limit;
}

static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr,
						   struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_group_use_token_limit.attr ||
		attr == &dev_attr_group_use_read_buffer_limit.attr ||
		attr == &dev_attr_group_tokens_allowed.attr ||
		attr == &dev_attr_group_read_buffers_allowed.attr ||
		attr == &dev_attr_group_tokens_reserved.attr ||
		attr == &dev_attr_group_read_buffers_reserved.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_group_attr_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;

	if (idxd_group_attr_progress_limit_invisible(attr, idxd))
		return 0;

	if (idxd_group_attr_read_buffers_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
	.is_visible = idxd_group_attr_visible,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

const struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
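
/*
 * Sizing example (illustrative numbers): with max_wq_size = 128, wq0 at 64
 * and wq1 at 32, writing 96 to wq0's size passes the check above because
 * 96 + (64 + 32) - 64 = 128, which does not exceed max_wq_size.
 */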

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof) {
		if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
			return -EOPNOTSUPP;

		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	}

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
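
/*
 * block_on_fault and prs_disable interact: the store above refuses to turn
 * block_on_fault on while WQ_FLAG_PRS_DISABLE is set, and the prs_disable
 * store further below clears WQ_FLAG_BLOCK_ON_FAULT whenever PRS is
 * disabled, so the two flags are never both in effect.
 */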

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	/* threshold is only meaningful for shared WQs */
	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	sprintf(wq->name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
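
/*
 * For example, writing "1000" through __get_sysfs_u64() yields 1024, and
 * "0x1000" yields 4096 unchanged: kstrtou64() is called with base 0 so both
 * decimal and hex are accepted, and the result is rounded up to the next
 * power of two.
 */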

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	if (ats_dis)
		set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	else
		clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
}

static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool prs_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	rc = kstrtobool(buf, &prs_dis);
	if (rc < 0)
		return rc;

	if (prs_dis) {
		set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
		/* when PRS is disabled, BOF needs to be off as well */
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
	}
	return count;
}

static struct device_attribute dev_attr_wq_prs_disable =
		__ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static ssize_t wq_enqcmds_retries_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
}

static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int rc;
	unsigned int retries;

	if (wq_dedicated(wq))
		return -EOPNOTSUPP;

	rc = kstrtouint(buf, 10, &retries);
	if (rc < 0)
		return rc;

	/* values above the maximum are silently clamped */
	if (retries > IDXD_ENQCMDS_MAX_RETRIES)
		retries = IDXD_ENQCMDS_MAX_RETRIES;

	wq->enqcmds_retries = retries;
	return count;
}

static struct device_attribute dev_attr_wq_enqcmds_retries =
		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);

static ssize_t wq_op_config_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
}

static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
{
	int bit;

	/*
	 * OPCAP is a 256-bit mask with one bit per operation the device
	 * supports. Walk the input mask and reject it if it sets any bit
	 * that is not also set in the device's OPCAP.
	 */
	for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
		if (!test_bit(bit, idxd->opcap_bmap))
			return -EINVAL;
	}

	return 0;
}

static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned long *opmask;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
	if (!opmask)
		return -ENOMEM;

	rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
	if (rc < 0)
		goto err;

	rc = idxd_verify_supported_opcap(idxd, opmask);
	if (rc < 0)
		goto err;

	bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);

	bitmap_free(opmask);
	return count;

err:
	bitmap_free(opmask);
	return rc;
}

static struct device_attribute dev_attr_wq_op_config =
		__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
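
/*
 * The op_config mask is parsed with bitmap_parse(), i.e. comma-separated
 * 32-bit hex words with the most significant word first, matching the %*pb
 * format used by the show side. A sketch (which operation bits are valid is
 * device-specific; read the device's op_cap first):
 *
 *   cat /sys/bus/dsa/devices/dsa0/wq0.0/op_config
 *   echo "00000000,...,00000003" > /sys/bus/dsa/devices/dsa0/wq0.0/op_config
 */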

static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->driver_name);
}

static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	char *input, *pos;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	input = kstrndup(buf, count, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
	sprintf(wq->driver_name, "%s", pos);
	kfree(input);
	return count;
}

static struct device_attribute dev_attr_wq_driver_name =
		__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);
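
/*
 * A typical dedicated user WQ setup via these attributes, as a sketch
 * (device, group and WQ names depend on enumeration order; accel-config is
 * the usual userspace frontend for this):
 *
 *   echo 0         > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo 16        > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *   echo 10        > /sys/bus/dsa/devices/dsa0/wq0.0/priority
 *   echo user      > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *   echo app0      > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */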

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_prs_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,
	&dev_attr_wq_driver_name.attr,
	NULL,
};

/* A WQ attr is invisible if the feature is not supported in WQCAP. */
#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
	((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
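
/*
 * For example, idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd)
 * expands to:
 *
 *   (attr == &dev_attr_wq_ats_disable.attr && !idxd->hw.wq_cap.wq_ats_support)
 */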

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_wq_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
		return 0;

	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
		return 0;

	if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
		return 0;

	return attr->mode;
}

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
	.is_visible = idxd_wq_attr_visible,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	xa_destroy(&wq->upasid_xa);
	kfree(wq);
}

const struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	DECLARE_BITMAP(swerr_bmap, 256);

	bitmap_zero(swerr_bmap, 256);
	spin_lock(&idxd->dev_lock);
	multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
	spin_unlock(&idxd->dev_lock);
	return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_read_buffers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
}

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
	return max_read_buffers_show(dev, attr, buf);
}

static DEVICE_ATTR_RO(max_tokens);	/* deprecated */
static DEVICE_ATTR_RO(max_read_buffers);

static ssize_t read_buffer_limit_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_show(dev, attr, buf);
}

static ssize_t read_buffer_limit_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.rdbuf_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_rdbufs)
		return -EINVAL;

	idxd->rdbuf_limit = val;
	return count;
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
	return read_buffer_limit_store(dev, attr, buf, count);
}
static DEVICE_ATTR_RW(token_limit);	/* deprecated */
static DEVICE_ATTR_RW(read_buffer_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}

/* Writing any value clears the last recorded command status */
static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	idxd->cmd_status = 0;
	return count;
}
static DEVICE_ATTR_RW(cmd_status);

static ssize_t iaa_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd->hw.version < DEVICE_VERSION_2)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
}
static DEVICE_ATTR_RO(iaa_cap);

static ssize_t event_log_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (!idxd->evl)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", idxd->evl->size);
}

static ssize_t event_log_size_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	if (!idxd->evl)
		return -EOPNOTSUPP;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
	    (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
		return -EINVAL;

	idxd->evl->size = val;
	return count;
}
static DEVICE_ATTR_RW(event_log_size);

static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	/* Intel IAA does not support batch processing, make it invisible */
	return attr == &dev_attr_max_batch_size.attr &&
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
						    struct idxd_device *idxd)
{
	/*
	 * Intel IAA does not support Read Buffer allocation control,
	 * make these attributes invisible.
	 */
	return (attr == &dev_attr_max_tokens.attr ||
		attr == &dev_attr_max_read_buffers.attr ||
		attr == &dev_attr_token_limit.attr ||
		attr == &dev_attr_read_buffer_limit.attr) &&
		idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
					       struct idxd_device *idxd)
{
	return attr == &dev_attr_iaa_cap.attr &&
	       (idxd->data->type != IDXD_TYPE_IAX ||
	       idxd->hw.version < DEVICE_VERSION_2);
}

static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	return (attr == &dev_attr_event_log_size.attr &&
		!idxd->hw.gen_cap.evl_support);
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd_device_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_event_log_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_max_read_buffers.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	&dev_attr_iaa_cap.attr,
	&dev_attr_event_log_size.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
	.is_visible = idxd_device_attr_visible,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->evl);
	kmem_cache_destroy(idxd->evl_cache);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
}

const struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

const struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	/* devices at index >= i were never added; drop the initial reference */
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	/* devices below the failure point were added and must be unregistered */
	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		put_device(wq_confdev(wq));
	}

	while (j--) {
		struct idxd_wq *wq = idxd->wqs[j];

		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}