1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
13 static char *idxd_wq_type_names[] = {
14 [IDXD_WQT_NONE] = "none",
15 [IDXD_WQT_KERNEL] = "kernel",
16 [IDXD_WQT_USER] = "user",
19 /* IDXD engine attributes */
20 static ssize_t engine_group_id_show(struct device *dev,
21 struct device_attribute *attr, char *buf)
23 struct idxd_engine *engine = confdev_to_engine(dev);
26 return sysfs_emit(buf, "%d\n", engine->group->id);
28 return sysfs_emit(buf, "%d\n", -1);
31 static ssize_t engine_group_id_store(struct device *dev,
32 struct device_attribute *attr,
33 const char *buf, size_t count)
35 struct idxd_engine *engine = confdev_to_engine(dev);
36 struct idxd_device *idxd = engine->idxd;
39 struct idxd_group *prevg;
41 rc = kstrtol(buf, 10, &id);
45 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
48 if (id > idxd->max_groups - 1 || id < -1)
53 engine->group->num_engines--;
59 prevg = engine->group;
63 engine->group = idxd->groups[id];
64 engine->group->num_engines++;
69 static struct device_attribute dev_attr_engine_group =
70 __ATTR(group_id, 0644, engine_group_id_show,
71 engine_group_id_store);
73 static struct attribute *idxd_engine_attributes[] = {
74 &dev_attr_engine_group.attr,
78 static const struct attribute_group idxd_engine_attribute_group = {
79 .attrs = idxd_engine_attributes,
82 static const struct attribute_group *idxd_engine_attribute_groups[] = {
83 &idxd_engine_attribute_group,
87 static void idxd_conf_engine_release(struct device *dev)
89 struct idxd_engine *engine = confdev_to_engine(dev);
94 struct device_type idxd_engine_device_type = {
96 .release = idxd_conf_engine_release,
97 .groups = idxd_engine_attribute_groups,
100 /* Group attributes */
102 static void idxd_set_free_tokens(struct idxd_device *idxd)
106 for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
107 struct idxd_group *g = idxd->groups[i];
109 tokens += g->tokens_reserved;
112 idxd->nr_tokens = idxd->max_tokens - tokens;
115 static ssize_t group_tokens_reserved_show(struct device *dev,
116 struct device_attribute *attr,
119 struct idxd_group *group = confdev_to_group(dev);
121 return sysfs_emit(buf, "%u\n", group->tokens_reserved);
124 static ssize_t group_tokens_reserved_store(struct device *dev,
125 struct device_attribute *attr,
126 const char *buf, size_t count)
128 struct idxd_group *group = confdev_to_group(dev);
129 struct idxd_device *idxd = group->idxd;
133 rc = kstrtoul(buf, 10, &val);
137 if (idxd->data->type == IDXD_TYPE_IAX)
140 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
143 if (idxd->state == IDXD_DEV_ENABLED)
146 if (val > idxd->max_tokens)
149 if (val > idxd->nr_tokens + group->tokens_reserved)
152 group->tokens_reserved = val;
153 idxd_set_free_tokens(idxd);
157 static struct device_attribute dev_attr_group_tokens_reserved =
158 __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
159 group_tokens_reserved_store);
161 static ssize_t group_tokens_allowed_show(struct device *dev,
162 struct device_attribute *attr,
165 struct idxd_group *group = confdev_to_group(dev);
167 return sysfs_emit(buf, "%u\n", group->tokens_allowed);
170 static ssize_t group_tokens_allowed_store(struct device *dev,
171 struct device_attribute *attr,
172 const char *buf, size_t count)
174 struct idxd_group *group = confdev_to_group(dev);
175 struct idxd_device *idxd = group->idxd;
179 rc = kstrtoul(buf, 10, &val);
183 if (idxd->data->type == IDXD_TYPE_IAX)
186 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
189 if (idxd->state == IDXD_DEV_ENABLED)
192 if (val < 4 * group->num_engines ||
193 val > group->tokens_reserved + idxd->nr_tokens)
196 group->tokens_allowed = val;
200 static struct device_attribute dev_attr_group_tokens_allowed =
201 __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
202 group_tokens_allowed_store);
204 static ssize_t group_use_token_limit_show(struct device *dev,
205 struct device_attribute *attr,
208 struct idxd_group *group = confdev_to_group(dev);
210 return sysfs_emit(buf, "%u\n", group->use_token_limit);
213 static ssize_t group_use_token_limit_store(struct device *dev,
214 struct device_attribute *attr,
215 const char *buf, size_t count)
217 struct idxd_group *group = confdev_to_group(dev);
218 struct idxd_device *idxd = group->idxd;
222 rc = kstrtoul(buf, 10, &val);
226 if (idxd->data->type == IDXD_TYPE_IAX)
229 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
232 if (idxd->state == IDXD_DEV_ENABLED)
235 if (idxd->token_limit == 0)
238 group->use_token_limit = !!val;
242 static struct device_attribute dev_attr_group_use_token_limit =
243 __ATTR(use_token_limit, 0644, group_use_token_limit_show,
244 group_use_token_limit_store);
246 static ssize_t group_engines_show(struct device *dev,
247 struct device_attribute *attr, char *buf)
249 struct idxd_group *group = confdev_to_group(dev);
251 struct idxd_device *idxd = group->idxd;
253 for (i = 0; i < idxd->max_engines; i++) {
254 struct idxd_engine *engine = idxd->engines[i];
259 if (engine->group->id == group->id)
260 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
266 rc += sysfs_emit_at(buf, rc, "\n");
271 static struct device_attribute dev_attr_group_engines =
272 __ATTR(engines, 0444, group_engines_show, NULL);
274 static ssize_t group_work_queues_show(struct device *dev,
275 struct device_attribute *attr, char *buf)
277 struct idxd_group *group = confdev_to_group(dev);
279 struct idxd_device *idxd = group->idxd;
281 for (i = 0; i < idxd->max_wqs; i++) {
282 struct idxd_wq *wq = idxd->wqs[i];
287 if (wq->group->id == group->id)
288 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
294 rc += sysfs_emit_at(buf, rc, "\n");
299 static struct device_attribute dev_attr_group_work_queues =
300 __ATTR(work_queues, 0444, group_work_queues_show, NULL);
302 static ssize_t group_traffic_class_a_show(struct device *dev,
303 struct device_attribute *attr,
306 struct idxd_group *group = confdev_to_group(dev);
308 return sysfs_emit(buf, "%d\n", group->tc_a);
311 static ssize_t group_traffic_class_a_store(struct device *dev,
312 struct device_attribute *attr,
313 const char *buf, size_t count)
315 struct idxd_group *group = confdev_to_group(dev);
316 struct idxd_device *idxd = group->idxd;
320 rc = kstrtol(buf, 10, &val);
324 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
327 if (idxd->state == IDXD_DEV_ENABLED)
330 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
333 if (val < 0 || val > 7)
340 static struct device_attribute dev_attr_group_traffic_class_a =
341 __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
342 group_traffic_class_a_store);
344 static ssize_t group_traffic_class_b_show(struct device *dev,
345 struct device_attribute *attr,
348 struct idxd_group *group = confdev_to_group(dev);
350 return sysfs_emit(buf, "%d\n", group->tc_b);
353 static ssize_t group_traffic_class_b_store(struct device *dev,
354 struct device_attribute *attr,
355 const char *buf, size_t count)
357 struct idxd_group *group = confdev_to_group(dev);
358 struct idxd_device *idxd = group->idxd;
362 rc = kstrtol(buf, 10, &val);
366 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
369 if (idxd->state == IDXD_DEV_ENABLED)
372 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
375 if (val < 0 || val > 7)
382 static struct device_attribute dev_attr_group_traffic_class_b =
383 __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
384 group_traffic_class_b_store);
386 static struct attribute *idxd_group_attributes[] = {
387 &dev_attr_group_work_queues.attr,
388 &dev_attr_group_engines.attr,
389 &dev_attr_group_use_token_limit.attr,
390 &dev_attr_group_tokens_allowed.attr,
391 &dev_attr_group_tokens_reserved.attr,
392 &dev_attr_group_traffic_class_a.attr,
393 &dev_attr_group_traffic_class_b.attr,
397 static const struct attribute_group idxd_group_attribute_group = {
398 .attrs = idxd_group_attributes,
401 static const struct attribute_group *idxd_group_attribute_groups[] = {
402 &idxd_group_attribute_group,
406 static void idxd_conf_group_release(struct device *dev)
408 struct idxd_group *group = confdev_to_group(dev);
413 struct device_type idxd_group_device_type = {
415 .release = idxd_conf_group_release,
416 .groups = idxd_group_attribute_groups,
419 /* IDXD work queue attribs */
420 static ssize_t wq_clients_show(struct device *dev,
421 struct device_attribute *attr, char *buf)
423 struct idxd_wq *wq = confdev_to_wq(dev);
425 return sysfs_emit(buf, "%d\n", wq->client_count);
428 static struct device_attribute dev_attr_wq_clients =
429 __ATTR(clients, 0444, wq_clients_show, NULL);
431 static ssize_t wq_state_show(struct device *dev,
432 struct device_attribute *attr, char *buf)
434 struct idxd_wq *wq = confdev_to_wq(dev);
437 case IDXD_WQ_DISABLED:
438 return sysfs_emit(buf, "disabled\n");
439 case IDXD_WQ_ENABLED:
440 return sysfs_emit(buf, "enabled\n");
443 return sysfs_emit(buf, "unknown\n");
446 static struct device_attribute dev_attr_wq_state =
447 __ATTR(state, 0444, wq_state_show, NULL);
449 static ssize_t wq_group_id_show(struct device *dev,
450 struct device_attribute *attr, char *buf)
452 struct idxd_wq *wq = confdev_to_wq(dev);
455 return sysfs_emit(buf, "%u\n", wq->group->id);
457 return sysfs_emit(buf, "-1\n");
460 static ssize_t wq_group_id_store(struct device *dev,
461 struct device_attribute *attr,
462 const char *buf, size_t count)
464 struct idxd_wq *wq = confdev_to_wq(dev);
465 struct idxd_device *idxd = wq->idxd;
468 struct idxd_group *prevg, *group;
470 rc = kstrtol(buf, 10, &id);
474 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
477 if (wq->state != IDXD_WQ_DISABLED)
480 if (id > idxd->max_groups - 1 || id < -1)
485 wq->group->num_wqs--;
491 group = idxd->groups[id];
501 static struct device_attribute dev_attr_wq_group_id =
502 __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
504 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
507 struct idxd_wq *wq = confdev_to_wq(dev);
509 return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
512 static ssize_t wq_mode_store(struct device *dev,
513 struct device_attribute *attr, const char *buf,
516 struct idxd_wq *wq = confdev_to_wq(dev);
517 struct idxd_device *idxd = wq->idxd;
519 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
522 if (wq->state != IDXD_WQ_DISABLED)
525 if (sysfs_streq(buf, "dedicated")) {
526 set_bit(WQ_FLAG_DEDICATED, &wq->flags);
528 } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
529 clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
537 static struct device_attribute dev_attr_wq_mode =
538 __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
540 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
543 struct idxd_wq *wq = confdev_to_wq(dev);
545 return sysfs_emit(buf, "%u\n", wq->size);
548 static int total_claimed_wq_size(struct idxd_device *idxd)
553 for (i = 0; i < idxd->max_wqs; i++) {
554 struct idxd_wq *wq = idxd->wqs[i];
562 static ssize_t wq_size_store(struct device *dev,
563 struct device_attribute *attr, const char *buf,
566 struct idxd_wq *wq = confdev_to_wq(dev);
568 struct idxd_device *idxd = wq->idxd;
571 rc = kstrtoul(buf, 10, &size);
575 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
578 if (idxd->state == IDXD_DEV_ENABLED)
581 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
588 static struct device_attribute dev_attr_wq_size =
589 __ATTR(size, 0644, wq_size_show, wq_size_store);
591 static ssize_t wq_priority_show(struct device *dev,
592 struct device_attribute *attr, char *buf)
594 struct idxd_wq *wq = confdev_to_wq(dev);
596 return sysfs_emit(buf, "%u\n", wq->priority);
599 static ssize_t wq_priority_store(struct device *dev,
600 struct device_attribute *attr,
601 const char *buf, size_t count)
603 struct idxd_wq *wq = confdev_to_wq(dev);
605 struct idxd_device *idxd = wq->idxd;
608 rc = kstrtoul(buf, 10, &prio);
612 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
615 if (wq->state != IDXD_WQ_DISABLED)
618 if (prio > IDXD_MAX_PRIORITY)
625 static struct device_attribute dev_attr_wq_priority =
626 __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
628 static ssize_t wq_block_on_fault_show(struct device *dev,
629 struct device_attribute *attr, char *buf)
631 struct idxd_wq *wq = confdev_to_wq(dev);
633 return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
636 static ssize_t wq_block_on_fault_store(struct device *dev,
637 struct device_attribute *attr,
638 const char *buf, size_t count)
640 struct idxd_wq *wq = confdev_to_wq(dev);
641 struct idxd_device *idxd = wq->idxd;
645 if (!idxd->hw.gen_cap.block_on_fault)
648 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
651 if (wq->state != IDXD_WQ_DISABLED)
654 rc = kstrtobool(buf, &bof);
659 set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
661 clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
666 static struct device_attribute dev_attr_wq_block_on_fault =
667 __ATTR(block_on_fault, 0644, wq_block_on_fault_show,
668 wq_block_on_fault_store);
670 static ssize_t wq_threshold_show(struct device *dev,
671 struct device_attribute *attr, char *buf)
673 struct idxd_wq *wq = confdev_to_wq(dev);
675 return sysfs_emit(buf, "%u\n", wq->threshold);
678 static ssize_t wq_threshold_store(struct device *dev,
679 struct device_attribute *attr,
680 const char *buf, size_t count)
682 struct idxd_wq *wq = confdev_to_wq(dev);
683 struct idxd_device *idxd = wq->idxd;
687 rc = kstrtouint(buf, 0, &val);
691 if (val > wq->size || val <= 0)
694 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
697 if (wq->state != IDXD_WQ_DISABLED)
700 if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
708 static struct device_attribute dev_attr_wq_threshold =
709 __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
711 static ssize_t wq_type_show(struct device *dev,
712 struct device_attribute *attr, char *buf)
714 struct idxd_wq *wq = confdev_to_wq(dev);
717 case IDXD_WQT_KERNEL:
718 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
720 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
723 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
729 static ssize_t wq_type_store(struct device *dev,
730 struct device_attribute *attr, const char *buf,
733 struct idxd_wq *wq = confdev_to_wq(dev);
734 enum idxd_wq_type old_type;
736 if (wq->state != IDXD_WQ_DISABLED)
740 if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
741 wq->type = IDXD_WQT_NONE;
742 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
743 wq->type = IDXD_WQT_KERNEL;
744 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
745 wq->type = IDXD_WQT_USER;
749 /* If we are changing queue type, clear the name */
750 if (wq->type != old_type)
751 memset(wq->name, 0, WQ_NAME_SIZE + 1);
756 static struct device_attribute dev_attr_wq_type =
757 __ATTR(type, 0644, wq_type_show, wq_type_store);
759 static ssize_t wq_name_show(struct device *dev,
760 struct device_attribute *attr, char *buf)
762 struct idxd_wq *wq = confdev_to_wq(dev);
764 return sysfs_emit(buf, "%s\n", wq->name);
767 static ssize_t wq_name_store(struct device *dev,
768 struct device_attribute *attr, const char *buf,
771 struct idxd_wq *wq = confdev_to_wq(dev);
773 if (wq->state != IDXD_WQ_DISABLED)
776 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
780 * This is temporarily placed here until we have SVM support for
783 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
786 memset(wq->name, 0, WQ_NAME_SIZE + 1);
787 strncpy(wq->name, buf, WQ_NAME_SIZE);
788 strreplace(wq->name, '\n', '\0');
792 static struct device_attribute dev_attr_wq_name =
793 __ATTR(name, 0644, wq_name_show, wq_name_store);
795 static ssize_t wq_cdev_minor_show(struct device *dev,
796 struct device_attribute *attr, char *buf)
798 struct idxd_wq *wq = confdev_to_wq(dev);
801 mutex_lock(&wq->wq_lock);
803 minor = wq->idxd_cdev->minor;
804 mutex_unlock(&wq->wq_lock);
808 return sysfs_emit(buf, "%d\n", minor);
811 static struct device_attribute dev_attr_wq_cdev_minor =
812 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
814 static int __get_sysfs_u64(const char *buf, u64 *val)
818 rc = kstrtou64(buf, 0, val);
825 *val = roundup_pow_of_two(*val);
829 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
832 struct idxd_wq *wq = confdev_to_wq(dev);
834 return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
837 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
838 const char *buf, size_t count)
840 struct idxd_wq *wq = confdev_to_wq(dev);
841 struct idxd_device *idxd = wq->idxd;
845 if (wq->state != IDXD_WQ_DISABLED)
848 rc = __get_sysfs_u64(buf, &xfer_size);
852 if (xfer_size > idxd->max_xfer_bytes)
855 wq->max_xfer_bytes = xfer_size;
860 static struct device_attribute dev_attr_wq_max_transfer_size =
861 __ATTR(max_transfer_size, 0644,
862 wq_max_transfer_size_show, wq_max_transfer_size_store);
864 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
866 struct idxd_wq *wq = confdev_to_wq(dev);
868 return sysfs_emit(buf, "%u\n", wq->max_batch_size);
871 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
872 const char *buf, size_t count)
874 struct idxd_wq *wq = confdev_to_wq(dev);
875 struct idxd_device *idxd = wq->idxd;
879 if (wq->state != IDXD_WQ_DISABLED)
882 rc = __get_sysfs_u64(buf, &batch_size);
886 if (batch_size > idxd->max_batch_size)
889 wq->max_batch_size = (u32)batch_size;
894 static struct device_attribute dev_attr_wq_max_batch_size =
895 __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
897 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
899 struct idxd_wq *wq = confdev_to_wq(dev);
901 return sysfs_emit(buf, "%u\n", wq->ats_dis);
904 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
905 const char *buf, size_t count)
907 struct idxd_wq *wq = confdev_to_wq(dev);
908 struct idxd_device *idxd = wq->idxd;
912 if (wq->state != IDXD_WQ_DISABLED)
915 if (!idxd->hw.wq_cap.wq_ats_support)
918 rc = kstrtobool(buf, &ats_dis);
922 wq->ats_dis = ats_dis;
927 static struct device_attribute dev_attr_wq_ats_disable =
928 __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
930 static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
932 struct idxd_wq *wq = confdev_to_wq(dev);
933 struct idxd_device *idxd = wq->idxd;
936 if (!idxd->hw.wq_cap.occupancy)
939 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
940 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
942 return sysfs_emit(buf, "%u\n", occup);
945 static struct device_attribute dev_attr_wq_occupancy =
946 __ATTR(occupancy, 0444, wq_occupancy_show, NULL);
948 static struct attribute *idxd_wq_attributes[] = {
949 &dev_attr_wq_clients.attr,
950 &dev_attr_wq_state.attr,
951 &dev_attr_wq_group_id.attr,
952 &dev_attr_wq_mode.attr,
953 &dev_attr_wq_size.attr,
954 &dev_attr_wq_priority.attr,
955 &dev_attr_wq_block_on_fault.attr,
956 &dev_attr_wq_threshold.attr,
957 &dev_attr_wq_type.attr,
958 &dev_attr_wq_name.attr,
959 &dev_attr_wq_cdev_minor.attr,
960 &dev_attr_wq_max_transfer_size.attr,
961 &dev_attr_wq_max_batch_size.attr,
962 &dev_attr_wq_ats_disable.attr,
963 &dev_attr_wq_occupancy.attr,
967 static const struct attribute_group idxd_wq_attribute_group = {
968 .attrs = idxd_wq_attributes,
971 static const struct attribute_group *idxd_wq_attribute_groups[] = {
972 &idxd_wq_attribute_group,
976 static void idxd_conf_wq_release(struct device *dev)
978 struct idxd_wq *wq = confdev_to_wq(dev);
984 struct device_type idxd_wq_device_type = {
986 .release = idxd_conf_wq_release,
987 .groups = idxd_wq_attribute_groups,
990 /* IDXD device attribs */
991 static ssize_t version_show(struct device *dev, struct device_attribute *attr,
994 struct idxd_device *idxd = confdev_to_idxd(dev);
996 return sysfs_emit(buf, "%#x\n", idxd->hw.version);
998 static DEVICE_ATTR_RO(version);
1000 static ssize_t max_work_queues_size_show(struct device *dev,
1001 struct device_attribute *attr,
1004 struct idxd_device *idxd = confdev_to_idxd(dev);
1006 return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
1008 static DEVICE_ATTR_RO(max_work_queues_size);
1010 static ssize_t max_groups_show(struct device *dev,
1011 struct device_attribute *attr, char *buf)
1013 struct idxd_device *idxd = confdev_to_idxd(dev);
1015 return sysfs_emit(buf, "%u\n", idxd->max_groups);
1017 static DEVICE_ATTR_RO(max_groups);
1019 static ssize_t max_work_queues_show(struct device *dev,
1020 struct device_attribute *attr, char *buf)
1022 struct idxd_device *idxd = confdev_to_idxd(dev);
1024 return sysfs_emit(buf, "%u\n", idxd->max_wqs);
1026 static DEVICE_ATTR_RO(max_work_queues);
1028 static ssize_t max_engines_show(struct device *dev,
1029 struct device_attribute *attr, char *buf)
1031 struct idxd_device *idxd = confdev_to_idxd(dev);
1033 return sysfs_emit(buf, "%u\n", idxd->max_engines);
1035 static DEVICE_ATTR_RO(max_engines);
1037 static ssize_t numa_node_show(struct device *dev,
1038 struct device_attribute *attr, char *buf)
1040 struct idxd_device *idxd = confdev_to_idxd(dev);
1042 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1044 static DEVICE_ATTR_RO(numa_node);
1046 static ssize_t max_batch_size_show(struct device *dev,
1047 struct device_attribute *attr, char *buf)
1049 struct idxd_device *idxd = confdev_to_idxd(dev);
1051 return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
1053 static DEVICE_ATTR_RO(max_batch_size);
1055 static ssize_t max_transfer_size_show(struct device *dev,
1056 struct device_attribute *attr,
1059 struct idxd_device *idxd = confdev_to_idxd(dev);
1061 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
1063 static DEVICE_ATTR_RO(max_transfer_size);
1065 static ssize_t op_cap_show(struct device *dev,
1066 struct device_attribute *attr, char *buf)
1068 struct idxd_device *idxd = confdev_to_idxd(dev);
1071 for (i = 0; i < 4; i++)
1072 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
1075 rc += sysfs_emit_at(buf, rc, "\n");
1078 static DEVICE_ATTR_RO(op_cap);
1080 static ssize_t gen_cap_show(struct device *dev,
1081 struct device_attribute *attr, char *buf)
1083 struct idxd_device *idxd = confdev_to_idxd(dev);
1085 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1087 static DEVICE_ATTR_RO(gen_cap);
1089 static ssize_t configurable_show(struct device *dev,
1090 struct device_attribute *attr, char *buf)
1092 struct idxd_device *idxd = confdev_to_idxd(dev);
1094 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1096 static DEVICE_ATTR_RO(configurable);
1098 static ssize_t clients_show(struct device *dev,
1099 struct device_attribute *attr, char *buf)
1101 struct idxd_device *idxd = confdev_to_idxd(dev);
1104 spin_lock(&idxd->dev_lock);
1105 for (i = 0; i < idxd->max_wqs; i++) {
1106 struct idxd_wq *wq = idxd->wqs[i];
1108 count += wq->client_count;
1110 spin_unlock(&idxd->dev_lock);
1112 return sysfs_emit(buf, "%d\n", count);
1114 static DEVICE_ATTR_RO(clients);
1116 static ssize_t pasid_enabled_show(struct device *dev,
1117 struct device_attribute *attr, char *buf)
1119 struct idxd_device *idxd = confdev_to_idxd(dev);
1121 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
1123 static DEVICE_ATTR_RO(pasid_enabled);
1125 static ssize_t state_show(struct device *dev,
1126 struct device_attribute *attr, char *buf)
1128 struct idxd_device *idxd = confdev_to_idxd(dev);
1130 switch (idxd->state) {
1131 case IDXD_DEV_DISABLED:
1132 return sysfs_emit(buf, "disabled\n");
1133 case IDXD_DEV_ENABLED:
1134 return sysfs_emit(buf, "enabled\n");
1135 case IDXD_DEV_HALTED:
1136 return sysfs_emit(buf, "halted\n");
1139 return sysfs_emit(buf, "unknown\n");
1141 static DEVICE_ATTR_RO(state);
1143 static ssize_t errors_show(struct device *dev,
1144 struct device_attribute *attr, char *buf)
1146 struct idxd_device *idxd = confdev_to_idxd(dev);
1149 spin_lock(&idxd->dev_lock);
1150 for (i = 0; i < 4; i++)
1151 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
1152 spin_unlock(&idxd->dev_lock);
1154 out += sysfs_emit_at(buf, out, "\n");
1157 static DEVICE_ATTR_RO(errors);
1159 static ssize_t max_tokens_show(struct device *dev,
1160 struct device_attribute *attr, char *buf)
1162 struct idxd_device *idxd = confdev_to_idxd(dev);
1164 return sysfs_emit(buf, "%u\n", idxd->max_tokens);
1166 static DEVICE_ATTR_RO(max_tokens);
1168 static ssize_t token_limit_show(struct device *dev,
1169 struct device_attribute *attr, char *buf)
1171 struct idxd_device *idxd = confdev_to_idxd(dev);
1173 return sysfs_emit(buf, "%u\n", idxd->token_limit);
1176 static ssize_t token_limit_store(struct device *dev,
1177 struct device_attribute *attr,
1178 const char *buf, size_t count)
1180 struct idxd_device *idxd = confdev_to_idxd(dev);
1184 rc = kstrtoul(buf, 10, &val);
1188 if (idxd->state == IDXD_DEV_ENABLED)
1191 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1194 if (!idxd->hw.group_cap.token_limit)
1197 if (val > idxd->hw.group_cap.total_tokens)
1200 idxd->token_limit = val;
1203 static DEVICE_ATTR_RW(token_limit);
1205 static ssize_t cdev_major_show(struct device *dev,
1206 struct device_attribute *attr, char *buf)
1208 struct idxd_device *idxd = confdev_to_idxd(dev);
1210 return sysfs_emit(buf, "%u\n", idxd->major);
1212 static DEVICE_ATTR_RO(cdev_major);
1214 static ssize_t cmd_status_show(struct device *dev,
1215 struct device_attribute *attr, char *buf)
1217 struct idxd_device *idxd = confdev_to_idxd(dev);
1219 return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1222 static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
1223 const char *buf, size_t count)
1225 struct idxd_device *idxd = confdev_to_idxd(dev);
1227 idxd->cmd_status = 0;
1230 static DEVICE_ATTR_RW(cmd_status);
1232 static struct attribute *idxd_device_attributes[] = {
1233 &dev_attr_version.attr,
1234 &dev_attr_max_groups.attr,
1235 &dev_attr_max_work_queues.attr,
1236 &dev_attr_max_work_queues_size.attr,
1237 &dev_attr_max_engines.attr,
1238 &dev_attr_numa_node.attr,
1239 &dev_attr_max_batch_size.attr,
1240 &dev_attr_max_transfer_size.attr,
1241 &dev_attr_op_cap.attr,
1242 &dev_attr_gen_cap.attr,
1243 &dev_attr_configurable.attr,
1244 &dev_attr_clients.attr,
1245 &dev_attr_pasid_enabled.attr,
1246 &dev_attr_state.attr,
1247 &dev_attr_errors.attr,
1248 &dev_attr_max_tokens.attr,
1249 &dev_attr_token_limit.attr,
1250 &dev_attr_cdev_major.attr,
1251 &dev_attr_cmd_status.attr,
1255 static const struct attribute_group idxd_device_attribute_group = {
1256 .attrs = idxd_device_attributes,
1259 static const struct attribute_group *idxd_attribute_groups[] = {
1260 &idxd_device_attribute_group,
1264 static void idxd_conf_device_release(struct device *dev)
1266 struct idxd_device *idxd = confdev_to_idxd(dev);
1268 kfree(idxd->groups);
1270 kfree(idxd->engines);
1271 kfree(idxd->irq_entries);
1272 kfree(idxd->int_handles);
1273 ida_free(&idxd_ida, idxd->id);
1277 struct device_type dsa_device_type = {
1279 .release = idxd_conf_device_release,
1280 .groups = idxd_attribute_groups,
1283 struct device_type iax_device_type = {
1285 .release = idxd_conf_device_release,
1286 .groups = idxd_attribute_groups,
1289 static int idxd_register_engine_devices(struct idxd_device *idxd)
1291 struct idxd_engine *engine;
1294 for (i = 0; i < idxd->max_engines; i++) {
1295 engine = idxd->engines[i];
1296 rc = device_add(engine_confdev(engine));
1305 for (; i < idxd->max_engines; i++) {
1306 engine = idxd->engines[i];
1307 put_device(engine_confdev(engine));
1311 engine = idxd->engines[j];
1312 device_unregister(engine_confdev(engine));
1317 static int idxd_register_group_devices(struct idxd_device *idxd)
1319 struct idxd_group *group;
1322 for (i = 0; i < idxd->max_groups; i++) {
1323 group = idxd->groups[i];
1324 rc = device_add(group_confdev(group));
1333 for (; i < idxd->max_groups; i++) {
1334 group = idxd->groups[i];
1335 put_device(group_confdev(group));
1339 group = idxd->groups[j];
1340 device_unregister(group_confdev(group));
1345 static int idxd_register_wq_devices(struct idxd_device *idxd)
1350 for (i = 0; i < idxd->max_wqs; i++) {
1352 rc = device_add(wq_confdev(wq));
1361 for (; i < idxd->max_wqs; i++) {
1363 put_device(wq_confdev(wq));
1368 device_unregister(wq_confdev(wq));
1373 int idxd_register_devices(struct idxd_device *idxd)
1375 struct device *dev = &idxd->pdev->dev;
1378 rc = device_add(idxd_confdev(idxd));
1382 rc = idxd_register_wq_devices(idxd);
1384 dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
1388 rc = idxd_register_engine_devices(idxd);
1390 dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
1394 rc = idxd_register_group_devices(idxd);
1396 dev_dbg(dev, "Group device registering failed: %d\n", rc);
1403 for (i = 0; i < idxd->max_engines; i++)
1404 device_unregister(engine_confdev(idxd->engines[i]));
1406 for (i = 0; i < idxd->max_wqs; i++)
1407 device_unregister(wq_confdev(idxd->wqs[i]));
1409 device_del(idxd_confdev(idxd));
1413 void idxd_unregister_devices(struct idxd_device *idxd)
1417 for (i = 0; i < idxd->max_wqs; i++) {
1418 struct idxd_wq *wq = idxd->wqs[i];
1420 device_unregister(wq_confdev(wq));
1423 for (i = 0; i < idxd->max_engines; i++) {
1424 struct idxd_engine *engine = idxd->engines[i];
1426 device_unregister(engine_confdev(engine));
1429 for (i = 0; i < idxd->max_groups; i++) {
1430 struct idxd_group *group = idxd->groups[i];
1432 device_unregister(group_confdev(group));
1436 int idxd_register_bus_type(void)
1438 return bus_register(&dsa_bus_type);
1441 void idxd_unregister_bus_type(void)
1443 bus_unregister(&dsa_bus_type);