// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>

static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);
/*
 * If the parent driver or the device disappears, the queue state is invalid
 * and ops are not usable anymore. With queue->mutex held, return true if the
 * queue is valid.
 */
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}
static int uacce_start_queue(struct uacce_queue *q)
{
	int ret;

	if (q->state != UACCE_Q_INIT)
		return -EINVAL;

	if (q->uacce->ops->start_queue) {
		ret = q->uacce->ops->start_queue(q);
		if (ret < 0)
			return ret;
	}

	q->state = UACCE_Q_STARTED;
	return 0;
}
static int uacce_put_queue(struct uacce_queue *q)
{
	struct uacce_device *uacce = q->uacce;

	if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
		uacce->ops->stop_queue(q);

	if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
	     uacce->ops->put_queue)
		uacce->ops->put_queue(q);

	q->state = UACCE_Q_ZOMBIE;

	return 0;
}
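/*
 * Queue lifecycle, as implemented above: uacce_fops_open() places a new
 * queue in UACCE_Q_INIT, UACCE_CMD_START_Q moves it to UACCE_Q_STARTED, and
 * uacce_put_queue() retires it to UACCE_Q_ZOMBIE, after which only release
 * can reclaim it. Only INIT and STARTED count as valid states.
 */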
static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	long ret = -ENXIO;

	/*
	 * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
	 * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
	 * gets called with mmap_lock held, by taking uacce->mutex instead of
	 * q->mutex. Doing this in uacce_fops_mmap() is not possible because
	 * uacce_fops_open() calls iommu_sva_bind_device(), which takes
	 * mmap_lock, while holding uacce->mutex.
	 */
	mutex_lock(&uacce->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	switch (cmd) {
	case UACCE_CMD_START_Q:
		ret = uacce_start_queue(q);
		break;
	case UACCE_CMD_PUT_Q:
		ret = uacce_put_queue(q);
		break;
	default:
		if (uacce->ops->ioctl)
			ret = uacce->ops->ioctl(q, cmd, arg);
		else
			ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&uacce->mutex);
	return ret;
}
#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
				    unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);

	return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
	u32 pasid;
	struct iommu_sva *handle;

	if (!(uacce->flags & UACCE_DEV_SVA))
		return 0;

	handle = iommu_sva_bind_device(uacce->parent, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	q->handle = handle;
	q->pasid = pasid;
	return 0;
}
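/*
 * Note on the binding above: with UACCE_DEV_SVA, iommu_sva_bind_device()
 * attaches the current process mm to the parent device, and the returned
 * PASID tags the device's DMA so the IOMMU translates it through the
 * process page tables. This is what lets user space hand raw pointers to
 * the accelerator without an explicit map step.
 */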
static void uacce_unbind_queue(struct uacce_queue *q)
{
	if (!q->handle)
		return;
	iommu_sva_unbind_device(q->handle);
	q->handle = NULL;
}
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
	struct uacce_device *uacce;
	struct uacce_queue *q;
	int ret;

	uacce = xa_load(&uacce_xa, iminor(inode));
	if (!uacce)
		return -ENODEV;

	q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	mutex_lock(&uacce->mutex);

	if (!uacce->parent) {
		ret = -EINVAL;
		goto out_with_mem;
	}

	ret = uacce_bind_queue(uacce, q);
	if (ret)
		goto out_with_mem;

	q->uacce = uacce;

	if (uacce->ops->get_queue) {
		ret = uacce->ops->get_queue(uacce, q->pasid, q);
		if (ret < 0)
			goto out_with_bond;
	}

	init_waitqueue_head(&q->wait);
	filep->private_data = q;
	uacce->inode = inode;
	q->state = UACCE_Q_INIT;
	mutex_init(&q->mutex);
	list_add(&q->list, &uacce->queues);
	mutex_unlock(&uacce->mutex);

	return 0;

out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
	kfree(q);
	mutex_unlock(&uacce->mutex);
	return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
}
static void uacce_vma_close(struct vm_area_struct *vma)
{
	struct uacce_queue *q = vma->vm_private_data;
	struct uacce_qfile_region *qfr = NULL;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		qfr = q->qfrs[vma->vm_pgoff];

	kfree(qfr);
}

static const struct vm_operations_struct uacce_vm_ops = {
	.close = uacce_vma_close,
};
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *qfr;
	enum uacce_qfrt type = UACCE_MAX_REGION;
	int ret = 0;

	if (vma->vm_pgoff < UACCE_MAX_REGION)
		type = vma->vm_pgoff;
	else
		return -EINVAL;

	qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
	if (!qfr)
		return -ENOMEM;

	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
	vma->vm_ops = &uacce_vm_ops;
	vma->vm_private_data = q;
	qfr->type = type;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q)) {
		ret = -ENXIO;
		goto out_with_lock;
	}

	if (q->qfrs[type]) {
		ret = -EEXIST;
		goto out_with_lock;
	}

	switch (type) {
	case UACCE_QFRT_MMIO:
	case UACCE_QFRT_DUS:
		if (!uacce->ops->mmap) {
			ret = -EINVAL;
			goto out_with_lock;
		}

		ret = uacce->ops->mmap(q, vma, qfr);
		if (ret)
			goto out_with_lock;
		break;
	default:
		ret = -EINVAL;
		goto out_with_lock;
	}

	q->qfrs[type] = qfr;
	mutex_unlock(&q->mutex);

	return ret;

out_with_lock:
	mutex_unlock(&q->mutex);
	kfree(qfr);
	return ret;
}
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
	struct uacce_queue *q = file->private_data;
	struct uacce_device *uacce = q->uacce;
	__poll_t ret = 0;

	mutex_lock(&q->mutex);
	if (!uacce_queue_is_valid(q))
		goto out_unlock;

	poll_wait(file, &q->wait, wait);

	if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
		ret = EPOLLIN | EPOLLRDNORM;

out_unlock:
	mutex_unlock(&q->mutex);
	return ret;
}
static const struct file_operations uacce_fops = {
	.owner		= THIS_MODULE,
	.open		= uacce_fops_open,
	.release	= uacce_fops_release,
	.unlocked_ioctl	= uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= uacce_fops_compat_ioctl,
#endif
	.mmap		= uacce_fops_mmap,
	.poll		= uacce_fops_poll,
};
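/*
 * Illustrative user-space session against these fops (a minimal sketch;
 * the "hisi_zip-0" device name and the mmio_size/dus_size variables are
 * hypothetical, real sizes are published in sysfs as region_mmio_size and
 * region_dus_size):
 *
 *	int fd = open("/dev/hisi_zip-0", O_RDWR);	// uacce_fops_open()
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	// vm_pgoff selects the region type, so offset = type * page size
 *	void *mmio = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, UACCE_QFRT_MMIO * pgsz);
 *	void *dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, UACCE_QFRT_DUS * pgsz);
 *	ioctl(fd, UACCE_CMD_START_Q);			// uacce_start_queue()
 *	// submit work through the mapped regions, then wait:
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);				// uacce_fops_poll()
 *	close(fd);					// uacce_fops_release()
 */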
#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)

static ssize_t api_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->api_ver);
}

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%u\n", uacce->flags);
}

static ssize_t available_instances_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	if (!uacce->ops->get_available_instances)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n",
			  uacce->ops->get_available_instances(uacce));
}

static ssize_t algorithms_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%s\n", uacce->algs);
}

static ssize_t region_mmio_size_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}

static ssize_t region_dus_size_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%lu\n",
			  uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}

static ssize_t isolate_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
}

static ssize_t isolate_strategy_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	u32 val;

	val = uacce->ops->isolate_err_threshold_read(uacce);

	return sysfs_emit(buf, "%u\n", val);
}

static ssize_t isolate_strategy_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct uacce_device *uacce = to_uacce_device(dev);
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val > UACCE_MAX_ERR_THRESHOLD)
		return -EINVAL;

	ret = uacce->ops->isolate_err_threshold_write(uacce, val);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);
static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_api.attr,
	&dev_attr_flags.attr,
	&dev_attr_available_instances.attr,
	&dev_attr_algorithms.attr,
	&dev_attr_region_mmio_size.attr,
	&dev_attr_region_dus_size.attr,
	&dev_attr_isolate.attr,
	&dev_attr_isolate_strategy.attr,
	NULL,
};
static umode_t uacce_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct uacce_device *uacce = to_uacce_device(dev);

	if (((attr == &dev_attr_region_mmio_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
	    ((attr == &dev_attr_region_dus_size.attr) &&
	    (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
		return 0;

	if (attr == &dev_attr_isolate_strategy.attr &&
	    (!uacce->ops->isolate_err_threshold_read &&
	     !uacce->ops->isolate_err_threshold_write))
		return 0;

	if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
		return 0;

	return attr->mode;
}
static struct attribute_group uacce_dev_group = {
	.is_visible	= uacce_dev_is_visible,
	.attrs		= uacce_dev_attrs,
};

__ATTRIBUTE_GROUPS(uacce_dev);
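/*
 * The group above appears under /sys/class/uacce/<name>-<id>/. For example
 * (device name and values are hypothetical; visibility of each file depends
 * on the ops the parent driver implements):
 *
 *	$ cat /sys/class/uacce/hisi_zip-0/flags
 *	1
 *	$ cat /sys/class/uacce/hisi_zip-0/available_instances
 *	256
 *	$ echo 10 > /sys/class/uacce/hisi_zip-0/isolate_strategy
 */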
static void uacce_release(struct device *dev)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	kfree(uacce);
}
static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
	int ret;

	if (!(flags & UACCE_DEV_SVA))
		return flags;

	flags &= ~UACCE_DEV_SVA;

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
	if (ret) {
		dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
		return flags;
	}

	ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
	if (ret) {
		dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
		iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
		return flags;
	}

	return flags | UACCE_DEV_SVA;
}

static void uacce_disable_sva(struct uacce_device *uacce)
{
	if (!(uacce->flags & UACCE_DEV_SVA))
		return;

	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
}
/**
 * uacce_alloc() - alloc an accelerator
 * @parent: pointer of uacce parent device
 * @interface: pointer of uacce_interface for register
 *
 * Returns uacce pointer if success and ERR_PTR if not
 * Need check returned negotiated uacce->flags
 */
struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface)
{
	unsigned int flags = interface->flags;
	struct uacce_device *uacce;
	int ret;

	uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
	if (!uacce)
		return ERR_PTR(-ENOMEM);

	flags = uacce_enable_sva(parent, flags);

	uacce->parent = parent;
	uacce->flags = flags;
	uacce->ops = interface->ops;

	ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
		       GFP_KERNEL);
	if (ret < 0)
		goto err_with_uacce;

	INIT_LIST_HEAD(&uacce->queues);
	mutex_init(&uacce->mutex);
	device_initialize(&uacce->dev);
	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
	uacce->dev.class = uacce_class;
	uacce->dev.groups = uacce_dev_groups;
	uacce->dev.parent = uacce->parent;
	uacce->dev.release = uacce_release;
	dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);

	return uacce;

err_with_uacce:
	uacce_disable_sva(uacce);
	kfree(uacce);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);
/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: The initialized uacce device
 *
 * Return 0 if register succeeded, or an error.
 */
int uacce_register(struct uacce_device *uacce)
{
	if (!uacce)
		return -ENODEV;

	uacce->cdev = cdev_alloc();
	if (!uacce->cdev)
		return -ENOMEM;

	uacce->cdev->ops = &uacce_fops;
	uacce->cdev->owner = THIS_MODULE;

	return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);
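/*
 * Minimal sketch of a parent driver using the two calls above during probe;
 * my_uacce_ops, "my_accel" and the surrounding driver are hypothetical:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct uacce_interface interface = {
 *			.name	= "my_accel",
 *			.flags	= UACCE_DEV_SVA,
 *			.ops	= &my_uacce_ops,
 *		};
 *		struct uacce_device *uacce;
 *		int ret;
 *
 *		uacce = uacce_alloc(&pdev->dev, &interface);
 *		if (IS_ERR(uacce))
 *			return PTR_ERR(uacce);
 *
 *		// flags are negotiated: UACCE_DEV_SVA is cleared if
 *		// enabling SVA on the parent failed
 *		if (!(uacce->flags & UACCE_DEV_SVA))
 *			dev_info(&pdev->dev, "running without SVA\n");
 *
 *		ret = uacce_register(uacce);
 *		if (ret)
 *			uacce_remove(uacce);
 *		return ret;
 *	}
 */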
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
	struct uacce_queue *q, *next_q;

	if (!uacce)
		return;

	/*
	 * unmap remaining mapping from user space, preventing user still
	 * access the mmaped area while parent device is already removed
	 */
	if (uacce->inode)
		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

	/*
	 * uacce_fops_open() may be running concurrently, even after we remove
	 * the cdev. Holding uacce->mutex ensures that open() does not obtain a
	 * removed uacce device.
	 */
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
		 */
		mutex_lock(&q->mutex);
		uacce_put_queue(q);
		mutex_unlock(&q->mutex);
		uacce_unbind_queue(q);
	}

	/* disable sva now since no opened queues */
	uacce_disable_sva(uacce);

	if (uacce->cdev)
		cdev_device_del(uacce->cdev, &uacce->dev);
	xa_erase(&uacce_xa, uacce->dev_id);
	/*
	 * uacce exists as long as there are open fds, but ops will be freed
	 * now. Ensure that bugs cause NULL deref rather than use-after-free.
	 */
	uacce->ops = NULL;
	uacce->parent = NULL;
	mutex_unlock(&uacce->mutex);
	put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
static int __init uacce_init(void)
{
	int ret;

	uacce_class = class_create(UACCE_NAME);
	if (IS_ERR(uacce_class))
		return PTR_ERR(uacce_class);

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		class_destroy(uacce_class);

	return ret;
}

static __exit void uacce_exit(void)
{
	unregister_chrdev_region(uacce_devt, MINORMASK);
	class_destroy(uacce_class);
}

subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");