// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable Port by clearing the port soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable function should only be used after __afu_port_disable
 * function.
 *
 * The caller needs to hold lock for protection.
 */
void __afu_port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}

#define RST_POLL_INVL	10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * __afu_port_disable - disable a port by holding reset
 * @pdev: port platform device.
 *
 * Disable Port by setting the port soft reset bit; this puts the port into
 * reset.
 *
 * The caller needs to hold lock for protection.
 */
int __afu_port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) by functions
 * __afu_port_disable() and __afu_port_enable() (set the port soft reset bit
 * and then clear it). Userspace can do Port reset at any time, e.g. during
 * DMA or Partial Reconfiguration. But it should never cause any system level
 * issue, only functional failure (e.g. DMA or PR operation failure) and be
 * recoverable from the failure.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempts on MMIO access to AFU while in reset will result
 * in errors reported via the port error reporting sub feature (if present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = __afu_port_disable(pdev);
	if (!ret)
		__afu_port_enable(pdev);

	return ret;
}

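/* Reset the port with the per-port mutex held across the operation. */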
static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

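/* Read the port number from the port capability register. */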
static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

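/* Latency tolerance setting in the port control register, exposed via sysfs. */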
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	umode_t mode = attr->mode;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * userclk sysfs interfaces are only visible in case port
		 * revision is 0, as hardware with revision >0 doesn't
		 * support this.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs      = port_hdr_attrs,
	.is_visible = port_hdr_attrs_visible,
};

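/* Port header (sub)feature init: bring the port to a known state via reset. */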
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

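/*
 * Illustrative userspace usage of DFL_FPGA_PORT_RESET (a sketch, not part of
 * this driver): the ioctl takes no argument, so userspace passes 0.
 *
 *	ioctl(fd, DFL_FPGA_PORT_RESET, 0);
 */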
static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}

static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

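/*
 * The AFU GUID is only readable while the port is out of reset; afu_id_show
 * returns -EBUSY if the port is currently disabled.
 */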
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs      = port_afu_attrs,
	.is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	},
	{
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	},
	{
		.ops = NULL,
	}
};

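/*
 * Character device interface exposed to userspace for each port: open,
 * release, ioctl and mmap below operate on the AFU feature device node.
 */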
static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata)) {
		__port_reset(pdev);
		afu_dma_region_destroy(pdata);
	}
	mutex_unlock(&pdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

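/*
 * Illustrative userspace usage of DFL_FPGA_PORT_DMA_MAP (a sketch, not part
 * of this driver): fill struct dfl_fpga_port_dma_map from <linux/fpga-dfl.h>
 * and issue the ioctl on the opened port device; on success, .iova holds the
 * device-visible address of the pinned buffer.
 *
 *	struct dfl_fpga_port_dma_map map = {
 *		.argsz = sizeof(map),
 *		.user_addr = (__u64)(uintptr_t)buf,
 *		.length = len,
 *	};
 *	if (!ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
 *		use_iova(map.iova);	// use_iova() is a hypothetical consumer
 */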
static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd.
		 * A sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub feature, and returns 0 or another
		 * error code if the cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

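/*
 * Map an AFU MMIO region into userspace. The requested offset and size must
 * match a region registered at feature init time, and the region's access
 * flags are enforced against the VMA permissions.
 */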
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

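/* Allocate per-port AFU state and set up MMIO and DMA region bookkeeping. */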
static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

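/* dfl_fpga_port_ops callback: enable the port or disable it (hold in reset). */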
static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	mutex_lock(&pdata->lock);
	if (enable)
		__afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

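/* Platform driver probe: set up AFU state, sub-features and the char device. */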
static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver	= {
		.name	    = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe   = afu_probe,
	.remove  = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");