// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

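/*
 * Final put_device() callback for a cxl_memdev: return the minor id to
 * the ida and free the memdev allocated in cxl_memdev_alloc().
 */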
static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

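/*
 * Place the character device node under a "cxl/" subdirectory, i.e.
 * /dev/cxl/memN rather than /dev/memN.
 */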
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

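/*
 * Read-only sysfs attributes for the memdev; these appear under the
 * device on the CXL bus (e.g. /sys/bus/cxl/devices/memN/).
 */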
static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;

	return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->ram_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_mem *cxlm = cxlmd->cxlm;
	unsigned long long len = range_len(&cxlm->pmem_range);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	NULL,
};

static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

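/*
 * devm teardown action: recover the registering driver's
 * cdevm_file_operations from the cdev ops via container_of() and invoke
 * its ->shutdown() hook to idle any in-flight ioctls before the cdev and
 * device are deleted.
 */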
static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;
	struct cdev *cdev = &cxlmd->cdev;
	const struct cdevm_file_operations *cdevm_fops;

	cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
	cdevm_fops->shutdown(dev);

	cdev_device_del(&cxlmd->cdev, dev);
	put_device(dev);
}

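/*
 * Allocate a cxl_memdev, reserve a minor id, and initialize (but do not
 * add) the embedded struct device and struct cdev. Registration is
 * completed by devm_cxl_add_memdev().
 */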
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
					   const struct file_operations *fops)
{
	struct pci_dev *pdev = cxlm->pdev;
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	/* ida max is inclusive; stay within the reserved chrdev minors */
	rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;

	dev = &cxlmd->dev;
	device_initialize(dev);
	dev->parent = &pdev->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

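/**
 * devm_cxl_add_memdev - allocate and register a CXL memory device
 * @host: devm context whose unwinding tears the memdev back down
 * @cxlm: driver state for the memory device backing the memdev
 * @cdevm_fops: char device file operations plus a ->shutdown() hook used
 *		to idle ioctls when the memdev is unregistered
 *
 * Return: pointer to the new cxl_memdev, or an ERR_PTR() on failure.
 */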
struct cxl_memdev *
devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
		    const struct cdevm_file_operations *cdevm_fops)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlm = cxlm;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cdevm_fops->shutdown(dev);
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
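
/*
 * Illustrative sketch (not taken from this file): a registering driver,
 * such as the CXL PCI driver, is expected to supply a
 * struct cdevm_file_operations and call devm_cxl_add_memdev() roughly
 * like the following, where the "my_*" names are hypothetical:
 *
 *	static const struct cdevm_file_operations my_memdev_fops = {
 *		.fops = {
 *			.owner = THIS_MODULE,
 *			.unlocked_ioctl = my_memdev_ioctl,
 *			.open = my_memdev_open,
 *			.llseek = noop_llseek,
 *		},
 *		.shutdown = my_memdev_shutdown,
 *	};
 *
 *	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &my_memdev_fops);
 *	if (IS_ERR(cxlmd))
 *		return PTR_ERR(cxlmd);
 */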
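
/*
 * Reserve a dynamic char device major with CXL_MEM_MAX_DEVS minors;
 * per-device nodes are created later by devm_cxl_add_memdev().
 */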
__init int cxl_memdev_init(void)
{
	dev_t devt;
	int rc;

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	return 0;
}

void cxl_memdev_exit(void)
{
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}