arch/powerpc/platforms/powernv/opal-prd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on POWERNV platform
 *
 * Copyright IBM Corporation 2015
 */

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <linux/uaccess.h>


/*
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
        struct list_head                list;
        struct opal_prd_msg_header      msg;
};

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;

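/*
 * Check that a userspace-supplied physical range falls entirely within one of
 * the "ibm,prd-label"-tagged regions under /reserved-memory, so we only ever
 * map memory that firmware has explicitly set aside for PRD use.
 */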
static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
        struct device_node *parent, *node;
        bool found;

        if (addr + size < addr)
                return false;

        parent = of_find_node_by_path("/reserved-memory");
        if (!parent)
                return false;

        found = false;

        for_each_child_of_node(parent, node) {
                uint64_t range_addr, range_size, range_end;
                const __be32 *addrp;
                const char *label;

                addrp = of_get_address(node, 0, &range_size, NULL);
                /* skip nodes without a usable address, eg. no "reg" property */
                if (!addrp)
                        continue;

                range_addr = of_read_number(addrp, 2);
                range_end = range_addr + range_size;

                label = of_get_property(node, "ibm,prd-label", NULL);

                /* PRD ranges need a label */
                if (!label)
                        continue;

                if (range_end <= range_addr)
                        continue;

                if (addr >= range_addr && addr + size <= range_end) {
                        found = true;
                        of_node_put(node);
                        break;
                }
        }

        of_node_put(parent);
        return found;
}

static int opal_prd_open(struct inode *inode, struct file *file)
{
        /*
         * Prevent multiple (separate) processes from concurrent interactions
         * with the FW PRD channel
         */
        if (atomic_xchg(&prd_usage, 1) == 1)
                return -EBUSY;

        return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 */
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
        size_t addr, size;
        pgprot_t page_prot;

        pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
                        vma->vm_start, vma->vm_end, vma->vm_pgoff,
                        vma->vm_flags);

        addr = vma->vm_pgoff << PAGE_SHIFT;
        size = vma->vm_end - vma->vm_start;

        /* ensure we're mapping within one of the allowable ranges */
        if (!opal_prd_range_is_valid(addr, size))
                return -EINVAL;

        page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                         size, vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
                                page_prot);
}

static bool opal_msg_queue_empty(void)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
        ret = list_empty(&opal_prd_msg_queue);
        spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

        return ret;
}

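/* The device becomes readable once the notifier has queued a message. */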
static __poll_t opal_prd_poll(struct file *file,
                struct poll_table_struct *wait)
{
        poll_wait(file, &opal_prd_msg_wait, wait);

        if (!opal_msg_queue_empty())
                return EPOLLIN | EPOLLRDNORM;

        return 0;
}

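/*
 * Hand the oldest queued PRD message to userspace. Blocks until a message is
 * available unless the file was opened O_NONBLOCK; on a short buffer or a
 * failed copy, the message is re-queued at the head of the list.
 */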
static ssize_t opal_prd_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct opal_prd_msg_queue_item *item;
        unsigned long flags;
        ssize_t size, err;
        int rc;

        /* we need at least a header's worth of data */
        if (count < sizeof(item->msg))
                return -EINVAL;

        if (*ppos)
                return -ESPIPE;

        item = NULL;

        for (;;) {

                spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
                if (!list_empty(&opal_prd_msg_queue)) {
                        item = list_first_entry(&opal_prd_msg_queue,
                                        struct opal_prd_msg_queue_item, list);
                        list_del(&item->list);
                }
                spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

                if (item)
                        break;

                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                rc = wait_event_interruptible(opal_prd_msg_wait,
                                !opal_msg_queue_empty());
                if (rc)
                        return -EINTR;
        }

        size = be16_to_cpu(item->msg.size);
        if (size > count) {
                err = -EINVAL;
                goto err_requeue;
        }

        rc = copy_to_user(buf, &item->msg, size);
        if (rc) {
                err = -EFAULT;
                goto err_requeue;
        }

        kfree(item);

        return size;

err_requeue:
        /* eep! re-queue at the head of the list */
        spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
        list_add(&item->list, &opal_prd_msg_queue);
        spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
        return err;
}

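/*
 * Pass a PRD message from userspace straight through to firmware. The message
 * length is taken from its header, which must be supplied in full.
 */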
static ssize_t opal_prd_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct opal_prd_msg_header hdr;
        ssize_t size;
        void *msg;
        int rc;

        size = sizeof(hdr);

        if (count < size)
                return -EINVAL;

        /* grab the header */
        rc = copy_from_user(&hdr, buf, sizeof(hdr));
        if (rc)
                return -EFAULT;

        size = be16_to_cpu(hdr.size);

        msg = memdup_user(buf, size);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        rc = opal_prd_msg(msg);
        if (rc) {
                pr_warn("write: opal_prd_msg returned %d\n", rc);
                size = -EIO;
        }

        kfree(msg);

        return size;
}

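/*
 * Tell firmware that the PRD client has gone away, then release the
 * single-open slot taken in opal_prd_open().
 */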
static int opal_prd_release(struct inode *inode, struct file *file)
{
        struct opal_prd_msg_header msg;

        msg.size = cpu_to_be16(sizeof(msg));
        msg.type = OPAL_PRD_MSG_TYPE_FINI;

        opal_prd_msg((struct opal_prd_msg *)&msg);

        atomic_xchg(&prd_usage, 0);

        return 0;
}

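/*
 * ioctl interface used by the userspace PRD daemon: report the kernel
 * interface version and perform XSCOM reads/writes on its behalf.
 */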
static long opal_prd_ioctl(struct file *file, unsigned int cmd,
                unsigned long param)
{
        struct opal_prd_info info;
        struct opal_prd_scom scom;
        int rc = 0;

        switch (cmd) {
        case OPAL_PRD_GET_INFO:
                memset(&info, 0, sizeof(info));
                info.version = OPAL_PRD_KERNEL_VERSION;
                rc = copy_to_user((void __user *)param, &info, sizeof(info));
                if (rc)
                        return -EFAULT;
                break;

        case OPAL_PRD_SCOM_READ:
                rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
                if (rc)
                        return -EFAULT;

                scom.rc = opal_xscom_read(scom.chip, scom.addr,
                                (__be64 *)&scom.data);
                scom.data = be64_to_cpu(scom.data);
                pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
                                scom.chip, scom.addr, scom.data, scom.rc);

                rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
                if (rc)
                        return -EFAULT;
                break;

        case OPAL_PRD_SCOM_WRITE:
                rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
                if (rc)
                        return -EFAULT;

                scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
                pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
                                scom.chip, scom.addr, scom.data, scom.rc);

                rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
                if (rc)
                        return -EFAULT;
                break;

        default:
                rc = -EINVAL;
        }

        return rc;
}

static const struct file_operations opal_prd_fops = {
        .open           = opal_prd_open,
        .mmap           = opal_prd_mmap,
        .poll           = opal_prd_poll,
        .read           = opal_prd_read,
        .write          = opal_prd_write,
        .unlocked_ioctl = opal_prd_ioctl,
        .release        = opal_prd_release,
        .owner          = THIS_MODULE,
};

static struct miscdevice opal_prd_dev = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "opal-prd",
        .fops           = &opal_prd_fops,
};

/* opal interface */
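/*
 * Notifier callback for OPAL_MSG_PRD/PRD2 messages: copy the incoming message
 * into a freshly-allocated queue item and wake any reader blocked in
 * opal_prd_read().
 */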
static int opal_prd_msg_notifier(struct notifier_block *nb,
                unsigned long msg_type, void *_msg)
{
        struct opal_prd_msg_queue_item *item;
        struct opal_prd_msg_header *hdr;
        struct opal_msg *msg = _msg;
        int msg_size, item_size;
        unsigned long flags;

        if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2)
                return 0;

        /*
         * Calculate total size of the message and item we need to store. The
         * 'size' field in the header includes the header itself.
         */
        hdr = (void *)msg->params;
        msg_size = be16_to_cpu(hdr->size);
        item_size = msg_size + sizeof(*item) - sizeof(item->msg);

        item = kzalloc(item_size, GFP_ATOMIC);
        if (!item)
                return -ENOMEM;

        memcpy(&item->msg, msg->params, msg_size);

        spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
        list_add_tail(&item->list, &opal_prd_msg_queue);
        spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

        wake_up_interruptible(&opal_prd_msg_wait);

        return 0;
}

static struct notifier_block opal_prd_event_nb = {
        .notifier_call  = opal_prd_msg_notifier,
        .next           = NULL,
        .priority       = 0,
};

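/*
 * Platform driver probe: register for PRD messages from OPAL and expose the
 * /dev/opal-prd miscdevice to userspace.
 */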
static int opal_prd_probe(struct platform_device *pdev)
{
        int rc;

        if (!pdev || !pdev->dev.of_node)
                return -ENODEV;

        /*
         * We should only have one prd driver instance per machine; ensure
         * that we only get a valid probe on a single OF node.
         */
        if (prd_node)
                return -EBUSY;

        prd_node = pdev->dev.of_node;

        rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
        if (rc) {
                pr_err("Couldn't register event notifier\n");
                return rc;
        }

        rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb);
        if (rc) {
                pr_err("Couldn't register PRD2 event notifier\n");
                opal_message_notifier_unregister(OPAL_MSG_PRD,
                                &opal_prd_event_nb);
                return rc;
        }

        rc = misc_register(&opal_prd_dev);
        if (rc) {
                pr_err("failed to register miscdev\n");
                opal_message_notifier_unregister(OPAL_MSG_PRD,
                                &opal_prd_event_nb);
                opal_message_notifier_unregister(OPAL_MSG_PRD2,
                                &opal_prd_event_nb);
                return rc;
        }

        return 0;
}

static int opal_prd_remove(struct platform_device *pdev)
{
        misc_deregister(&opal_prd_dev);
        opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
        opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb);
        return 0;
}

static const struct of_device_id opal_prd_match[] = {
        { .compatible = "ibm,opal-prd" },
        { },
};

static struct platform_driver opal_prd_driver = {
        .driver = {
                .name           = "opal-prd",
                .of_match_table = opal_prd_match,
        },
        .probe  = opal_prd_probe,
        .remove = opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");
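
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace PRD client might drive this interface, using the UAPI
 * definitions from <asm/opal-prd.h>. The SCOM chip/address values below are
 * hypothetical placeholders.
 *
 *	int fd = open("/dev/opal-prd", O_RDWR);
 *
 *	struct opal_prd_info info;
 *	ioctl(fd, OPAL_PRD_GET_INFO, &info);	// kernel interface version
 *
 *	struct opal_prd_scom scom = { .chip = 0, .addr = 0x0 };
 *	ioctl(fd, OPAL_PRD_SCOM_READ, &scom);	// fills scom.data and scom.rc
 *
 *	// PRD messages from firmware arrive via read(); poll() reports
 *	// EPOLLIN once the notifier has queued one.
 */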