drivers/counter/counter-chrdev.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic Counter character device interface
 * Copyright (C) 2020 William Breathitt Gray
 */
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include "counter-chrdev.h"

struct counter_comp_node {
        struct list_head l;
        struct counter_component component;
        struct counter_comp comp;
        void *parent;
};

#define counter_comp_read_is_equal(a, b) \
        (a.action_read == b.action_read || \
        a.device_u8_read == b.device_u8_read || \
        a.count_u8_read == b.count_u8_read || \
        a.signal_u8_read == b.signal_u8_read || \
        a.device_u32_read == b.device_u32_read || \
        a.count_u32_read == b.count_u32_read || \
        a.signal_u32_read == b.signal_u32_read || \
        a.device_u64_read == b.device_u64_read || \
        a.count_u64_read == b.count_u64_read || \
        a.signal_u64_read == b.signal_u64_read || \
        a.signal_array_u32_read == b.signal_array_u32_read || \
        a.device_array_u64_read == b.device_array_u64_read || \
        a.count_array_u64_read == b.count_array_u64_read || \
        a.signal_array_u64_read == b.signal_array_u64_read)

#define counter_comp_read_is_set(comp) \
        (comp.action_read || \
        comp.device_u8_read || \
        comp.count_u8_read || \
        comp.signal_u8_read || \
        comp.device_u32_read || \
        comp.count_u32_read || \
        comp.signal_u32_read || \
        comp.device_u64_read || \
        comp.count_u64_read || \
        comp.signal_u64_read || \
        comp.signal_array_u32_read || \
        comp.device_array_u64_read || \
        comp.count_array_u64_read || \
        comp.signal_array_u64_read)
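
/*
 * Note (editorial): the read callbacks checked above are members of a union
 * in struct counter_comp (see include/linux/counter.h), so these helpers are
 * effectively asking "do a and b refer to the same read callback?" and
 * "does this component provide any read callback at all?"; listing every
 * member keeps the check explicit for each callback type.
 */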

static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
                                   size_t len, loff_t *f_ps)
{
        struct counter_device *const counter = filp->private_data;
        int err;
        unsigned int copied;

        if (!counter->ops)
                return -ENODEV;

        if (len < sizeof(struct counter_event))
                return -EINVAL;

        do {
                if (kfifo_is_empty(&counter->events)) {
                        if (filp->f_flags & O_NONBLOCK)
                                return -EAGAIN;

                        err = wait_event_interruptible(counter->events_wait,
                                        !kfifo_is_empty(&counter->events) ||
                                        !counter->ops);
                        if (err < 0)
                                return err;
                        if (!counter->ops)
                                return -ENODEV;
                }

                if (mutex_lock_interruptible(&counter->events_out_lock))
                        return -ERESTARTSYS;
                err = kfifo_to_user(&counter->events, buf, len, &copied);
                mutex_unlock(&counter->events_out_lock);
                if (err < 0)
                        return err;
        } while (!copied);

        return copied;
}
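
/*
 * Illustrative sketch (not part of this file): a userspace consumer would
 * typically read whole struct counter_event records from the character
 * device, e.g.
 *
 *	struct counter_event event;
 *
 *	while (read(fd, &event, sizeof(event)) == sizeof(event)) {
 *		if (event.status)
 *			continue;	// component read failed; status is a positive errno
 *		// event.watch identifies the component, event.value holds its data
 *	}
 *
 * The device node name (e.g. /dev/counter0) and the consumer logic are
 * assumptions for illustration; the record layout itself comes from
 * include/uapi/linux/counter.h.
 */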

static __poll_t counter_chrdev_poll(struct file *filp,
                                    struct poll_table_struct *pollt)
{
        struct counter_device *const counter = filp->private_data;
        __poll_t events = 0;

        if (!counter->ops)
                return events;

        poll_wait(filp, &counter->events_wait, pollt);

        if (!kfifo_is_empty(&counter->events))
                events = EPOLLIN | EPOLLRDNORM;

        return events;
}

static void counter_events_list_free(struct list_head *const events_list)
{
        struct counter_event_node *p, *n;
        struct counter_comp_node *q, *o;

        list_for_each_entry_safe(p, n, events_list, l) {
                /* Free associated component nodes */
                list_for_each_entry_safe(q, o, &p->comp_list, l) {
                        list_del(&q->l);
                        kfree(q);
                }

                /* Free event node */
                list_del(&p->l);
                kfree(p);
        }
}

static int counter_set_event_node(struct counter_device *const counter,
                                  struct counter_watch *const watch,
                                  const struct counter_comp_node *const cfg)
{
        struct counter_event_node *event_node;
        int err = 0;
        struct counter_comp_node *comp_node;

        /* Search for event in the list */
        list_for_each_entry(event_node, &counter->next_events_list, l)
                if (event_node->event == watch->event &&
                    event_node->channel == watch->channel)
                        break;

        /* If event is not already in the list */
        if (&event_node->l == &counter->next_events_list) {
                /* Allocate new event node */
                event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
                if (!event_node)
                        return -ENOMEM;

                /* Configure event node and add to the list */
                event_node->event = watch->event;
                event_node->channel = watch->channel;
                INIT_LIST_HEAD(&event_node->comp_list);
                list_add(&event_node->l, &counter->next_events_list);
        }

        /* Check if component watch has already been set before */
        list_for_each_entry(comp_node, &event_node->comp_list, l)
                if (comp_node->parent == cfg->parent &&
                    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
                        err = -EINVAL;
                        goto exit_free_event_node;
                }

        /* Allocate component node */
        comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
        if (!comp_node) {
                err = -ENOMEM;
                goto exit_free_event_node;
        }
        *comp_node = *cfg;

        /* Add component node to event node */
        list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
        /* Free event node if no one else is watching */
        if (list_empty(&event_node->comp_list)) {
                list_del(&event_node->l);
                kfree(event_node);
        }

        return err;
}

static int counter_enable_events(struct counter_device *const counter)
{
        unsigned long flags;
        int err = 0;

        mutex_lock(&counter->n_events_list_lock);
        spin_lock_irqsave(&counter->events_list_lock, flags);

        counter_events_list_free(&counter->events_list);
        list_replace_init(&counter->next_events_list,
                          &counter->events_list);

        if (counter->ops->events_configure)
                err = counter->ops->events_configure(counter);

        spin_unlock_irqrestore(&counter->events_list_lock, flags);
        mutex_unlock(&counter->n_events_list_lock);

        return err;
}

static int counter_disable_events(struct counter_device *const counter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&counter->events_list_lock, flags);

        counter_events_list_free(&counter->events_list);

        if (counter->ops->events_configure)
                err = counter->ops->events_configure(counter);

        spin_unlock_irqrestore(&counter->events_list_lock, flags);

        mutex_lock(&counter->n_events_list_lock);

        counter_events_list_free(&counter->next_events_list);

        mutex_unlock(&counter->n_events_list_lock);

        return err;
}
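
/*
 * Design note (editorial): watches added via COUNTER_ADD_WATCH_IOCTL
 * accumulate on next_events_list under n_events_list_lock (a mutex, since
 * node allocation may sleep). COUNTER_ENABLE_EVENTS_IOCTL then swaps the
 * staged list into events_list under events_list_lock (a spinlock, because
 * counter_push_event() may walk events_list from interrupt context). This
 * lets a new watch set be built up without disturbing the set currently
 * generating events.
 */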

static int counter_get_ext(const struct counter_comp *const ext,
                           const size_t num_ext, const size_t component_id,
                           size_t *const ext_idx, size_t *const id)
{
        struct counter_array *element;

        *id = 0;
        for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
                if (*id == component_id)
                        return 0;

                if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
                        element = ext[*ext_idx].priv;

                        if (component_id - *id < element->length)
                                return 0;

                        *id += element->length;
                } else
                        (*id)++;
        }

        return -EINVAL;
}
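
/*
 * Worked example (hypothetical extension layout, for illustration only):
 * if ext[0] is a scalar component and ext[1] is a COUNTER_COMP_ARRAY of
 * length 4, then component_id 3 resolves to *ext_idx = 1 with *id = 1, and
 * the caller derives the element index within the array as
 * component_id - *id = 2. Scalar extensions consume one id each, while an
 * array extension consumes one id per element.
 */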

static int counter_add_watch(struct counter_device *const counter,
                             const unsigned long arg)
{
        void __user *const uwatch = (void __user *)arg;
        struct counter_watch watch;
        struct counter_comp_node comp_node = {};
        size_t parent, id;
        struct counter_comp *ext;
        size_t num_ext;
        size_t ext_idx, ext_id;
        int err = 0;

        if (copy_from_user(&watch, uwatch, sizeof(watch)))
                return -EFAULT;

        if (watch.component.type == COUNTER_COMPONENT_NONE)
                goto no_component;

        parent = watch.component.parent;

        /* Configure parent component info for comp node */
        switch (watch.component.scope) {
        case COUNTER_SCOPE_DEVICE:
                ext = counter->ext;
                num_ext = counter->num_ext;
                break;
        case COUNTER_SCOPE_SIGNAL:
                if (parent >= counter->num_signals)
                        return -EINVAL;
                parent = array_index_nospec(parent, counter->num_signals);

                comp_node.parent = counter->signals + parent;

                ext = counter->signals[parent].ext;
                num_ext = counter->signals[parent].num_ext;
                break;
        case COUNTER_SCOPE_COUNT:
                if (parent >= counter->num_counts)
                        return -EINVAL;
                parent = array_index_nospec(parent, counter->num_counts);

                comp_node.parent = counter->counts + parent;

                ext = counter->counts[parent].ext;
                num_ext = counter->counts[parent].num_ext;
                break;
        default:
                return -EINVAL;
        }

        id = watch.component.id;

        /* Configure component info for comp node */
        switch (watch.component.type) {
        case COUNTER_COMPONENT_SIGNAL:
                if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
                        return -EINVAL;

                comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
                comp_node.comp.signal_u32_read = counter->ops->signal_read;
                break;
        case COUNTER_COMPONENT_COUNT:
                if (watch.component.scope != COUNTER_SCOPE_COUNT)
                        return -EINVAL;

                comp_node.comp.type = COUNTER_COMP_U64;
                comp_node.comp.count_u64_read = counter->ops->count_read;
                break;
        case COUNTER_COMPONENT_FUNCTION:
                if (watch.component.scope != COUNTER_SCOPE_COUNT)
                        return -EINVAL;

                comp_node.comp.type = COUNTER_COMP_FUNCTION;
                comp_node.comp.count_u32_read = counter->ops->function_read;
                break;
        case COUNTER_COMPONENT_SYNAPSE_ACTION:
                if (watch.component.scope != COUNTER_SCOPE_COUNT)
                        return -EINVAL;
                if (id >= counter->counts[parent].num_synapses)
                        return -EINVAL;
                id = array_index_nospec(id, counter->counts[parent].num_synapses);

                comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
                comp_node.comp.action_read = counter->ops->action_read;
                comp_node.comp.priv = counter->counts[parent].synapses + id;
                break;
        case COUNTER_COMPONENT_EXTENSION:
                err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
                if (err < 0)
                        return err;

                comp_node.comp = ext[ext_idx];
                break;
        default:
                return -EINVAL;
        }
        if (!counter_comp_read_is_set(comp_node.comp))
                return -EOPNOTSUPP;

no_component:
        mutex_lock(&counter->n_events_list_lock);

        if (counter->ops->watch_validate) {
                err = counter->ops->watch_validate(counter, &watch);
                if (err < 0)
                        goto err_exit;
        }

        comp_node.component = watch.component;

        err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
        mutex_unlock(&counter->n_events_list_lock);

        return err;
}

static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
{
        struct counter_device *const counter = filp->private_data;
        int ret = -ENODEV;

        mutex_lock(&counter->ops_exist_lock);

        if (!counter->ops)
                goto out_unlock;

        switch (cmd) {
        case COUNTER_ADD_WATCH_IOCTL:
                ret = counter_add_watch(counter, arg);
                break;
        case COUNTER_ENABLE_EVENTS_IOCTL:
                ret = counter_enable_events(counter);
                break;
        case COUNTER_DISABLE_EVENTS_IOCTL:
                ret = counter_disable_events(counter);
                break;
        default:
                ret = -ENOIOCTLCMD;
                break;
        }

out_unlock:
        mutex_unlock(&counter->ops_exist_lock);

        return ret;
}
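
/*
 * Illustrative sketch (not part of this file): a userspace program might
 * register a watch for a count value and then enable event delivery with
 *
 *	struct counter_watch watch = {
 *		.component.type = COUNTER_COMPONENT_COUNT,
 *		.component.scope = COUNTER_SCOPE_COUNT,
 *		.component.parent = 0,
 *		.event = COUNTER_EVENT_OVERFLOW,
 *		.channel = 0,
 *	};
 *
 *	ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch);
 *	ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
 *
 * The event type and the parent/channel values are arbitrary placeholders
 * and depend on what the underlying driver supports (see its
 * watch_validate() callback); the ioctl names and struct counter_watch come
 * from include/uapi/linux/counter.h.
 */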

static int counter_chrdev_open(struct inode *inode, struct file *filp)
{
        struct counter_device *const counter = container_of(inode->i_cdev,
                                                            typeof(*counter),
                                                            chrdev);

        get_device(&counter->dev);
        filp->private_data = counter;

        return nonseekable_open(inode, filp);
}

static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
        struct counter_device *const counter = filp->private_data;
        int ret = 0;

        mutex_lock(&counter->ops_exist_lock);

        if (!counter->ops) {
                /* Free any lingering held memory */
                counter_events_list_free(&counter->events_list);
                counter_events_list_free(&counter->next_events_list);
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = counter_disable_events(counter);
        if (ret < 0) {
                mutex_unlock(&counter->ops_exist_lock);
                return ret;
        }

out_unlock:
        mutex_unlock(&counter->ops_exist_lock);

        put_device(&counter->dev);

        return ret;
}

static const struct file_operations counter_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = counter_chrdev_read,
        .poll = counter_chrdev_poll,
        .unlocked_ioctl = counter_chrdev_ioctl,
        .open = counter_chrdev_open,
        .release = counter_chrdev_release,
};

int counter_chrdev_add(struct counter_device *const counter)
{
        /* Initialize Counter events lists */
        INIT_LIST_HEAD(&counter->events_list);
        INIT_LIST_HEAD(&counter->next_events_list);
        spin_lock_init(&counter->events_list_lock);
        mutex_init(&counter->n_events_list_lock);
        init_waitqueue_head(&counter->events_wait);
        spin_lock_init(&counter->events_in_lock);
        mutex_init(&counter->events_out_lock);

        /* Initialize character device */
        cdev_init(&counter->chrdev, &counter_fops);

        /* Allocate Counter events queue */
        return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}
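
/*
 * Note (editorial): the events kfifo is allocated with a fixed capacity of
 * 64 struct counter_event records. If userspace does not drain the queue
 * quickly enough, the kfifo insertion in counter_push_event() finds no room
 * and the excess events are dropped rather than blocking the producer.
 */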

void counter_chrdev_remove(struct counter_device *const counter)
{
        kfifo_free(&counter->events);
}

static int counter_get_array_data(struct counter_device *const counter,
                                  const enum counter_scope scope,
                                  void *const parent,
                                  const struct counter_comp *const comp,
                                  const size_t idx, u64 *const value)
{
        const struct counter_array *const element = comp->priv;
        u32 value_u32 = 0;
        int ret;

        switch (element->type) {
        case COUNTER_COMP_SIGNAL_POLARITY:
                if (scope != COUNTER_SCOPE_SIGNAL)
                        return -EINVAL;
                ret = comp->signal_array_u32_read(counter, parent, idx,
                                                  &value_u32);
                *value = value_u32;
                return ret;
        case COUNTER_COMP_U64:
                switch (scope) {
                case COUNTER_SCOPE_DEVICE:
                        return comp->device_array_u64_read(counter, idx, value);
                case COUNTER_SCOPE_SIGNAL:
                        return comp->signal_array_u64_read(counter, parent, idx,
                                                           value);
                case COUNTER_SCOPE_COUNT:
                        return comp->count_array_u64_read(counter, parent, idx,
                                                          value);
                default:
                        return -EINVAL;
                }
        default:
                return -EINVAL;
        }
}

static int counter_get_data(struct counter_device *const counter,
                            const struct counter_comp_node *const comp_node,
                            u64 *const value)
{
        const struct counter_comp *const comp = &comp_node->comp;
        const enum counter_scope scope = comp_node->component.scope;
        const size_t id = comp_node->component.id;
        struct counter_signal *const signal = comp_node->parent;
        struct counter_count *const count = comp_node->parent;
        u8 value_u8 = 0;
        u32 value_u32 = 0;
        const struct counter_comp *ext;
        size_t num_ext;
        size_t ext_idx, ext_id;
        int ret;

        if (comp_node->component.type == COUNTER_COMPONENT_NONE)
                return 0;

        switch (comp->type) {
        case COUNTER_COMP_U8:
        case COUNTER_COMP_BOOL:
                switch (scope) {
                case COUNTER_SCOPE_DEVICE:
                        ret = comp->device_u8_read(counter, &value_u8);
                        break;
                case COUNTER_SCOPE_SIGNAL:
                        ret = comp->signal_u8_read(counter, signal, &value_u8);
                        break;
                case COUNTER_SCOPE_COUNT:
                        ret = comp->count_u8_read(counter, count, &value_u8);
                        break;
                default:
                        return -EINVAL;
                }
                *value = value_u8;
                return ret;
        case COUNTER_COMP_SIGNAL_LEVEL:
        case COUNTER_COMP_FUNCTION:
        case COUNTER_COMP_ENUM:
        case COUNTER_COMP_COUNT_DIRECTION:
        case COUNTER_COMP_COUNT_MODE:
        case COUNTER_COMP_SIGNAL_POLARITY:
                switch (scope) {
                case COUNTER_SCOPE_DEVICE:
                        ret = comp->device_u32_read(counter, &value_u32);
                        break;
                case COUNTER_SCOPE_SIGNAL:
                        ret = comp->signal_u32_read(counter, signal,
                                                    &value_u32);
                        break;
                case COUNTER_SCOPE_COUNT:
                        ret = comp->count_u32_read(counter, count, &value_u32);
                        break;
                default:
                        return -EINVAL;
                }
                *value = value_u32;
                return ret;
        case COUNTER_COMP_U64:
                switch (scope) {
                case COUNTER_SCOPE_DEVICE:
                        return comp->device_u64_read(counter, value);
                case COUNTER_SCOPE_SIGNAL:
                        return comp->signal_u64_read(counter, signal, value);
                case COUNTER_SCOPE_COUNT:
                        return comp->count_u64_read(counter, count, value);
                default:
                        return -EINVAL;
                }
        case COUNTER_COMP_SYNAPSE_ACTION:
                ret = comp->action_read(counter, count, comp->priv, &value_u32);
                *value = value_u32;
                return ret;
        case COUNTER_COMP_ARRAY:
                switch (scope) {
                case COUNTER_SCOPE_DEVICE:
                        ext = counter->ext;
                        num_ext = counter->num_ext;
                        break;
                case COUNTER_SCOPE_SIGNAL:
                        ext = signal->ext;
                        num_ext = signal->num_ext;
                        break;
                case COUNTER_SCOPE_COUNT:
                        ext = count->ext;
                        num_ext = count->num_ext;
                        break;
                default:
                        return -EINVAL;
                }
                ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
                if (ret < 0)
                        return ret;

                return counter_get_array_data(counter, scope, comp_node->parent,
                                              comp, id - ext_id, value);
        default:
                return -EINVAL;
        }
}

/**
 * counter_push_event - queue event for userspace reading
 * @counter:	pointer to Counter structure
 * @event:	triggered event
 * @channel:	event channel
 *
 * Note: If no one is watching for the respective event, it is silently
 * discarded.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
                        const u8 channel)
{
        struct counter_event ev;
        unsigned int copied = 0;
        unsigned long flags;
        struct counter_event_node *event_node;
        struct counter_comp_node *comp_node;

        ev.timestamp = ktime_get_ns();
        ev.watch.event = event;
        ev.watch.channel = channel;

        /* Could be in an interrupt context, so use a spin lock */
        spin_lock_irqsave(&counter->events_list_lock, flags);

        /* Search for event in the list */
        list_for_each_entry(event_node, &counter->events_list, l)
                if (event_node->event == event &&
                    event_node->channel == channel)
                        break;

        /* If event is not in the list */
        if (&event_node->l == &counter->events_list)
                goto exit_early;

        /* Read and queue relevant comp for userspace */
        list_for_each_entry(comp_node, &event_node->comp_list, l) {
                ev.watch.component = comp_node->component;
                ev.status = -counter_get_data(counter, comp_node, &ev.value);

                copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
                                                        1, &counter->events_in_lock);
        }

exit_early:
        spin_unlock_irqrestore(&counter->events_list_lock, flags);

        if (copied)
                wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
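
/*
 * Illustrative sketch (not part of this file): a counter driver typically
 * calls counter_push_event() from its interrupt handler once it has
 * identified which hardware event fired, e.g.
 *
 *	static irqreturn_t foo_counter_irq_handler(int irq, void *private)
 *	{
 *		struct counter_device *counter = private;
 *
 *		// ... acknowledge the hardware interrupt ...
 *		counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * The handler name, event type, and channel 0 are placeholders; real
 * drivers (e.g. drivers/counter/104-quad-8.c) map their hardware status to
 * the appropriate COUNTER_EVENT_* value and channel before pushing.
 */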