// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>

#include "most/core.h"
/* number of minor numbers reserved for this component's character devices */
#define CHRDEV_REGION_SIZE 50
24 static struct cdev_component {
29 struct most_component cc;
34 spinlock_t unlink; /* synchronization lock to unlink channels */
37 struct mutex io_mutex;
38 struct most_interface *iface;
39 struct most_channel_config *cfg;
40 unsigned int channel_id;
43 DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
45 struct list_head list;
/* map a cdev embedded in a comp_channel back to its enclosing channel */
#define to_channel(d) container_of(d, struct comp_channel, cdev)
/* list of all registered channels; guarded by ch_list_lock */
static struct list_head channel_list;
static spinlock_t ch_list_lock;
52 static inline bool ch_has_mbo(struct comp_channel *c)
54 return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
57 static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
59 if (!kfifo_peek(&c->fifo, mbo)) {
60 *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
62 kfifo_in(&c->fifo, mbo, 1);
67 static struct comp_channel *get_channel(struct most_interface *iface, int id)
69 struct comp_channel *c, *tmp;
71 int found_channel = 0;
73 spin_lock_irqsave(&ch_list_lock, flags);
74 list_for_each_entry_safe(c, tmp, &channel_list, list) {
75 if ((c->iface == iface) && (c->channel_id == id)) {
80 spin_unlock_irqrestore(&ch_list_lock, flags);
86 static void stop_channel(struct comp_channel *c)
90 while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
92 most_stop_channel(c->iface, c->channel_id, &comp.cc);
95 static void destroy_cdev(struct comp_channel *c)
99 device_destroy(comp.class, c->devno);
101 spin_lock_irqsave(&ch_list_lock, flags);
103 spin_unlock_irqrestore(&ch_list_lock, flags);
106 static void destroy_channel(struct comp_channel *c)
108 ida_simple_remove(&comp.minor_id, MINOR(c->devno));
109 kfifo_free(&c->fifo);
114 * comp_open - implements the syscall to open the device
115 * @inode: inode pointer
116 * @filp: file pointer
118 * This stores the channel pointer in the private data field of
119 * the file structure and activates the channel within the core.
121 static int comp_open(struct inode *inode, struct file *filp)
123 struct comp_channel *c;
126 c = to_channel(inode->i_cdev);
127 filp->private_data = c;
129 if (((c->cfg->direction == MOST_CH_RX) &&
130 ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
131 ((c->cfg->direction == MOST_CH_TX) &&
132 ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
133 pr_info("WARN: Access flags mismatch\n");
137 mutex_lock(&c->io_mutex);
139 pr_info("WARN: Device is destroyed\n");
140 mutex_unlock(&c->io_mutex);
145 pr_info("WARN: Device is busy\n");
146 mutex_unlock(&c->io_mutex);
151 ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
154 mutex_unlock(&c->io_mutex);
159 * comp_close - implements the syscall to close the device
160 * @inode: inode pointer
161 * @filp: file pointer
163 * This stops the channel within the core.
165 static int comp_close(struct inode *inode, struct file *filp)
167 struct comp_channel *c = to_channel(inode->i_cdev);
169 mutex_lock(&c->io_mutex);
170 spin_lock(&c->unlink);
172 spin_unlock(&c->unlink);
175 mutex_unlock(&c->io_mutex);
177 mutex_unlock(&c->io_mutex);
184 * comp_write - implements the syscall to write to the device
185 * @filp: file pointer
186 * @buf: pointer to user buffer
187 * @count: number of bytes to write
188 * @offset: offset from where to start writing
190 static ssize_t comp_write(struct file *filp, const char __user *buf,
191 size_t count, loff_t *offset)
194 size_t to_copy, left;
195 struct mbo *mbo = NULL;
196 struct comp_channel *c = filp->private_data;
198 mutex_lock(&c->io_mutex);
199 while (c->dev && !ch_get_mbo(c, &mbo)) {
200 mutex_unlock(&c->io_mutex);
202 if ((filp->f_flags & O_NONBLOCK))
204 if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
206 mutex_lock(&c->io_mutex);
209 if (unlikely(!c->dev)) {
214 to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
215 left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
216 if (left == to_copy) {
221 c->mbo_offs += to_copy - left;
222 if (c->mbo_offs >= c->cfg->buffer_size ||
223 c->cfg->data_type == MOST_CH_CONTROL ||
224 c->cfg->data_type == MOST_CH_ASYNC) {
225 kfifo_skip(&c->fifo);
226 mbo->buffer_length = c->mbo_offs;
228 most_submit_mbo(mbo);
231 ret = to_copy - left;
233 mutex_unlock(&c->io_mutex);
238 * comp_read - implements the syscall to read from the device
239 * @filp: file pointer
240 * @buf: pointer to user buffer
241 * @count: number of bytes to read
242 * @offset: offset from where to start reading
245 comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
247 size_t to_copy, not_copied, copied;
248 struct mbo *mbo = NULL;
249 struct comp_channel *c = filp->private_data;
251 mutex_lock(&c->io_mutex);
252 while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
253 mutex_unlock(&c->io_mutex);
254 if (filp->f_flags & O_NONBLOCK)
256 if (wait_event_interruptible(c->wq,
257 (!kfifo_is_empty(&c->fifo) ||
260 mutex_lock(&c->io_mutex);
263 /* make sure we don't submit to gone devices */
264 if (unlikely(!c->dev)) {
265 mutex_unlock(&c->io_mutex);
269 to_copy = min_t(size_t,
271 mbo->processed_length - c->mbo_offs);
273 not_copied = copy_to_user(buf,
274 mbo->virt_address + c->mbo_offs,
277 copied = to_copy - not_copied;
279 c->mbo_offs += copied;
280 if (c->mbo_offs >= mbo->processed_length) {
281 kfifo_skip(&c->fifo);
285 mutex_unlock(&c->io_mutex);
289 static __poll_t comp_poll(struct file *filp, poll_table *wait)
291 struct comp_channel *c = filp->private_data;
294 poll_wait(filp, &c->wq, wait);
296 mutex_lock(&c->io_mutex);
297 if (c->cfg->direction == MOST_CH_RX) {
298 if (!c->dev || !kfifo_is_empty(&c->fifo))
299 mask |= EPOLLIN | EPOLLRDNORM;
301 if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
302 mask |= EPOLLOUT | EPOLLWRNORM;
304 mutex_unlock(&c->io_mutex);
309 * Initialization of struct file_operations
311 static const struct file_operations channel_fops = {
312 .owner = THIS_MODULE,
316 .release = comp_close,
321 * comp_disconnect_channel - disconnect a channel
322 * @iface: pointer to interface instance
323 * @channel_id: channel index
325 * This frees allocated memory and removes the cdev that represents this
326 * channel in user space.
328 static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
330 struct comp_channel *c;
333 pr_info("Bad interface pointer\n");
337 c = get_channel(iface, channel_id);
341 mutex_lock(&c->io_mutex);
342 spin_lock(&c->unlink);
344 spin_unlock(&c->unlink);
348 wake_up_interruptible(&c->wq);
349 mutex_unlock(&c->io_mutex);
351 mutex_unlock(&c->io_mutex);
358 * comp_rx_completion - completion handler for rx channels
359 * @mbo: pointer to buffer object that has completed
361 * This searches for the channel linked to this MBO and stores it in the local
364 static int comp_rx_completion(struct mbo *mbo)
366 struct comp_channel *c;
371 c = get_channel(mbo->ifp, mbo->hdm_channel_id);
375 spin_lock(&c->unlink);
376 if (!c->access_ref || !c->dev) {
377 spin_unlock(&c->unlink);
380 kfifo_in(&c->fifo, &mbo, 1);
381 spin_unlock(&c->unlink);
383 if (kfifo_is_full(&c->fifo))
384 pr_info("WARN: Fifo is full\n");
386 wake_up_interruptible(&c->wq);
391 * comp_tx_completion - completion handler for tx channels
392 * @iface: pointer to interface instance
393 * @channel_id: channel index/ID
395 * This wakes sleeping processes in the wait-queue.
397 static int comp_tx_completion(struct most_interface *iface, int channel_id)
399 struct comp_channel *c;
402 pr_info("Bad interface pointer\n");
405 if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
406 pr_info("Channel ID out of range\n");
410 c = get_channel(iface, channel_id);
413 wake_up_interruptible(&c->wq);
418 * comp_probe - probe function of the driver module
419 * @iface: pointer to interface instance
420 * @channel_id: channel index/ID
421 * @cfg: pointer to actual channel configuration
422 * @name: name of the device to be created
424 * This allocates achannel object and creates the device node in /dev
426 * Returns 0 on success or error code otherwise.
428 static int comp_probe(struct most_interface *iface, int channel_id,
429 struct most_channel_config *cfg, char *name, char *args)
431 struct comp_channel *c;
432 unsigned long cl_flags;
436 if ((!iface) || (!cfg) || (!name)) {
437 pr_info("Probing component with bad arguments");
440 c = get_channel(iface, channel_id);
444 current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
445 if (current_minor < 0)
446 return current_minor;
448 c = kzalloc(sizeof(*c), GFP_KERNEL);
454 c->devno = MKDEV(comp.major, current_minor);
455 cdev_init(&c->cdev, &channel_fops);
456 c->cdev.owner = THIS_MODULE;
457 retval = cdev_add(&c->cdev, c->devno, 1);
462 c->channel_id = channel_id;
464 spin_lock_init(&c->unlink);
466 retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
468 goto err_del_cdev_and_free_channel;
469 init_waitqueue_head(&c->wq);
470 mutex_init(&c->io_mutex);
471 spin_lock_irqsave(&ch_list_lock, cl_flags);
472 list_add_tail(&c->list, &channel_list);
473 spin_unlock_irqrestore(&ch_list_lock, cl_flags);
474 c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
476 if (IS_ERR(c->dev)) {
477 retval = PTR_ERR(c->dev);
478 pr_info("failed to create new device node %s\n", name);
479 goto err_free_kfifo_and_del_list;
481 kobject_uevent(&c->dev->kobj, KOBJ_ADD);
484 err_free_kfifo_and_del_list:
485 kfifo_free(&c->fifo);
487 err_del_cdev_and_free_channel:
492 ida_simple_remove(&comp.minor_id, current_minor);
496 static struct cdev_component comp = {
500 .probe_channel = comp_probe,
501 .disconnect_channel = comp_disconnect_channel,
502 .rx_completion = comp_rx_completion,
503 .tx_completion = comp_tx_completion,
507 static int __init mod_init(void)
513 comp.class = class_create(THIS_MODULE, "most_cdev");
514 if (IS_ERR(comp.class)) {
515 pr_info("No udev support.\n");
516 return PTR_ERR(comp.class);
519 INIT_LIST_HEAD(&channel_list);
520 spin_lock_init(&ch_list_lock);
521 ida_init(&comp.minor_id);
523 err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
526 comp.major = MAJOR(comp.devno);
527 err = most_register_component(&comp.cc);
530 err = most_register_configfs_subsys(&comp.cc);
532 goto deregister_comp;
536 most_deregister_component(&comp.cc);
538 unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
540 ida_destroy(&comp.minor_id);
541 class_destroy(comp.class);
545 static void __exit mod_exit(void)
547 struct comp_channel *c, *tmp;
549 pr_info("exit module\n");
551 most_deregister_configfs_subsys(&comp.cc);
552 most_deregister_component(&comp.cc);
554 list_for_each_entry_safe(c, tmp, &channel_list, list) {
558 unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
559 ida_destroy(&comp.minor_id);
560 class_destroy(comp.class);
/* module registration and metadata */
module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");