2 * cdev.c - Application interfacing module for character devices
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * This file is licensed under GPLv2.
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/cdev.h>
21 #include <linux/poll.h>
22 #include <linux/kfifo.h>
23 #include <linux/uaccess.h>
24 #include <linux/idr.h>
/* Module-wide character-device state. */
27 static dev_t aim_devno;	/* base dev_t obtained from alloc_chrdev_region() in mod_init() */
28 static struct class *aim_class;	/* device class used by device_create()/device_destroy() */
29 static struct ida minor_id;	/* allocator for per-channel minor numbers (ida_simple_get/remove) */
30 static unsigned int major;	/* major number extracted from aim_devno via MAJOR() */
31 static struct most_aim cdev_aim;	/* forward declaration; defined near the bottom of the file */
/*
 * Fields of the per-channel state object (struct aim_channel).  The opening
 * of the struct definition is not visible in this chunk; further members
 * (e.g. ->dev, ->wq, ->cdev, ->devno, ->access_ref, ->mbo_offs) are
 * referenced by the functions below but their declarations are elided here.
 */
35 spinlock_t unlink; /* synchronization lock to unlink channels */
38 struct mutex io_mutex;	/* serializes open/close/read/write against disconnect */
39 struct most_interface *iface;	/* MOST core interface this channel belongs to */
40 struct most_channel_config *cfg;	/* channel config: direction, buffer_size, data_type, ... */
41 unsigned int channel_id;	/* channel index within the interface */
/* FIFO of MBOs owned by this channel (filled by rx completion / ch_get_mbo) */
44 DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
46 struct list_head list;	/* membership in the global channel_list */
/* Map an embedded struct cdev back to its enclosing aim_channel. */
49 #define to_channel(d) container_of(d, struct aim_channel, cdev)
50 static struct list_head channel_list;	/* all probed channels; guarded by ch_list_lock */
51 static spinlock_t ch_list_lock;	/* protects channel_list (taken with irqsave) */
/* Ask the MOST core whether this channel currently has a free MBO for us. */
53 static inline bool ch_has_mbo(struct aim_channel *c)
55 return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
/*
 * Fetch an MBO for writing: reuse the one at the head of the channel fifo
 * if present, otherwise request a fresh one from the core and queue it so a
 * partially filled buffer survives across write() calls.  Returns true when
 * *mbo is valid (lines checking the most_get_mbo() result are elided from
 * this view).
 */
58 static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
60 if (!kfifo_peek(&c->fifo, mbo)) {
61 *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
63 kfifo_in(&c->fifo, mbo, 1);
/*
 * Look up the aim_channel registered for (iface, id) by walking the global
 * channel_list under ch_list_lock.  Returns the channel pointer on a match
 * (the found_channel flag / return statements are elided from this view;
 * presumably NULL is returned when nothing matches — TODO confirm against
 * the full file).
 */
68 static struct aim_channel *get_channel(struct most_interface *iface, int id)
70 struct aim_channel *c, *tmp;
72 int found_channel = 0;
74 spin_lock_irqsave(&ch_list_lock, flags);
75 list_for_each_entry_safe(c, tmp, &channel_list, list) {
76 if ((c->iface == iface) && (c->channel_id == id)) {
81 spin_unlock_irqrestore(&ch_list_lock, flags);
/*
 * Drain all MBOs still queued in the channel fifo, then tell the core to
 * stop the channel.
 *
 * NOTE(review): the (struct kfifo *) cast defeats the type safety of the
 * DECLARE_KFIFO_PTR()-generated fifo; kfifo_out(&c->fifo, &mbo, 1) should
 * work without it — verify and drop the cast in a follow-up.
 */
87 static void stop_channel(struct aim_channel *c)
91 while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
93 most_stop_channel(c->iface, c->channel_id, &cdev_aim);
/*
 * Tear down the user-space visible device node and remove the channel from
 * the global list (the list_del() between the lock/unlock pair is elided
 * from this view).
 */
96 static void destroy_cdev(struct aim_channel *c)
100 device_destroy(aim_class, c->devno);
102 spin_lock_irqsave(&ch_list_lock, flags);
104 spin_unlock_irqrestore(&ch_list_lock, flags);
/* Release per-channel resources: the minor number, the MBO fifo (and,
 * presumably, the channel object itself — the kfree() is elided here). */
107 static void destroy_channel(struct aim_channel *c)
109 ida_simple_remove(&minor_id, MINOR(c->devno));
110 kfifo_free(&c->fifo);
115 * aim_open - implements the syscall to open the device
116 * @inode: inode pointer
117 * @filp: file pointer
119 * This stores the channel pointer in the private data field of
120 * the file structure and activates the channel within the core.
122 static int aim_open(struct inode *inode, struct file *filp)
124 struct aim_channel *c;
127 c = to_channel(inode->i_cdev);
128 filp->private_data = c;
/* RX channels may only be opened read-only, TX channels write-only. */
130 if (((c->cfg->direction == MOST_CH_RX) &&
131 ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
132 ((c->cfg->direction == MOST_CH_TX) &&
133 ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
134 pr_info("WARN: Access flags mismatch\n");
/* Serialize against close()/disconnect; the conditions guarding the two
 * failure branches below (device gone / already opened) are elided from
 * this view. */
138 mutex_lock(&c->io_mutex);
140 pr_info("WARN: Device is destroyed\n");
141 mutex_unlock(&c->io_mutex);
146 pr_info("WARN: Device is busy\n");
147 mutex_unlock(&c->io_mutex);
/* Activate the channel in the MOST core; ret is returned to the caller. */
152 ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
155 mutex_unlock(&c->io_mutex);
160 * aim_close - implements the syscall to close the device
161 * @inode: inode pointer
162 * @filp: file pointer
164 * This stops the channel within the core.
166 static int aim_close(struct inode *inode, struct file *filp)
168 struct aim_channel *c = to_channel(inode->i_cdev);
/* io_mutex orders close against read/write; the unlink spinlock guards
 * state shared with the rx completion path (the code between lock and
 * unlock is elided from this view). */
170 mutex_lock(&c->io_mutex);
171 spin_lock(&c->unlink);
173 spin_unlock(&c->unlink);
/* Two unlock sites below suggest an early-exit branch (e.g. device already
 * disconnected) whose condition is elided from this view. */
176 mutex_unlock(&c->io_mutex);
178 mutex_unlock(&c->io_mutex);
185 * aim_write - implements the syscall to write to the device
186 * @filp: file pointer
187 * @buf: pointer to user buffer
188 * @count: number of bytes to write
189 * @offset: offset from where to start writing
191 static ssize_t aim_write(struct file *filp, const char __user *buf,
192 size_t count, loff_t *offset)
195 size_t to_copy, left;
196 struct mbo *mbo = NULL;
197 struct aim_channel *c = filp->private_data;
/* Block (unless O_NONBLOCK) until an MBO is available or the device is
 * gone; the mutex is dropped while sleeping to avoid blocking close(). */
199 mutex_lock(&c->io_mutex);
200 while (c->dev && !ch_get_mbo(c, &mbo)) {
201 mutex_unlock(&c->io_mutex);
203 if ((filp->f_flags & O_NONBLOCK))
205 if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
207 mutex_lock(&c->io_mutex);
/* Device disappeared while we slept — bail out. */
210 if (unlikely(!c->dev)) {
/* Copy at most one buffer's worth, continuing at mbo_offs for writes
 * split across calls. */
215 to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
216 left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
217 if (left == to_copy) {
/* Submit once the buffer is full, or immediately for control/async
 * channels which are message- rather than stream-oriented. */
222 c->mbo_offs += to_copy - left;
223 if (c->mbo_offs >= c->cfg->buffer_size ||
224 c->cfg->data_type == MOST_CH_CONTROL ||
225 c->cfg->data_type == MOST_CH_ASYNC) {
226 kfifo_skip(&c->fifo);
227 mbo->buffer_length = c->mbo_offs;
229 most_submit_mbo(mbo);
/* Return the number of bytes actually consumed from user space. */
232 ret = to_copy - left;
234 mutex_unlock(&c->io_mutex);
239 * aim_read - implements the syscall to read from the device
240 * @filp: file pointer
241 * @buf: pointer to user buffer
242 * @count: number of bytes to read
243 * @offset: offset from where to start reading
246 aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
248 size_t to_copy, not_copied, copied;
250 struct aim_channel *c = filp->private_data;
/* Block (unless O_NONBLOCK) until a completed MBO is queued or the device
 * disappears; sleep with io_mutex released. */
252 mutex_lock(&c->io_mutex);
253 while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
254 mutex_unlock(&c->io_mutex);
255 if (filp->f_flags & O_NONBLOCK)
257 if (wait_event_interruptible(c->wq,
258 (!kfifo_is_empty(&c->fifo) ||
261 mutex_lock(&c->io_mutex);
264 /* make sure we don't submit to gone devices */
265 if (unlikely(!c->dev)) {
266 mutex_unlock(&c->io_mutex);
/* Hand out at most the remainder of the current MBO; mbo_offs tracks how
 * far a partially consumed buffer has been read. */
270 to_copy = min_t(size_t,
272 mbo->processed_length - c->mbo_offs);
274 not_copied = copy_to_user(buf,
275 mbo->virt_address + c->mbo_offs,
278 copied = to_copy - not_copied;
280 c->mbo_offs += copied;
/* Buffer fully consumed: drop it from the fifo (and, presumably, return
 * it to the core — that line is elided from this view). */
281 if (c->mbo_offs >= mbo->processed_length) {
282 kfifo_skip(&c->fifo);
286 mutex_unlock(&c->io_mutex);
/*
 * poll/select support: RX channels are readable when the fifo holds a
 * completed MBO; TX channels are writable when a queued or fresh MBO is
 * available.
 */
290 static unsigned int aim_poll(struct file *filp, poll_table *wait)
292 struct aim_channel *c = filp->private_data;
293 unsigned int mask = 0;
295 poll_wait(filp, &c->wq, wait);
297 if (c->cfg->direction == MOST_CH_RX) {
298 if (!kfifo_is_empty(&c->fifo))
299 mask |= POLLIN | POLLRDNORM;
301 if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
302 mask |= POLLOUT | POLLWRNORM;
308 * Initialization of struct file_operations
/* (The .open/.read/.write/.poll member initializers are elided from this
 * view; only .owner and .release are visible.) */
310 static const struct file_operations channel_fops = {
311 .owner = THIS_MODULE,
315 .release = aim_close,
320 * aim_disconnect_channel - disconnect a channel
321 * @iface: pointer to interface instance
322 * @channel_id: channel index
324 * This frees allocated memory and removes the cdev that represents this
325 * channel in user space.
327 static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
329 struct aim_channel *c;
/* Reject a NULL interface (the if-condition itself is elided here). */
332 pr_info("Bad interface pointer\n");
336 c = get_channel(iface, channel_id);
/* Mark the channel gone under both locks so concurrent open/read/write
 * and the rx completion path observe the disconnect, then wake sleepers. */
340 mutex_lock(&c->io_mutex);
341 spin_lock(&c->unlink);
343 spin_unlock(&c->unlink);
347 wake_up_interruptible(&c->wq);
348 mutex_unlock(&c->io_mutex);
350 mutex_unlock(&c->io_mutex);
357 * aim_rx_completion - completion handler for rx channels
358 * @mbo: pointer to buffer object that has completed
360 * This searches for the channel linked to this MBO and stores it in the local
363 static int aim_rx_completion(struct mbo *mbo)
365 struct aim_channel *c;
370 c = get_channel(mbo->ifp, mbo->hdm_channel_id)
374 spin_lock(&c->unlink);
/* Drop the MBO if no user has the device open or it was disconnected;
 * the unlink lock keeps this check consistent with close()/disconnect. */
375 if (!c->access_ref || !c->dev) {
376 spin_unlock(&c->unlink);
379 kfifo_in(&c->fifo, &mbo, 1);
380 spin_unlock(&c->unlink);
382 if (kfifo_is_full(&c->fifo))
383 pr_info("WARN: Fifo is full\n");
/* Wake readers sleeping in aim_read()/poll(). */
385 wake_up_interruptible(&c->wq);
390 * aim_tx_completion - completion handler for tx channels
391 * @iface: pointer to interface instance
392 * @channel_id: channel index/ID
394 * This wakes sleeping processes in the wait-queue.
396 static int aim_tx_completion(struct most_interface *iface, int channel_id)
398 struct aim_channel *c;
/* Validate arguments (the if-conditions preceding these messages are
 * elided from this view). */
401 pr_info("Bad interface pointer\n");
404 if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
405 pr_info("Channel ID out of range\n");
/* Wake writers sleeping in aim_write() waiting for a free MBO. */
409 c = get_channel(iface, channel_id);
412 wake_up_interruptible(&c->wq);
417 * aim_probe - probe function of the driver module
418 * @iface: pointer to interface instance
419 * @channel_id: channel index/ID
420 * @cfg: pointer to actual channel configuration
421 * @parent: pointer to kobject (needed for sysfs hook-up)
422 * @name: name of the device to be created
424 * This allocates a channel object and creates the device node in /dev
426 * Returns 0 on success or error code otherwise.
428 static int aim_probe(struct most_interface *iface, int channel_id,
429 struct most_channel_config *cfg,
430 struct kobject *parent, char *name)
432 struct aim_channel *c;
433 unsigned long cl_flags;
/* Reject NULL arguments up front.
 * NOTE(review): this pr_info() lacks a trailing newline. */
437 if ((!iface) || (!cfg) || (!parent) || (!name)) {
438 pr_info("Probing AIM with bad arguments");
/* A channel already registered for (iface, channel_id) is an error; the
 * branch acting on the result is elided from this view. */
441 c = get_channel(iface, channel_id);
/* Reserve a minor number for the new device node. */
445 current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
446 if (current_minor < 0)
447 return current_minor;
449 c = kzalloc(sizeof(*c), GFP_KERNEL);
452 goto error_alloc_channel;
/* Register the character device under the module's major number. */
455 c->devno = MKDEV(major, current_minor);
456 cdev_init(&c->cdev, &channel_fops);
457 c->cdev.owner = THIS_MODULE;
458 retval = cdev_add(&c->cdev, c->devno, 1);
463 c->channel_id = channel_id;
465 spin_lock_init(&c->unlink);
/* One fifo slot per configured buffer of the channel. */
467 retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
469 pr_info("failed to alloc channel kfifo");
470 goto error_alloc_kfifo;
472 init_waitqueue_head(&c->wq);
473 mutex_init(&c->io_mutex);
474 spin_lock_irqsave(&ch_list_lock, cl_flags);
475 list_add_tail(&c->list, &channel_list);
476 spin_unlock_irqrestore(&ch_list_lock, cl_flags);
/* Create the /dev node; further device_create() arguments are elided
 * from this view. */
477 c->dev = device_create(aim_class,
483 if (IS_ERR(c->dev)) {
484 retval = PTR_ERR(c->dev);
485 pr_info("failed to create new device node %s\n", name);
486 goto error_create_device;
488 kobject_uevent(&c->dev->kobj, KOBJ_ADD);
/* Error unwind labels (names elided between these lines in this view). */
492 kfifo_free(&c->fifo);
499 ida_simple_remove(&minor_id, current_minor);
/* AIM registration record handed to the MOST core (most_register_aim). */
503 static struct most_aim cdev_aim = {
505 .probe_channel = aim_probe,
506 .disconnect_channel = aim_disconnect_channel,
507 .rx_completion = aim_rx_completion,
508 .tx_completion = aim_tx_completion,
/*
 * Module init: set up the channel list, reserve a chrdev region, create the
 * device class and register this AIM with the MOST core, unwinding in
 * reverse order on failure.
 */
511 static int __init mod_init(void)
517 INIT_LIST_HEAD(&channel_list);
518 spin_lock_init(&ch_list_lock);
/* Reserve 50 minors starting at 0 under the name "cdev".
 * NOTE(review): the error path below and mod_exit() both call
 * unregister_chrdev_region(aim_devno, 1) — count 1 does not match the 50
 * allocated here; verify against the full file and make them consistent. */
521 err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
524 major = MAJOR(aim_devno);
526 aim_class = class_create(THIS_MODULE, "most_cdev_aim");
527 if (IS_ERR(aim_class)) {
528 pr_err("no udev support\n");
529 err = PTR_ERR(aim_class);
532 err = most_register_aim(&cdev_aim);
/* Error unwind (labels elided from this view). */
538 class_destroy(aim_class);
540 unregister_chrdev_region(aim_devno, 1);
542 ida_destroy(&minor_id);
/*
 * Module exit: deregister from the core, tear down every remaining channel,
 * then release the class, the chrdev region and the minor-number allocator.
 * NOTE(review): unregister_chrdev_region() is called with count 1 while
 * mod_init() allocates 50 minors — verify against the full file.
 */
546 static void __exit mod_exit(void)
548 struct aim_channel *c, *tmp;
550 pr_info("exit module\n");
552 most_deregister_aim(&cdev_aim);
/* Per-channel teardown inside this loop is elided from this view. */
554 list_for_each_entry_safe(c, tmp, &channel_list, list) {
558 class_destroy(aim_class);
559 unregister_chrdev_region(aim_devno, 1);
560 ida_destroy(&minor_id);
/* Standard kernel module entry/exit hookup and metadata. */
563 module_init(mod_init);
564 module_exit(mod_exit);
565 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
566 MODULE_LICENSE("GPL");
567 MODULE_DESCRIPTION("character device AIM for mostcore");