/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "ring_generic.h"

int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
			int event_code,
			s64 timestamp)
{
	return __iio_push_event(&ring_buf->ev_int,
				event_code,
				timestamp,
				&ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
				     int event_code,
				     s64 timestamp)
{
	if (ring_buf->shared_ev_pointer.ev_p)
		__iio_change_event(ring_buf->shared_ev_pointer.ev_p,
				   event_code,
				   timestamp);
	else
		return iio_push_ring_event(ring_buf,
					   event_code,
					   timestamp);
	return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access.mark_in_use)
		rb->access.mark_in_use(rb);

	return 0;
}
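
/*
 * Illustrative sketch only, not part of the core: the "first element"
 * requirement above means an implementation structure and its embedded
 * iio_ring_buffer share an address, so the void *private pointer stored
 * in the handler can be used directly as a struct iio_ring_buffer *.
 * The struct and field names below are hypothetical.
 */
#if 0
struct example_sw_ring {
	struct iio_ring_buffer buf;	/* must be the first member */
	unsigned char *data;		/* implementation-private storage */
};
#endif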

/**
 * iio_ring_release() - chrdev file close ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access.unmark_in_use)
		rb->access.unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
				  size_t count, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret, dead_offset;

	/* The rip_lots callback must exist to read from the buffer. */
	if (!rb->access.rip_lots)
		return -EINVAL;
	ret = rb->access.rip_lots(rb, count, buf, &dead_offset);

	return ret;
}

static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_rip_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
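
/*
 * Usage sketch (hypothetical, for orientation only): userspace reads raw
 * scans from the access chrdev created below. The device node name shown
 * is illustrative; it depends on udev policy and how the parent device
 * is named.
 */
#if 0
	int fd = open("/dev/device0:buffer0:access0", O_RDONLY);
	char scan[64];
	ssize_t n = read(fd, scan, sizeof(scan));	/* one or more scans */
	close(fd);
#endif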

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:	ring buffer whose event chrdev we are allocating
 * @id:		id of this ring buffer (typically 0)
 * @owner:	the module who owns the ring buffer (for ref counting)
 * @dev:	device with which the chrdev is associated
 **/
static int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
				       int id,
				       struct module *owner,
				       struct device *dev)
{
	snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
		 "%s:event%d",
		 dev_name(&buf->dev),
		 id);
	return iio_setup_ev_int(&(buf->ev_int),
				buf->ev_int._name,
				owner,
				dev);
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
	iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= access_dev_to_iio_ring_buffer(dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
	.release = iio_ring_access_release,
};

static int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
					int id,
					struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;
	buf->access_dev.parent = &buf->dev;
	buf->access_dev.bus = &iio_bus_type;
	buf->access_dev.type = &iio_ring_access_type;
	device_initialize(&buf->access_dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);
	buf->access_id = id;
	dev_set_name(&buf->access_dev, "%s:access%d",
		     dev_name(&buf->dev),
		     buf->access_id);
	ret = device_add(&buf->access_dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring access dev\n");
		goto error_device_put;
	}

	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;
	ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring access chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->access_dev);
error_device_put:
	put_device(&buf->access_dev);
	return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	if (ring->access.mark_param_change)
		ring->access.mark_param_change(ring);
	ring->indio_dev = dev_info;
	ring->ev_int.private = ring;
	ring->access_handler.private = ring;
	ring->shared_ev_pointer.ev_p = NULL;
	spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	int ret;

	ring->id = id;
	dev_set_name(&ring->dev, "%s:buffer%d",
		     dev_name(ring->dev.parent),
		     ring->id);
	ret = device_add(&ring->dev);
	if (ret)
		goto error_ret;

	ret = __iio_request_ring_buffer_event_chrdev(ring, 0, ring->owner,
						     &ring->dev);
	if (ret)
		goto error_remove_device;

	ret = __iio_request_ring_buffer_access_chrdev(ring, 0, ring->owner);
	if (ret)
		goto error_free_ring_buffer_event_chrdev;

	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_event_chrdev;
		}
	}
	return 0;

error_free_ring_buffer_event_chrdev:
	__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
	device_del(&ring->dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);
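
/*
 * Typical driver-side sequence (a sketch under assumed names, not core
 * code): allocate an implementation-specific ring, initialize it against
 * the parent iio_dev, then register it. example_allocate_ring() and
 * example_free_ring() are hypothetical; the init/register pairing and the
 * unregister on removal are the parts that matter.
 */
#if 0
	struct iio_ring_buffer *ring = example_allocate_ring();
	iio_ring_buffer_init(ring, indio_dev);
	ret = iio_ring_buffer_register(ring, 0);
	if (ret)
		example_free_ring(ring);
	/* ... later, on driver removal ... */
	iio_ring_buffer_unregister(ring);
#endif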

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);

	__iio_free_ring_buffer_access_chrdev(ring);
	__iio_free_ring_buffer_event_chrdev(ring);
	device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_length)
		len = sprintf(buf, "%d\n",
			      ring->access.get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	/* Nothing to do if the length is already as requested. */
	if (ring->access.get_length)
		if (val == ring->access.get_length(ring))
			return len;

	if (ring->access.set_length) {
		ring->access.set_length(ring, val);
		if (ring->access.mark_param_change)
			ring->access.mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access.get_bytes_per_datum)
		len = sprintf(buf, "%d\n",
			      ring->access.get_bytes_per_datum(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->preenable) {
			ret = ring->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access.request_update) {
			ret = ring->access.request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access.mark_in_use)
			ring->access.mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->postenable) {
			ret = ring->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access.unmark_in_use)
					ring->access.unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->postdisable)
					ring->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->predisable) {
			ret = ring->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access.unmark_in_use)
			ring->access.unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->postdisable) {
			ret = ring->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);
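
/*
 * Userspace view (sketch only; the sysfs path and attribute names depend
 * on how a driver wires these handlers into its DEVICE_ATTRs and on
 * device naming): writing '1' walks the preenable/postenable path above,
 * writing '0' walks predisable/postdisable.
 */
#if 0
	int fd = open("/sys/bus/iio/devices/device0:buffer0/enable", O_WRONLY);
	write(fd, "1", 1);	/* enable ring capture */
	close(fd);
#endif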

ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_scan_el *this_el = to_iio_scan_el(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	/* The scan mask cannot change while the ring is in use. */
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_el->number);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_el->number);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_el->number);
		if (ret)
			goto error_ret;
	}
	if (this_el->set_state)
		ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	/* Timestamping cannot be toggled while the ring is in use. */
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);