/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "ring_generic.h"

int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
                        int event_code,
                        s64 timestamp)
{
        return __iio_push_event(&ring_buf->ev_int,
                                event_code,
                                timestamp,
                                &ring_buf->shared_ev_pointer);
}
EXPORT_SYMBOL(iio_push_ring_event);

int iio_push_or_escallate_ring_event(struct iio_ring_buffer *ring_buf,
                                     int event_code,
                                     s64 timestamp)
{
        if (ring_buf->shared_ev_pointer.ev_p)
                __iio_change_event(ring_buf->shared_ev_pointer.ev_p,
                                   event_code,
                                   timestamp);
        else
                return iio_push_ring_event(ring_buf,
                                           event_code,
                                           timestamp);
        return 0;
}
EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
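
/*
 * Example (illustrative sketch, not part of this file): a ring
 * implementation that detects a watermark condition might notify
 * userspace via iio_push_or_escallate_ring_event().  The event code and
 * the use of iio_get_time_ns() for the timestamp are assumptions made
 * here for illustration only.
 *
 *	static void example_ring_watermark(struct iio_ring_buffer *ring)
 *	{
 *		s64 ts = iio_get_time_ns();
 *
 *		if (iio_push_or_escallate_ring_event(ring,
 *						     IIO_EVENT_CODE_RING_50_FULL,
 *						     ts))
 *			pr_err("failed to push ring event\n");
 *	}
 */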

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
        struct iio_handler *hand
                = container_of(inode->i_cdev, struct iio_handler, chrdev);
        struct iio_ring_buffer *rb = hand->private;

        filp->private_data = hand->private;
        if (rb->access.mark_in_use)
                rb->access.mark_in_use(rb);

        return 0;
}

/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
        struct cdev *cd = inode->i_cdev;
        struct iio_handler *hand = iio_cdev_to_handler(cd);
        struct iio_ring_buffer *rb = hand->private;

        clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
        if (rb->access.unmark_in_use)
                rb->access.unmark_in_use(rb);

        return 0;
}

/**
 * iio_ring_rip_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
                                  size_t count, loff_t *f_ps)
{
        struct iio_ring_buffer *rb = filp->private_data;
        int ret, dead_offset;

        /* The rip_lots callback must exist. */
        if (!rb->access.rip_lots)
                return -EINVAL;
        ret = rb->access.rip_lots(rb, count, buf, &dead_offset);

        return ret;
}

static const struct file_operations iio_ring_fileops = {
        .read = iio_ring_rip_outer,
        .release = iio_ring_release,
        .open = iio_ring_open,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};
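
/*
 * Example (illustrative sketch, not part of this file): userspace reads
 * whole scans from the access chrdev registered below.  The device node
 * path, SCAN_BYTES and process_scan() are hypothetical, named here only
 * for illustration; the real node name follows the "%s:access%d"
 * pattern used in __iio_request_ring_buffer_access_chrdev().
 *
 *	int fd = open("/dev/device0:buffer0:access0", O_RDONLY);
 *	char scan[SCAN_BYTES];
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *	if (n > 0)
 *		process_scan(scan, n);
 *	close(fd);
 */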

/**
 * __iio_request_ring_buffer_event_chrdev() - allocate ring event chrdev
 * @buf:        ring buffer whose event chrdev we are allocating
 * @id:         id of this ring buffer (typically 0)
 * @owner:      the module who owns the ring buffer (for ref counting)
 * @dev:        device with which the chrdev is associated
 **/
static inline int
__iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
                                       int id,
                                       struct module *owner,
                                       struct device *dev)
{
        snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
                 "%s:event%d",
                 dev_name(&buf->dev),
                 id);
        return iio_setup_ev_int(&(buf->ev_int),
                                buf->ev_int._name,
                                owner,
                                dev);
}

static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
        iio_free_ev_int(&(buf->ev_int));
}

static void iio_ring_access_release(struct device *dev)
{
        struct iio_ring_buffer *buf
                = access_dev_to_iio_ring_buffer(dev);
        cdev_del(&buf->access_handler.chrdev);
        iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_ring_access_type = {
        .release = iio_ring_access_release,
};

static inline int
__iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
                                        int id,
                                        struct module *owner)
{
        int ret, minor;

        buf->access_handler.flags = 0;

        buf->access_dev.parent = &buf->dev;
        buf->access_dev.bus = &iio_bus_type;
        buf->access_dev.type = &iio_ring_access_type;
        device_initialize(&buf->access_dev);

        minor = iio_device_get_chrdev_minor();
        if (minor < 0) {
                ret = minor;
                goto error_device_put;
        }
        buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);

        buf->access_id = id;

        dev_set_name(&buf->access_dev, "%s:access%d",
                     dev_name(&buf->dev),
                     buf->access_id);
        ret = device_add(&buf->access_dev);
        if (ret < 0) {
                printk(KERN_ERR "failed to add the ring access dev\n");
                goto error_device_put;
        }

        cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
        buf->access_handler.chrdev.owner = owner;

        ret = cdev_add(&buf->access_handler.chrdev, buf->access_dev.devt, 1);
        if (ret) {
                printk(KERN_ERR "failed to allocate ring access chrdev\n");
                goto error_device_unregister;
        }
        return 0;

error_device_unregister:
        /* device_unregister() drops the reference itself; do not fall
         * through to put_device() or the device would be released twice.
         */
        device_unregister(&buf->access_dev);
        return ret;
error_device_put:
        put_device(&buf->access_dev);

        return ret;
}

static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
        device_unregister(&buf->access_dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
                          struct iio_dev *dev_info)
{
        if (ring->access.mark_param_change)
                ring->access.mark_param_change(ring);
        ring->indio_dev = dev_info;
        ring->ev_int.private = ring;
        ring->access_handler.private = ring;
        ring->shared_ev_pointer.ev_p = NULL;
        spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
        int ret;

        ring->id = id;

        dev_set_name(&ring->dev, "%s:buffer%d",
                     dev_name(ring->dev.parent),
                     ring->id);
        ret = device_add(&ring->dev);
        if (ret)
                goto error_ret;

        ret = __iio_request_ring_buffer_event_chrdev(ring,
                                                     0,
                                                     ring->owner,
                                                     &ring->dev);
        if (ret)
                goto error_remove_device;

        ret = __iio_request_ring_buffer_access_chrdev(ring,
                                                      0,
                                                      ring->owner);
        if (ret)
                goto error_free_ring_buffer_event_chrdev;

        if (ring->scan_el_attrs) {
                ret = sysfs_create_group(&ring->dev.kobj,
                                         ring->scan_el_attrs);
                if (ret) {
                        dev_err(&ring->dev,
                                "Failed to add sysfs scan elements\n");
                        goto error_free_ring_buffer_access_chrdev;
                }
        }

        return 0;
error_free_ring_buffer_access_chrdev:
        __iio_free_ring_buffer_access_chrdev(ring);
error_free_ring_buffer_event_chrdev:
        __iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
        device_del(&ring->dev);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register);

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
        if (ring->scan_el_attrs)
                sysfs_remove_group(&ring->dev.kobj,
                                   ring->scan_el_attrs);

        __iio_free_ring_buffer_access_chrdev(ring);
        __iio_free_ring_buffer_event_chrdev(ring);
        device_del(&ring->dev);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
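
/*
 * Example (illustrative sketch, not part of this file): a driver using
 * these helpers initialises the ring and then registers it once the
 * underlying device has been added.  example_ring_allocate() is a
 * hypothetical allocation helper, named here only for illustration.
 *
 *	ring = example_ring_allocate(indio_dev);
 *	iio_ring_buffer_init(ring, indio_dev);
 *	ret = iio_ring_buffer_register(ring, 0);
 *	if (ret)
 *		goto error_free_ring;
 *
 * On teardown the ordering is reversed:
 *
 *	iio_ring_buffer_unregister(ring);
 */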

ssize_t iio_read_ring_length(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_length)
                len = sprintf(buf, "%d\n",
                              ring->access.get_length(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        ulong val;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        ret = strict_strtoul(buf, 10, &val);
        if (ret)
                return ret;

        if (ring->access.get_length)
                if (val == ring->access.get_length(ring))
                        return len;

        if (ring->access.set_length) {
                ring->access.set_length(ring, val);
                if (ring->access.mark_param_change)
                        ring->access.mark_param_change(ring);
        }

        return len;
}
EXPORT_SYMBOL(iio_write_ring_length);
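
/*
 * Example (illustrative sketch, not part of this file): these helpers
 * are intended to back sysfs attributes on the ring device.  A ring
 * implementation might wire them up with DEVICE_ATTR(); the attribute
 * name and mode shown here are assumptions for illustration.
 *
 *	static DEVICE_ATTR(length, S_IRUGO | S_IWUSR,
 *			   iio_read_ring_length,
 *			   iio_write_ring_length);
 */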

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        int len = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        if (ring->access.get_bytes_per_datum)
                len = sprintf(buf, "%d\n",
                              ring->access.get_bytes_per_datum(ring));

        return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

ssize_t iio_store_ring_enable(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf,
                              size_t len)
{
        int ret;
        bool requested_state, current_state;
        int previous_mode;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *dev_info = ring->indio_dev;

        mutex_lock(&dev_info->mlock);
        previous_mode = dev_info->currentmode;
        requested_state = !(buf[0] == '0');
        current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
        if (current_state == requested_state) {
                printk(KERN_INFO "iio-ring: current state requested again\n");
                goto done;
        }
        if (requested_state) {
                if (ring->preenable) {
                        ret = ring->preenable(dev_info);
                        if (ret) {
                                printk(KERN_ERR
                                       "Buffer not started: ring preenable failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.request_update) {
                        ret = ring->access.request_update(ring);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: ring parameter update failed\n");
                                goto error_ret;
                        }
                }
                if (ring->access.mark_in_use)
                        ring->access.mark_in_use(ring);
                /* Definitely possible for devices to support both of these. */
                if (dev_info->modes & INDIO_RING_TRIGGERED) {
                        if (!dev_info->trig) {
                                printk(KERN_INFO
                                       "Buffer not started: no trigger\n");
                                ret = -EINVAL;
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                goto error_ret;
                        }
                        dev_info->currentmode = INDIO_RING_TRIGGERED;
                } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER) {
                        dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
                } else { /* should never be reached */
                        ret = -EINVAL;
                        if (ring->access.unmark_in_use)
                                ring->access.unmark_in_use(ring);
                        goto error_ret;
                }

                if (ring->postenable) {
                        ret = ring->postenable(dev_info);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: postenable failed\n");
                                if (ring->access.unmark_in_use)
                                        ring->access.unmark_in_use(ring);
                                dev_info->currentmode = previous_mode;
                                if (ring->postdisable)
                                        ring->postdisable(dev_info);
                                goto error_ret;
                        }
                }
        } else {
                if (ring->predisable) {
                        ret = ring->predisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
                if (ring->access.unmark_in_use)
                        ring->access.unmark_in_use(ring);
                dev_info->currentmode = INDIO_DIRECT_MODE;
                if (ring->postdisable) {
                        ret = ring->postdisable(dev_info);
                        if (ret)
                                goto error_ret;
                }
        }
done:
        mutex_unlock(&dev_info->mlock);
        return len;

error_ret:
        mutex_unlock(&dev_info->mlock);
        return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

ssize_t iio_show_ring_enable(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
                                       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
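
/*
 * Example (illustrative sketch, not part of this file): as with the
 * length attribute above, the enable pair can be exposed through a
 * DEVICE_ATTR() on the ring device; userspace then starts or stops
 * capture by writing '1' or '0' to the resulting sysfs file.
 *
 *	static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
 *			   iio_show_ring_enable,
 *			   iio_store_ring_enable);
 */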

ssize_t iio_scan_el_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
{
        int ret;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        ret = iio_scan_mask_query(ring, this_el->number);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

ssize_t iio_scan_el_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf,
                          size_t len)
{
        int ret = 0;
        bool state;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        struct iio_scan_el *this_el = to_iio_scan_el(attr);

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(ring, this_el->number);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(ring, this_el->number);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(ring, this_el->number);
                if (ret)
                        goto error_ret;
        }
        if (this_el->set_state)
                ret = this_el->set_state(this_el, indio_dev, state);
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);
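
/*
 * Example (illustrative sketch, not part of this file): each scan
 * element embeds a device_attribute that to_iio_scan_el() recovers, so
 * a driver declares one iio_scan_el per channel and points its show and
 * store hooks at the helpers above.  The field layout and attribute
 * name shown here are assumptions for illustration.
 *
 *	static struct iio_scan_el example_scan_el = {
 *		.dev_attr = __ATTR(scan_en_accel_x, S_IRUGO | S_IWUSR,
 *				   iio_scan_el_show, iio_scan_el_store),
 *		.number = 0,
 *	};
 */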

ssize_t iio_scan_el_ts_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf,
                             size_t len)
{
        int ret = 0;
        struct iio_ring_buffer *ring = dev_get_drvdata(dev);
        struct iio_dev *indio_dev = ring->indio_dev;
        bool state;

        state = !(buf[0] == '0');
        mutex_lock(&indio_dev->mlock);
        if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
                ret = -EBUSY;
                goto error_ret;
        }
        ring->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);