/*
 * drivers/staging/iio/industrialio-buffer.c
 * (from commit: "staging:iio: replacing term ring with buffer in the IIO core.")
 */
1 /* The industrial I/O core
2  *
3  * Copyright (c) 2008 Jonathan Cameron
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * Handling of buffer allocation / resizing.
10  *
11  *
12  * Things to look at here.
13  * - Better memory allocation techniques?
14  * - Alternative access techniques?
15  */
16 #include <linux/kernel.h>
17 #include <linux/device.h>
18 #include <linux/fs.h>
19 #include <linux/cdev.h>
20 #include <linux/slab.h>
21 #include <linux/poll.h>
22
23 #include "iio.h"
24 #include "iio_core.h"
25 #include "sysfs.h"
26 #include "buffer_generic.h"
27
/* String prefixes used to report scan-element endianness via sysfs. */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
32
33 /**
34  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
35  *
36  * This function relies on all buffer implementations having an
37  * iio_buffer as their first element.
38  **/
39 ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
40                                       size_t n, loff_t *f_ps)
41 {
42         struct iio_dev *indio_dev = filp->private_data;
43         struct iio_buffer *rb = indio_dev->buffer;
44
45         if (!rb->access->read_first_n)
46                 return -EINVAL;
47         return rb->access->read_first_n(rb, n, buf);
48 }
49
50 /**
51  * iio_buffer_poll() - poll the buffer to find out if it has data
52  */
53 unsigned int iio_buffer_poll(struct file *filp,
54                              struct poll_table_struct *wait)
55 {
56         struct iio_dev *indio_dev = filp->private_data;
57         struct iio_buffer *rb = indio_dev->buffer;
58
59         poll_wait(filp, &rb->pollq, wait);
60         if (rb->stufftoread)
61                 return POLLIN | POLLRDNORM;
62         /* need a way of knowing if there may be enough data... */
63         return 0;
64 }
65
66 void iio_chrdev_buffer_open(struct iio_dev *indio_dev)
67 {
68         struct iio_buffer *rb = indio_dev->buffer;
69         if (rb && rb->access->mark_in_use)
70                 rb->access->mark_in_use(rb);
71 }
72
73 void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
74 {
75         struct iio_buffer *rb = indio_dev->buffer;
76
77         clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
78         if (rb->access->unmark_in_use)
79                 rb->access->unmark_in_use(rb);
80
81 }
82
83 void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
84 {
85         buffer->indio_dev = dev_info;
86         init_waitqueue_head(&buffer->pollq);
87 }
88 EXPORT_SYMBOL(iio_buffer_init);
89
90 static ssize_t iio_show_scan_index(struct device *dev,
91                                    struct device_attribute *attr,
92                                    char *buf)
93 {
94         return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
95 }
96
97 static ssize_t iio_show_fixed_type(struct device *dev,
98                                    struct device_attribute *attr,
99                                    char *buf)
100 {
101         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
102         u8 type = this_attr->c->scan_type.endianness;
103
104         if (type == IIO_CPU) {
105                 if (__LITTLE_ENDIAN)
106                         type = IIO_LE;
107                 else
108                         type = IIO_BE;
109         }
110         return sprintf(buf, "%s:%c%d/%d>>%u\n",
111                        iio_endian_prefix[type],
112                        this_attr->c->scan_type.sign,
113                        this_attr->c->scan_type.realbits,
114                        this_attr->c->scan_type.storagebits,
115                        this_attr->c->scan_type.shift);
116 }
117
118 static ssize_t iio_scan_el_show(struct device *dev,
119                                 struct device_attribute *attr,
120                                 char *buf)
121 {
122         int ret;
123         struct iio_dev *dev_info = dev_get_drvdata(dev);
124
125         ret = iio_scan_mask_query(dev_info->buffer,
126                                   to_iio_dev_attr(attr)->address);
127         if (ret < 0)
128                 return ret;
129         return sprintf(buf, "%d\n", ret);
130 }
131
132 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
133 {
134         clear_bit(bit, buffer->scan_mask);
135         buffer->scan_count--;
136         return 0;
137 }
138
139 static ssize_t iio_scan_el_store(struct device *dev,
140                                  struct device_attribute *attr,
141                                  const char *buf,
142                                  size_t len)
143 {
144         int ret = 0;
145         bool state;
146         struct iio_dev *indio_dev = dev_get_drvdata(dev);
147         struct iio_buffer *buffer = indio_dev->buffer;
148         struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
149
150         state = !(buf[0] == '0');
151         mutex_lock(&indio_dev->mlock);
152         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
153                 ret = -EBUSY;
154                 goto error_ret;
155         }
156         ret = iio_scan_mask_query(buffer, this_attr->address);
157         if (ret < 0)
158                 goto error_ret;
159         if (!state && ret) {
160                 ret = iio_scan_mask_clear(buffer, this_attr->address);
161                 if (ret)
162                         goto error_ret;
163         } else if (state && !ret) {
164                 ret = iio_scan_mask_set(buffer, this_attr->address);
165                 if (ret)
166                         goto error_ret;
167         }
168
169 error_ret:
170         mutex_unlock(&indio_dev->mlock);
171
172         return ret ? ret : len;
173
174 }
175
176 static ssize_t iio_scan_el_ts_show(struct device *dev,
177                                    struct device_attribute *attr,
178                                    char *buf)
179 {
180         struct iio_dev *dev_info = dev_get_drvdata(dev);
181         return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
182 }
183
184 static ssize_t iio_scan_el_ts_store(struct device *dev,
185                                     struct device_attribute *attr,
186                                     const char *buf,
187                                     size_t len)
188 {
189         int ret = 0;
190         struct iio_dev *indio_dev = dev_get_drvdata(dev);
191         bool state;
192
193         state = !(buf[0] == '0');
194         mutex_lock(&indio_dev->mlock);
195         if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
196                 ret = -EBUSY;
197                 goto error_ret;
198         }
199         indio_dev->buffer->scan_timestamp = state;
200 error_ret:
201         mutex_unlock(&indio_dev->mlock);
202
203         return ret ? ret : len;
204 }
205
206 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
207                                         const struct iio_chan_spec *chan)
208 {
209         int ret, attrcount = 0;
210         struct iio_buffer *buffer = indio_dev->buffer;
211
212         ret = __iio_add_chan_devattr("index",
213                                      chan,
214                                      &iio_show_scan_index,
215                                      NULL,
216                                      0,
217                                      0,
218                                      &indio_dev->dev,
219                                      &buffer->scan_el_dev_attr_list);
220         if (ret)
221                 goto error_ret;
222         attrcount++;
223         ret = __iio_add_chan_devattr("type",
224                                      chan,
225                                      &iio_show_fixed_type,
226                                      NULL,
227                                      0,
228                                      0,
229                                      &indio_dev->dev,
230                                      &buffer->scan_el_dev_attr_list);
231         if (ret)
232                 goto error_ret;
233         attrcount++;
234         if (chan->type != IIO_TIMESTAMP)
235                 ret = __iio_add_chan_devattr("en",
236                                              chan,
237                                              &iio_scan_el_show,
238                                              &iio_scan_el_store,
239                                              chan->scan_index,
240                                              0,
241                                              &indio_dev->dev,
242                                              &buffer->scan_el_dev_attr_list);
243         else
244                 ret = __iio_add_chan_devattr("en",
245                                              chan,
246                                              &iio_scan_el_ts_show,
247                                              &iio_scan_el_ts_store,
248                                              chan->scan_index,
249                                              0,
250                                              &indio_dev->dev,
251                                              &buffer->scan_el_dev_attr_list);
252         attrcount++;
253         ret = attrcount;
254 error_ret:
255         return ret;
256 }
257
/*
 * Free one dynamically created scan-element attribute: the duplicated
 * attribute name first, then the wrapper struct itself.
 * @indio_dev is currently unused.  Does not unlink @p from any list;
 * callers iterate with list_for_each_entry_safe and discard the whole
 * list afterwards.
 */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
264
265 static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
266 {
267         struct iio_dev_attr *p, *n;
268         struct iio_buffer *buffer = indio_dev->buffer;
269
270         list_for_each_entry_safe(p, n,
271                                  &buffer->scan_el_dev_attr_list, l)
272                 iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
273 }
274
275 static const char * const iio_scan_elements_group_name = "scan_elements";
276
277 int iio_buffer_register(struct iio_dev *indio_dev,
278                         const struct iio_chan_spec *channels,
279                         int num_channels)
280 {
281         struct iio_dev_attr *p;
282         struct attribute **attr;
283         struct iio_buffer *buffer = indio_dev->buffer;
284         int ret, i, attrn, attrcount, attrcount_orig = 0;
285
286         if (buffer->attrs)
287                 indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
288
289         if (buffer->scan_el_attrs != NULL) {
290                 attr = buffer->scan_el_attrs->attrs;
291                 while (*attr++ != NULL)
292                         attrcount_orig++;
293         }
294         attrcount = attrcount_orig;
295         INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
296         if (channels) {
297                 /* new magic */
298                 for (i = 0; i < num_channels; i++) {
299                         /* Establish necessary mask length */
300                         if (channels[i].scan_index >
301                             (int)indio_dev->masklength - 1)
302                                 indio_dev->masklength
303                                         = indio_dev->channels[i].scan_index + 1;
304
305                         ret = iio_buffer_add_channel_sysfs(indio_dev,
306                                                          &channels[i]);
307                         if (ret < 0)
308                                 goto error_cleanup_dynamic;
309                         attrcount += ret;
310                 }
311                 if (indio_dev->masklength && buffer->scan_mask == NULL) {
312                         buffer->scan_mask
313                                 = kzalloc(sizeof(*buffer->scan_mask)*
314                                           BITS_TO_LONGS(indio_dev->masklength),
315                                           GFP_KERNEL);
316                         if (buffer->scan_mask == NULL) {
317                                 ret = -ENOMEM;
318                                 goto error_cleanup_dynamic;
319                         }
320                 }
321         }
322
323         buffer->scan_el_group.name = iio_scan_elements_group_name;
324
325         buffer->scan_el_group.attrs
326                 = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
327                           (attrcount + 1),
328                           GFP_KERNEL);
329         if (buffer->scan_el_group.attrs == NULL) {
330                 ret = -ENOMEM;
331                 goto error_free_scan_mask;
332         }
333         if (buffer->scan_el_attrs)
334                 memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
335                        sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
336         attrn = attrcount_orig;
337
338         list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
339                 buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
340         indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
341
342         return 0;
343
344 error_free_scan_mask:
345         kfree(buffer->scan_mask);
346 error_cleanup_dynamic:
347         __iio_buffer_attr_cleanup(indio_dev);
348
349         return ret;
350 }
351 EXPORT_SYMBOL(iio_buffer_register);
352
353 void iio_buffer_unregister(struct iio_dev *indio_dev)
354 {
355         kfree(indio_dev->buffer->scan_mask);
356         kfree(indio_dev->buffer->scan_el_group.attrs);
357         __iio_buffer_attr_cleanup(indio_dev);
358 }
359 EXPORT_SYMBOL(iio_buffer_unregister);
360
361 ssize_t iio_buffer_read_length(struct device *dev,
362                                struct device_attribute *attr,
363                                char *buf)
364 {
365         struct iio_dev *indio_dev = dev_get_drvdata(dev);
366         struct iio_buffer *buffer = indio_dev->buffer;
367
368         if (buffer->access->get_length)
369                 return sprintf(buf, "%d\n",
370                                buffer->access->get_length(buffer));
371
372         return 0;
373 }
374 EXPORT_SYMBOL(iio_buffer_read_length);
375
376 ssize_t iio_buffer_write_length(struct device *dev,
377                                 struct device_attribute *attr,
378                                 const char *buf,
379                                 size_t len)
380 {
381         int ret;
382         ulong val;
383         struct iio_dev *indio_dev = dev_get_drvdata(dev);
384         struct iio_buffer *buffer = indio_dev->buffer;
385
386         ret = strict_strtoul(buf, 10, &val);
387         if (ret)
388                 return ret;
389
390         if (buffer->access->get_length)
391                 if (val == buffer->access->get_length(buffer))
392                         return len;
393
394         if (buffer->access->set_length) {
395                 buffer->access->set_length(buffer, val);
396                 if (buffer->access->mark_param_change)
397                         buffer->access->mark_param_change(buffer);
398         }
399
400         return len;
401 }
402 EXPORT_SYMBOL(iio_buffer_write_length);
403
404 ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
405                                         struct device_attribute *attr,
406                                         char *buf)
407 {
408         struct iio_dev *indio_dev = dev_get_drvdata(dev);
409         struct iio_buffer *buffer = indio_dev->buffer;
410
411         if (buffer->access->get_bytes_per_datum)
412                 return sprintf(buf, "%d\n",
413                                buffer->access->get_bytes_per_datum(buffer));
414
415         return 0;
416 }
417 EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
418
/**
 * iio_buffer_store_enable() - sysfs store starting/stopping buffer capture
 *
 * Writing non-zero switches the device from direct mode into a buffered
 * mode (triggered if the device supports it and a trigger is set, else
 * hardware buffered); writing zero switches it back.  Enable sequence:
 * preenable -> request_update -> mark_in_use -> mode switch ->
 * postenable, unwinding on any failure.  Disable sequence: predisable
 * -> unmark_in_use -> direct mode -> postdisable.  All of it runs under
 * the device mlock.
 */
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct iio_buffer *buffer = dev_info->buffer;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
	/* Re-requesting the current state is a harmless no-op. */
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (buffer->setup_ops->preenable) {
			ret = buffer->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started:"
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		/* Let the implementation resize/rebuild before first use. */
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->mark_in_use)
			buffer->access->mark_in_use(buffer);
		/* Definitely possible for devices to support both of these.*/
		if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				/* Undo the mark_in_use taken just above. */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
			dev_info->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (buffer->setup_ops->postenable) {
			ret = buffer->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started:"
				       "postenable failed\n");
				/*
				 * Roll back: drop the in-use marking,
				 * restore the previous mode and run
				 * postdisable to balance preenable's
				 * side effects.
				 */
				if (buffer->access->unmark_in_use)
					buffer->access->unmark_in_use(buffer);
				dev_info->currentmode = previous_mode;
				if (buffer->setup_ops->postdisable)
					buffer->setup_ops->
						postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		/* Disable path: predisable, drop use, back to direct mode. */
		if (buffer->setup_ops->predisable) {
			ret = buffer->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (buffer->access->unmark_in_use)
			buffer->access->unmark_in_use(buffer);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (buffer->setup_ops->postdisable) {
			ret = buffer->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
516
517 ssize_t iio_buffer_show_enable(struct device *dev,
518                                struct device_attribute *attr,
519                                char *buf)
520 {
521         struct iio_dev *dev_info = dev_get_drvdata(dev);
522         return sprintf(buf, "%d\n", !!(dev_info->currentmode
523                                        & INDIO_ALL_BUFFER_MODES));
524 }
525 EXPORT_SYMBOL(iio_buffer_show_enable);
526
527 int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
528 {
529         struct iio_buffer *buffer = indio_dev->buffer;
530         size_t size;
531         dev_dbg(&indio_dev->dev, "%s\n", __func__);
532         /* Check if there are any scan elements enabled, if not fail*/
533         if (!(buffer->scan_count || buffer->scan_timestamp))
534                 return -EINVAL;
535         if (buffer->scan_timestamp)
536                 if (buffer->scan_count)
537                         /* Timestamp (aligned to s64) and data */
538                         size = (((buffer->scan_count * buffer->bpe)
539                                         + sizeof(s64) - 1)
540                                 & ~(sizeof(s64) - 1))
541                                 + sizeof(s64);
542                 else /* Timestamp only  */
543                         size = sizeof(s64);
544         else /* Data only */
545                 size = buffer->scan_count * buffer->bpe;
546         buffer->access->set_bytes_per_datum(buffer, size);
547
548         return 0;
549 }
550 EXPORT_SYMBOL(iio_sw_buffer_preenable);
551
552
553 /* note NULL used as error indicator as it doesn't make sense. */
554 static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
555                                           unsigned int masklength,
556                                           unsigned long *mask)
557 {
558         if (bitmap_empty(mask, masklength))
559                 return NULL;
560         while (*av_masks) {
561                 if (bitmap_subset(mask, av_masks, masklength))
562                         return av_masks;
563                 av_masks += BITS_TO_LONGS(masklength);
564         }
565         return NULL;
566 }
567
568 /**
569  * iio_scan_mask_set() - set particular bit in the scan mask
570  * @buffer: the buffer whose scan mask we are interested in
571  * @bit: the bit to be set.
572  **/
573 int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
574 {
575         struct iio_dev *dev_info = buffer->indio_dev;
576         unsigned long *mask;
577         unsigned long *trialmask;
578
579         trialmask = kmalloc(sizeof(*trialmask)*
580                             BITS_TO_LONGS(dev_info->masklength),
581                             GFP_KERNEL);
582
583         if (trialmask == NULL)
584                 return -ENOMEM;
585         if (!dev_info->masklength) {
586                 WARN_ON("trying to set scanmask prior to registering buffer\n");
587                 kfree(trialmask);
588                 return -EINVAL;
589         }
590         bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
591         set_bit(bit, trialmask);
592
593         if (dev_info->available_scan_masks) {
594                 mask = iio_scan_mask_match(dev_info->available_scan_masks,
595                                            dev_info->masklength,
596                                            trialmask);
597                 if (!mask) {
598                         kfree(trialmask);
599                         return -EINVAL;
600                 }
601         }
602         bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
603         buffer->scan_count++;
604
605         kfree(trialmask);
606
607         return 0;
608 };
609 EXPORT_SYMBOL_GPL(iio_scan_mask_set);
610
611 int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
612 {
613         struct iio_dev *dev_info = buffer->indio_dev;
614         long *mask;
615
616         if (bit > dev_info->masklength)
617                 return -EINVAL;
618
619         if (!buffer->scan_mask)
620                 return 0;
621         if (dev_info->available_scan_masks)
622                 mask = iio_scan_mask_match(dev_info->available_scan_masks,
623                                            dev_info->masklength,
624                                            buffer->scan_mask);
625         else
626                 mask = buffer->scan_mask;
627         if (!mask)
628                 return 0;
629
630         return test_bit(bit, mask);
631 };
632 EXPORT_SYMBOL_GPL(iio_scan_mask_query);