/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
20 struct iio_map_internal {
21 struct iio_dev *indio_dev;
/* Global list of registered consumer mappings, guarded by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
29 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
32 struct iio_map_internal *mapi;
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
50 mutex_unlock(&iio_map_list_lock);
54 EXPORT_SYMBOL_GPL(iio_map_array_register);
58 * Remove all map entries associated with the given iio device
60 int iio_map_array_unregister(struct iio_dev *indio_dev)
63 struct iio_map_internal *mapi;
64 struct list_head *pos, *tmp;
66 mutex_lock(&iio_map_list_lock);
67 list_for_each_safe(pos, tmp, &iio_map_list) {
68 mapi = list_entry(pos, struct iio_map_internal, l);
69 if (indio_dev == mapi->indio_dev) {
75 mutex_unlock(&iio_map_list_lock);
78 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
80 static const struct iio_chan_spec
81 *iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
84 const struct iio_chan_spec *chan = NULL;
86 for (i = 0; i < indio_dev->num_channels; i++)
87 if (indio_dev->channels[i].datasheet_name &&
88 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
89 chan = &indio_dev->channels[i];
96 struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
98 struct iio_map_internal *c_i = NULL, *c = NULL;
99 struct iio_channel *channel;
102 if (name == NULL && channel_name == NULL)
103 return ERR_PTR(-ENODEV);
105 /* first find matching entry the channel map */
106 mutex_lock(&iio_map_list_lock);
107 list_for_each_entry(c_i, &iio_map_list, l) {
108 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
110 strcmp(channel_name, c_i->map->consumer_channel) != 0))
113 iio_device_get(c->indio_dev);
116 mutex_unlock(&iio_map_list_lock);
118 return ERR_PTR(-ENODEV);
120 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
121 if (channel == NULL) {
126 channel->indio_dev = c->indio_dev;
128 if (c->map->adc_channel_label) {
130 iio_chan_spec_from_name(channel->indio_dev,
131 c->map->adc_channel_label);
133 if (channel->channel == NULL) {
144 iio_device_put(c->indio_dev);
147 EXPORT_SYMBOL_GPL(iio_channel_get);
149 void iio_channel_release(struct iio_channel *channel)
151 iio_device_put(channel->indio_dev);
154 EXPORT_SYMBOL_GPL(iio_channel_release);
156 struct iio_channel *iio_channel_get_all(struct device *dev)
159 struct iio_channel *chans;
160 struct iio_map_internal *c = NULL;
166 return ERR_PTR(-EINVAL);
167 name = dev_name(dev);
169 mutex_lock(&iio_map_list_lock);
170 /* first count the matching maps */
171 list_for_each_entry(c, &iio_map_list, l)
172 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
182 /* NULL terminated array to save passing size */
183 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
189 /* for each map fill in the chans element */
190 list_for_each_entry(c, &iio_map_list, l) {
191 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
193 chans[mapind].indio_dev = c->indio_dev;
194 chans[mapind].data = c->map->consumer_data;
195 chans[mapind].channel =
196 iio_chan_spec_from_name(chans[mapind].indio_dev,
197 c->map->adc_channel_label);
198 if (chans[mapind].channel == NULL) {
200 goto error_free_chans;
202 iio_device_get(chans[mapind].indio_dev);
207 goto error_free_chans;
209 mutex_unlock(&iio_map_list_lock);
214 for (i = 0; i < nummaps; i++)
215 iio_device_put(chans[i].indio_dev);
218 mutex_unlock(&iio_map_list_lock);
222 EXPORT_SYMBOL_GPL(iio_channel_get_all);
224 void iio_channel_release_all(struct iio_channel *channels)
226 struct iio_channel *chan = &channels[0];
228 while (chan->indio_dev) {
229 iio_device_put(chan->indio_dev);
234 EXPORT_SYMBOL_GPL(iio_channel_release_all);
236 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
237 enum iio_chan_info_enum info)
244 return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
248 int iio_read_channel_raw(struct iio_channel *chan, int *val)
252 mutex_lock(&chan->indio_dev->info_exist_lock);
253 if (chan->indio_dev->info == NULL) {
258 ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
260 mutex_unlock(&chan->indio_dev->info_exist_lock);
264 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
266 static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
267 int raw, int *processed, unsigned int scale)
269 int scale_type, scale_val, scale_val2, offset;
273 ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE);
277 scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
278 IIO_CHAN_INFO_SCALE);
282 switch (scale_type) {
284 *processed = raw64 * scale_val;
286 case IIO_VAL_INT_PLUS_MICRO:
288 *processed = -raw64 * scale_val;
290 *processed = raw64 * scale_val;
291 *processed += div_s64(raw64 * (s64)scale_val2 * scale,
294 case IIO_VAL_INT_PLUS_NANO:
296 *processed = -raw64 * scale_val;
298 *processed = raw64 * scale_val;
299 *processed += div_s64(raw64 * (s64)scale_val2 * scale,
302 case IIO_VAL_FRACTIONAL:
303 *processed = div_s64(raw64 * (s64)scale_val * scale,
306 case IIO_VAL_FRACTIONAL_LOG2:
307 *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
316 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
317 int *processed, unsigned int scale)
321 mutex_lock(&chan->indio_dev->info_exist_lock);
322 if (chan->indio_dev->info == NULL) {
327 ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
330 mutex_unlock(&chan->indio_dev->info_exist_lock);
334 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
336 int iio_read_channel_processed(struct iio_channel *chan, int *val)
340 mutex_lock(&chan->indio_dev->info_exist_lock);
341 if (chan->indio_dev->info == NULL) {
346 if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
347 ret = iio_channel_read(chan, val, NULL,
348 IIO_CHAN_INFO_PROCESSED);
350 ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
353 ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
357 mutex_unlock(&chan->indio_dev->info_exist_lock);
361 EXPORT_SYMBOL_GPL(iio_read_channel_processed);
363 int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
367 mutex_lock(&chan->indio_dev->info_exist_lock);
368 if (chan->indio_dev->info == NULL) {
373 ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
375 mutex_unlock(&chan->indio_dev->info_exist_lock);
379 EXPORT_SYMBOL_GPL(iio_read_channel_scale);
381 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
384 /* Need to verify underlying driver has not gone away */
386 mutex_lock(&chan->indio_dev->info_exist_lock);
387 if (chan->indio_dev->info == NULL) {
392 *type = chan->channel->type;
394 mutex_unlock(&chan->indio_dev->info_exist_lock);
398 EXPORT_SYMBOL_GPL(iio_get_channel_type);