// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	const struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, const struct iio_map *maps)
{
	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

	if (!maps)
		return 0;

	guard(mutex)(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
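
/*
 * Usage sketch (illustrative, not taken from a real board file): a provider
 * driver registers a NULL-terminated map array so that platform consumers
 * can look channels up by name. The "my-consumer" device and "vbat" channel
 * names below are hypothetical.
 *
 *	static const struct iio_map adc_maps[] = {
 *		IIO_MAP("channel_0", "my-consumer", "vbat"),
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 *
 * Note that the maps array is not copied (only pointers are stored), so it
 * must stay valid until iio_map_array_unregister() is called.
 */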

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				const struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev: pointer to the iio_dev structure
 * @iiospec: IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case
 * of 1:1 mapped channels in IIO chips. This function performs only one
 * sanity check: whether the IIO index is less than num_channels (as
 * specified in the iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not fall
			 * back to any other lookup. Hence, explicitly return
			 * -EINVAL (maybe not the best error code) so that the
			 * caller won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
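
/*
 * Usage sketch (illustrative): a consumer described in the device tree with
 *
 *	io-channels = <&adc 1>;
 *	io-channel-names = "vbat";
 *
 * would typically resolve its channel from probe with something like
 *
 *	chan = fwnode_iio_channel_get_by_name(dev_fwnode(dev), "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * where "vbat" and the &adc phandle are hypothetical names.
 */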

static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_mem;
		}
	}

	return_ptr(channel);

error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
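
/*
 * Usage sketch (illustrative): a consumer driver normally prefers the
 * managed devm_iio_channel_get() variant, but the manual form looks like
 * this; "vbat" is a hypothetical channel name and error handling is minimal.
 *
 *	struct iio_channel *chan = iio_channel_get(&pdev->dev, "vbat");
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	iio_channel_release(chan);
 */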

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}

	return_ptr(chans);

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
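
/*
 * Usage sketch (illustrative): the returned array is NULL-terminated via the
 * indio_dev pointer, so a consumer can walk it without knowing its length.
 *
 *	struct iio_channel *chans = iio_channel_get_all(dev);
 *	struct iio_channel *chan;
 *
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		...;
 *	iio_channel_release_all(chans);
 */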

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_raw_multi) {
		ret = iio_info->read_raw_multi(chan->indio_dev,
					       chan->channel,
					       INDIO_MAX_RAW_ELEMENTS,
					       vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else if (iio_info->read_raw) {
		ret = iio_info->read_raw(chan->indio_dev,
					 chan->channel, val, val2, info);
	} else {
		return -EINVAL;
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
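
/*
 * Usage sketch (illustrative): read one raw sample; the return value is a
 * negative errno on failure, so a minimal caller looks like
 *
 *	int raw, ret;
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	if (ret < 0)
 *		return ret;
 *
 * Raw values are in device units; use iio_convert_raw_to_processed() or
 * iio_read_channel_processed() for values in standard IIO units.
 */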

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

int iio_multiply_value(int *result, s64 multiplier,
		       unsigned int type, int val, int val2)
{
	s64 denominator;

	switch (type) {
	case IIO_VAL_INT:
		*result = multiplier * val;
		return IIO_VAL_INT;
	case IIO_VAL_INT_PLUS_MICRO:
	case IIO_VAL_INT_PLUS_NANO:
		switch (type) {
		case IIO_VAL_INT_PLUS_MICRO:
			denominator = MICRO;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			denominator = NANO;
			break;
		}
		*result = multiplier * abs(val);
		*result += div_s64(multiplier * abs(val2), denominator);
		if (val < 0 || val2 < 0)
			*result *= -1;
		return IIO_VAL_INT;
	case IIO_VAL_FRACTIONAL:
		*result = div_s64(multiplier * val, val2);
		return IIO_VAL_INT;
	case IIO_VAL_FRACTIONAL_LOG2:
		*result = (multiplier * val) >> val2;
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_NS_GPL(iio_multiply_value, "IIO_UNIT_TEST");
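
/*
 * Worked example (illustrative): scaling a fixed-point IIO value by an
 * integer multiplier. For a scale of 0.610352 expressed as
 * IIO_VAL_INT_PLUS_MICRO (val = 0, val2 = 610352) and multiplier = 1000,
 * the result is 0 * 1000 + (610352 * 1000) / 1000000 = 610, returned as
 * IIO_VAL_INT. For IIO_VAL_FRACTIONAL with val = 5, val2 = 2 and
 * multiplier = 100, the result is (100 * 5) / 2 = 250.
 */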

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;
	int ret;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw64 * scale;
		return 0;
	}

	ret = iio_multiply_value(processed, raw64 * scale,
				 scale_type, scale_val, scale_val2);
	if (ret < 0)
		return ret;

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						     scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
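
/*
 * Usage sketch (illustrative): the result is (raw + offset) * channel scale,
 * further multiplied by the consumer-supplied 'scale', which selects the
 * output unit. For a voltage channel whose natural processed unit is mV,
 * passing scale = 1000 yields uV:
 *
 *	ret = iio_convert_raw_to_processed(chan, raw, &uv, 1000);
 *	if (ret < 0)
 *		return ret;
 */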

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret, pval, pval2;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, &pval, &pval2,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;

		return iio_multiply_value(val, scale, ret, pval, pval2);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			return ret;

		return iio_convert_raw_to_processed_unlocked(chan, *val, val,
							     scale);
	}
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
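
/*
 * Usage sketch (illustrative): whether the device reports
 * IIO_CHAN_INFO_PROCESSED directly or only raw values, the helper returns
 * the same scaled result. For example, a temperature channel whose processed
 * unit is milli-degrees Celsius can be read in micro-degrees by passing
 * scale = 1000:
 *
 *	ret = iio_read_channel_processed_scale(chan, &val, 1000);
 */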

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	if (iio_info->read_avail)
		return iio_info->read_avail(chan->indio_dev, chan->channel,
					    vals, type, length, info);
	return -EINVAL;
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
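
/*
 * Usage sketch (illustrative): on success the return value distinguishes a
 * list of discrete values (IIO_AVAIL_LIST, 'length' entries) from a
 * [min, step, max] triplet (IIO_AVAIL_RANGE):
 *
 *	const int *vals;
 *	int len, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &len);
 *	if (ret == IIO_AVAIL_LIST)
 *		...;	// vals[0..len-1] are the possible raw values
 *	else if (ret == IIO_AVAIL_RANGE)
 *		...;	// vals[0], vals[1], vals[2] = min, step, max
 */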

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int type;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	*type = chan->channel->type;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	const struct iio_info *iio_info = chan->indio_dev->info;

	if (iio_info->write_raw)
		return iio_info->write_raw(chan->indio_dev,
					   chan->channel, val, val2, info);
	return -EINVAL;
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
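
/*
 * Usage sketch (illustrative): drive a DAC channel to a raw code within the
 * device's advertised range, here picking mid-scale.
 *
 *	int max, ret;
 *
 *	ret = iio_read_max_channel_raw(chan, &max);
 *	if (ret < 0)
 *		return ret;
 *	ret = iio_write_channel_raw(chan, max / 2);
 */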

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!buf || offset_in_page(buf)) {
		pr_err("iio: invalid ext_info read buffer\n");
		return -EINVAL;
	}

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);

ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	if (!buf || offset_in_page(buf)) {
		pr_err("iio: invalid label read buffer\n");
		return -EINVAL;
	}

	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);