/* SPDX-License-Identifier: GPL-2.0-only */

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 */
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/iio/types.h>
/* IIO TODO LIST */
/*
 * Provide means of adjusting timer accuracy.
 * Currently assumes nanoseconds.
 */

struct fwnode_reference_args;

enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};

enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};

struct iio_chan_spec;
struct iio_dev;

/**
 * struct iio_chan_spec_ext_info - Extended channel info attribute
 * @name: Info attribute name
 * @shared: Whether this attribute is shared between all channels.
 * @read: Read callback for this info attribute, may be NULL.
 * @write: Write callback for this info attribute, may be NULL.
 * @private: Data private to the driver.
 */
struct iio_chan_spec_ext_info {
	const char *name;
	enum iio_shared_by shared;
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;
};

/**
 * struct iio_enum - Enum channel info attribute
 * @items: An array of strings.
 * @num_items: Length of the item array.
 * @set: Set callback function, may be NULL.
 * @get: Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum style channel attributes.
 * Enum style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index to the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
 */
struct iio_enum {
	const char * const *items;
	unsigned int num_items;
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len);

/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name: Attribute name
 * @_shared: Whether the attribute is shared between all channels
 * @_e: Pointer to an iio_enum struct
 *
 * This should usually be used together with IIO_ENUM_AVAILABLE()
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}

/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name: Attribute name ("_available" will be appended to the name)
 * @_shared: Whether the attribute is shared between all channels
 * @_e: Pointer to an iio_enum struct
 *
 * Creates a read only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = _shared, \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}
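
/*
 * Example: a minimal sketch of wiring an iio_enum into a channel's ext_info
 * array. The foo_* names and the two-entry mode list are hypothetical and
 * only illustrate the intended usage of IIO_ENUM()/IIO_ENUM_AVAILABLE();
 * they are not part of this header.
 *
 *	struct foo_state {
 *		unsigned int mode;
 *	};
 *
 *	static const char * const foo_modes[] = { "low_power", "normal" };
 *
 *	static int foo_set_mode(struct iio_dev *indio_dev,
 *				const struct iio_chan_spec *chan,
 *				unsigned int mode)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		st->mode = mode;
 *		return 0;
 *	}
 *
 *	static int foo_get_mode(struct iio_dev *indio_dev,
 *				const struct iio_chan_spec *chan)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return st->mode;
 *	}
 *
 *	static const struct iio_enum foo_mode_enum = {
 *		.items = foo_modes,
 *		.num_items = ARRAY_SIZE(foo_modes),
 *		.set = foo_set_mode,
 *		.get = foo_get_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("mode", IIO_SEPARATE, &foo_mode_enum),
 *		IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &foo_mode_enum),
 *		{ }
 *	};
 */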

/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation: 3 dimensional space rotation matrix defining sensor alignment
 *	with main hardware
 */
struct iio_mount_matrix {
	const char *rotation[9];
};

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf);
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);

typedef const struct iio_mount_matrix *
	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan);

/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared: Whether the attribute is shared between all channels
 * @_get: Pointer to an iio_get_mount_matrix_t accessor
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}
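
/*
 * Example: a minimal sketch of exposing a mount matrix, assuming the
 * hypothetical foo_state from the enum sketch above also carries a
 * struct iio_mount_matrix orientation member, typically filled at probe time
 * with iio_read_mount_matrix().
 *
 *	static const struct iio_mount_matrix *
 *	foo_get_mount_matrix(const struct iio_dev *indio_dev,
 *			     const struct iio_chan_spec *chan)
 *	{
 *		const struct foo_state *st = iio_priv(indio_dev);
 *
 *		return &st->orientation;
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 */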

/**
 * struct iio_event_spec - specification for a channel event
 * @type: Type of the event
 * @dir: Direction of the event
 * @mask_separate: Bit mask of enum iio_event_info values. Attributes
 *	set in this mask will be registered per channel.
 * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
 *	set in this mask will be shared by channel type.
 * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
 *	set in this mask will be shared by channel type and direction.
 * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
 *	set in this mask will be shared by all channels.
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
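
/*
 * Example: a hypothetical rising-threshold event description, with the
 * threshold value and the enable control exposed per channel. This is only an
 * illustrative sketch of how the masks are meant to be filled in.
 *
 *	static const struct iio_event_spec foo_thresh_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 *				 BIT(IIO_EV_INFO_ENABLE),
 *	};
 */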

/**
 * struct iio_chan_spec - specification of a single channel
 * @type: What type of measurement is the channel making.
 * @channel: What number do we wish to assign the channel.
 * @channel2: If there is a second number for a differential
 *	channel then this is it. If modified is set then the
 *	value here specifies the modifier.
 * @address: Driver specific identifier.
 * @scan_index: Monotonic index to give ordering in scans when read
 *	from a buffer.
 * @scan_type: struct describing the scan type
 * @scan_type.sign: 's' or 'u' to specify signed or unsigned
 * @scan_type.realbits: Number of valid bits of data
 * @scan_type.storagebits: Realbits + padding
 * @scan_type.shift: Shift right by this before masking out realbits.
 * @scan_type.repeat: Number of times real/storage bits repeats.
 *	When the repeat element is more than 1, then
 *	the type element in sysfs will show a repeat
 *	value. Otherwise, the number of repetitions
 *	is omitted.
 * @scan_type.endianness: little or big endian
 * @info_mask_separate: What information is to be exported that is specific to
 *	this channel.
 * @info_mask_separate_available: What availability information is to be
 *	exported that is specific to this channel.
 * @info_mask_shared_by_type: What information is to be exported that is shared
 *	by all channels of the same type.
 * @info_mask_shared_by_type_available: What availability information is to be
 *	exported that is shared by all channels of the same type.
 * @info_mask_shared_by_dir: What information is to be exported that is shared
 *	by all channels of the same direction.
 * @info_mask_shared_by_dir_available: What availability information is to be
 *	exported that is shared by all channels of the same direction.
 * @info_mask_shared_by_all: What information is to be exported that is shared
 *	by all channels.
 * @info_mask_shared_by_all_available: What availability information is to be
 *	exported that is shared by all channels.
 * @event_spec: Array of events which should be registered for this
 *	channel.
 * @num_event_specs: Size of the event_spec array.
 * @ext_info: Array of extended info attributes for this channel.
 *	The array is NULL terminated, the last element should
 *	have its name field set to NULL.
 * @extend_name: Allows labeling of channel attributes with an
 *	informative name. Note this has no effect on event codes etc.,
 *	unlike modifiers.
 *	This field is deprecated in favour of providing
 *	iio_info->read_label() to override the label, which
 *	unlike @extend_name does not affect sysfs filenames.
 * @datasheet_name: A name used in in-kernel mapping of channels. It should
 *	correspond to the first name that the channel is referred
 *	to by in the datasheet (e.g. IND), or the nearest
 *	possible compound name (e.g. IND-INC).
 * @modified: Does a modifier apply to this channel. What these are
 *	depends on the channel type. Modifier is set in
 *	channel2. Examples are IIO_MOD_X for axial sensors about
 *	the 'x' axis.
 * @indexed: Specify the channel has a numerical index. If not,
 *	the channel index number will be suppressed for sysfs
 *	attributes but not for event codes.
 * @output: Channel is output.
 * @differential: Channel is differential.
 */
struct iio_chan_spec {
	enum iio_chan_type type;
	int channel;
	int channel2;
	unsigned long address;
	int scan_index;
	struct {
		char sign;
		u8 realbits;
		u8 storagebits;
		u8 shift;
		u8 repeat;
		enum iio_endian endianness;
	} scan_type;
	long info_mask_separate;
	long info_mask_separate_available;
	long info_mask_shared_by_type;
	long info_mask_shared_by_type_available;
	long info_mask_shared_by_dir;
	long info_mask_shared_by_dir_available;
	long info_mask_shared_by_all;
	long info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char *extend_name;
	const char *datasheet_name;
	unsigned modified:1;
	unsigned indexed:1;
	unsigned output:1;
	unsigned differential:1;
};


/**
 * iio_channel_has_info() - Checks whether a channel supports an info attribute
 * @chan: The channel to be queried
 * @type: Type of the info attribute to be checked
 *
 * Returns true if the channel supports reporting values for the given info
 * attribute type, false otherwise.
 */
static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
					enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate & BIT(type)) |
		(chan->info_mask_shared_by_type & BIT(type)) |
		(chan->info_mask_shared_by_dir & BIT(type)) |
		(chan->info_mask_shared_by_all & BIT(type));
}

/**
 * iio_channel_has_available() - Checks if a channel has an available attribute
 * @chan: The channel to be queried
 * @type: Type of the available attribute to be checked
 *
 * Returns true if the channel supports reporting available values for the
 * given attribute type, false otherwise.
 */
static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
					     enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate_available & BIT(type)) |
		(chan->info_mask_shared_by_type_available & BIT(type)) |
		(chan->info_mask_shared_by_dir_available & BIT(type)) |
		(chan->info_mask_shared_by_all_available & BIT(type));
}

#define IIO_CHAN_SOFT_TIMESTAMP(_si) {	\
	.type = IIO_TIMESTAMP,		\
	.channel = -1,			\
	.scan_index = _si,		\
	.scan_type = {			\
		.sign = 's',		\
		.realbits = 64,		\
		.storagebits = 64,	\
	},				\
}
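
/*
 * Example: a hypothetical channel table for a 12-bit, big-endian ADC with a
 * single indexed voltage channel plus a software timestamp. It reuses the
 * foo_thresh_event sketch above; none of the foo_* names exist outside these
 * illustrative comments.
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *			.event_spec = &foo_thresh_event,
 *			.num_event_specs = 1,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 'u',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.shift = 0,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 */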

s64 iio_get_time_ns(const struct iio_dev *indio_dev);

/*
 * Device operating modes
 * @INDIO_DIRECT_MODE: There is an access to either:
 * a) The last single value available for devices that do not provide
 *    on-demand reads.
 * b) A new value after performing an on-demand read otherwise.
 * On most devices, this is a single-shot read. On some devices with data
 * streams without an 'on-demand' function, this might also be the 'last value'
 * feature. Above all, this mode internally means that we are not in any of the
 * other modes, and sysfs reads should work.
 * Device drivers should inform the core if they support this mode.
 * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
 * It indicates that an explicit trigger is required. This requests the core to
 * attach a poll function when enabling the buffer, which is indicated by the
 * _TRIGGERED suffix.
 * The core will ensure this mode is set when registering a triggered buffer
 * with iio_triggered_buffer_setup().
 * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
 * No poll function can be attached because there is no trigger infrastructure
 * we can use to cause capture. There is a kfifo that the driver will fill, but
 * not "only one scan at a time". Typically, hardware will have a buffer that
 * can hold multiple scans. Software may read one or more scans at a time and
 * push the available data to the kfifo. This means the core will not attach
 * any poll function when enabling the buffer.
 * The core will ensure this mode is set when registering a simple kfifo buffer
 * with devm_iio_kfifo_buffer_setup().
 * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
 * Same as above, but this time the buffer is not a kfifo where we have direct
 * access to the data. Instead, the consumer driver must access the data through
 * non software visible channels (or DMA when there is no demux possible in
 * software).
 * The core will ensure this mode is set when registering a dmaengine buffer
 * with devm_iio_dmaengine_buffer_setup().
 * @INDIO_EVENT_TRIGGERED: Very unusual mode.
 * Triggers usually refer to an external event which will start data capture.
 * Here it is kind of the opposite: a particular state of the data might
 * produce an event. We don't necessarily have access to the data itself, but
 * to the event produced. For example, this can be a threshold detector. The
 * internal path of this mode is very close to the INDIO_BUFFER_TRIGGERED mode.
 * The core will ensure this mode is set when registering a triggered event.
 * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
 * Here, triggers can result in data capture and can be routed to multiple
 * hardware components, which makes them close to regular triggers in the way
 * they must be managed by the core, but without the entire interrupts/poll
 * functions burden. Interrupts are irrelevant as the data flow is hardware
 * mediated and distributed.
 */
#define INDIO_DIRECT_MODE		0x01
#define INDIO_BUFFER_TRIGGERED		0x02
#define INDIO_BUFFER_SOFTWARE		0x04
#define INDIO_BUFFER_HARDWARE		0x08
#define INDIO_EVENT_TRIGGERED		0x10
#define INDIO_HARDWARE_TRIGGERED	0x20

#define INDIO_ALL_BUFFER_MODES \
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

#define INDIO_ALL_TRIGGERED_MODES \
	(INDIO_BUFFER_TRIGGERED \
	 | INDIO_EVENT_TRIGGERED \
	 | INDIO_HARDWARE_TRIGGERED)

#define INDIO_MAX_RAW_ELEMENTS		4

struct iio_val_int_plus_micro {
	int integer;
	int micro;
};

struct iio_trigger; /* forward declaration */

/**
 * struct iio_info - constant information about device
 * @event_attrs: event control attributes
 * @attrs: general purpose device attributes
 * @read_raw: function to request a value from the device.
 *	mask specifies which value. Note 0 means a reading of
 *	the channel in question. Return value will specify the
 *	type of value returned by the device. val and val2 will
 *	contain the elements making up the returned value.
 * @read_raw_multi: function to return values from the device.
 *	mask specifies which value. Note 0 means a reading of
 *	the channel in question. Return value will specify the
 *	type of value returned by the device. The vals pointer
 *	contains the elements making up the returned value.
 *	max_len specifies the maximum number of elements the
 *	vals pointer can contain. val_len is used to return the
 *	number of valid elements in vals.
 * @read_avail: function to return the available values from the device.
 *	mask specifies which value. Note 0 means the available
 *	values for the channel in question. Return value
 *	specifies if an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
 *	returned in vals. The type of the vals is returned in
 *	type and the number of vals is returned in length. For
 *	ranges, there are always three vals returned: min, step
 *	and max. For lists, all possible values are enumerated.
 * @write_raw: function to write a value to the device.
 *	Parameters are the same as for read_raw.
 * @read_label: function to request the label name for a specified channel,
 *	for better channel identification.
 * @write_raw_get_fmt: callback function to query the expected
 *	format/precision. If not set by the driver, write_raw
 *	returns IIO_VAL_INT_PLUS_MICRO.
 * @read_event_config: find out if the event is enabled.
 * @write_event_config: set if the event is enabled.
 * @read_event_value: read a configuration value associated with the event.
 * @write_event_value: write a configuration value for the event.
 * @read_event_label: function to request the label name for a specified event,
 *	for better event identification.
 * @validate_trigger: function to validate the trigger when the
 *	current trigger gets changed.
 * @update_scan_mode: function to configure device and scan buffer when
 *	channels have changed
 * @debugfs_reg_access: function to read or write register value of device
 * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
 * @hwfifo_set_watermark: function pointer to set the current hardware
 *	fifo watermark level; see hwfifo_* entries in
 *	Documentation/ABI/testing/sysfs-bus-iio for details on
 *	how the hardware fifo operates
 * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
 *	in the hardware fifo to the device buffer. The driver
 *	should not flush more than count samples. The function
 *	must return the number of samples flushed, 0 if no
 *	samples were flushed or a negative integer if no samples
 *	were flushed and there was an error.
 **/
struct iio_info {
	const struct attribute_group *event_attrs;
	const struct attribute_group *attrs;

	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 long mask);

	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  int state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*read_event_label)(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label);

	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned reg, unsigned writeval,
				  unsigned *readval);
	int (*fwnode_xlate)(struct iio_dev *indio_dev,
			    const struct fwnode_reference_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned count);
};
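
/*
 * Example: a minimal sketch of an iio_info with a read_raw() callback for the
 * hypothetical 12-bit ADC used in the channel sketch above. The raw value and
 * the 2.5 V reference are made up; the point is the mask dispatch and the
 * IIO_VAL_* return convention.
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			*val = 42;	// hypothetical conversion result
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			// 2500 mV reference / 2^12 -> ~0.610352 mV per LSB
 *			*val = 0;
 *			*val2 = 610352;
 *			return IIO_VAL_INT_PLUS_MICRO;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.read_raw = foo_read_raw,
 *	};
 */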

/**
 * struct iio_buffer_setup_ops - buffer setup related callbacks
 * @preenable: [DRIVER] function to run prior to marking buffer enabled
 * @postenable: [DRIVER] function to run after marking buffer enabled
 * @predisable: [DRIVER] function to run prior to marking buffer disabled
 * @postdisable: [DRIVER] function to run after marking buffer disabled
 * @validate_scan_mask: [DRIVER] function callback to check whether a given
 *	scan mask is valid for the device.
 */
struct iio_buffer_setup_ops {
	int (*preenable)(struct iio_dev *);
	int (*postenable)(struct iio_dev *);
	int (*predisable)(struct iio_dev *);
	int (*postdisable)(struct iio_dev *);
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};
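
/*
 * Example: a minimal sketch of buffer setup callbacks that start and stop
 * continuous sampling around buffer enable/disable. foo_start_sampling() and
 * foo_stop_sampling() are hypothetical helpers standing in for whatever the
 * hardware actually needs.
 *
 *	static int foo_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return foo_start_sampling(st);
 *	}
 *
 *	static int foo_buffer_predisable(struct iio_dev *indio_dev)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return foo_stop_sampling(st);
 *	}
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.postenable = foo_buffer_postenable,
 *		.predisable = foo_buffer_predisable,
 *	};
 */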

/**
 * struct iio_dev - industrial I/O device
 * @modes: [DRIVER] bitmask listing all the operating modes
 *	supported by the IIO device. This list should be
 *	initialized before registering the IIO device. It can
 *	also be filled in by the IIO core, as a result of
 *	enabling particular features in the driver
 *	(see iio_triggered_event_setup()).
 * @dev: [DRIVER] device structure, should be assigned a parent and owner
 * @buffer: [DRIVER] any buffer present
 * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
 *	array in order of preference, the most preferred masks first.
 * @masklength: [INTERN] the length of the mask established from channels
 * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
 * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
 * @trig: [INTERN] current device trigger (buffer modes)
 * @pollfunc: [DRIVER] function run on trigger being received
 * @pollfunc_event: [DRIVER] function run on events trigger being received
 * @channels: [DRIVER] channel specification structure table
 * @num_channels: [DRIVER] number of channels specified in @channels.
 * @name: [DRIVER] name of the device.
 * @label: [DRIVER] unique name to identify which device this is
 * @info: [DRIVER] callbacks and constant info from driver
 * @setup_ops: [DRIVER] callbacks to call before and after buffer
 *	enable/disable
 * @priv: [DRIVER] reference to driver's private information
 *	**MUST** be accessed **ONLY** via iio_priv() helper
 */
struct iio_dev {
	int modes;
	struct device dev;

	struct iio_buffer *buffer;
	int scan_bytes;

	const unsigned long *available_scan_masks;
	unsigned masklength;
	const unsigned long *active_scan_mask;
	bool scan_timestamp;
	struct iio_trigger *trig;
	struct iio_poll_func *pollfunc;
	struct iio_poll_func *pollfunc_event;

	struct iio_chan_spec const *channels;
	int num_channels;

	const char *name;
	const char *label;
	const struct iio_info *info;
	const struct iio_buffer_setup_ops *setup_ops;

	void *priv;
};

int iio_device_id(struct iio_dev *indio_dev);
int iio_device_get_current_mode(struct iio_dev *indio_dev);
bool iio_buffer_enabled(struct iio_dev *indio_dev);

const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev: Device structure filled by the device driver
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev: Device to allocate iio_dev for
 * @indio_dev: Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
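
/*
 * Example: a minimal sketch of a probe() flow tying the pieces above
 * together, assuming a hypothetical I2C device "foo". Allocation, the priv
 * area and registration are all devres managed here, so no remove()
 * counterpart is needed for them.
 *
 *	static int foo_probe(struct i2c_client *client)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		// initialise st and configure the hardware here
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *
 *		return devm_iio_device_register(&client->dev, indio_dev);
 *	}
 */
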
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
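
/*
 * Example: the usual claim/release pattern around a single-shot conversion,
 * so that a sysfs read cannot race with buffered capture. foo_read_channel()
 * is a hypothetical helper performing the actual conversion.
 *
 *	static int foo_read_single(struct iio_dev *indio_dev,
 *				   const struct iio_chan_spec *chan, int *val)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *		int ret;
 *
 *		ret = iio_device_claim_direct_mode(indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_read_channel(st, chan->channel, val);
 *		iio_device_release_direct_mode(indio_dev);
 *
 *		return ret;
 *	}
 */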

extern struct bus_type iio_bus_type;

/**
 * iio_device_put() - reference counted deallocation of struct device
 * @indio_dev: IIO device structure containing the device
 **/
static inline void iio_device_put(struct iio_dev *indio_dev)
{
	if (indio_dev)
		put_device(&indio_dev->dev);
}

clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);

/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev: The device embedded in the IIO device
 *
 * Note: The device must be an IIO device, otherwise the result is undefined.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}

/**
 * iio_device_get() - increment reference count for the device
 * @indio_dev: IIO device structure
 *
 * Returns: The passed IIO device
 **/
static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
{
	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}

/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev: IIO device structure
 * @parent: reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) and IIO device registration
 * (via iio_device_register() or devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different from the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation and registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}

/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev: IIO device structure
 * @data: Driver specific data
 *
 * Allows attaching an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata().
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}

/**
 * iio_device_get_drvdata() - Get device driver data
 * @indio_dev: IIO device structure
 *
 * Returns the data previously set with iio_device_set_drvdata()
 */
static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
	return dev_get_drvdata(&indio_dev->dev);
}

/*
 * Used to ensure the iio_priv() structure is aligned to allow that structure
 * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
 * them safe for use with non-coherent DMA.
 */
#define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);

/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	return indio_dev->priv;
}
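
/*
 * Example: extending the hypothetical foo_state with a DMA-safe transfer
 * buffer placed at the end of the iio_priv() area and aligned with
 * IIO_DMA_MINALIGN, so it does not share a cacheline with the rest of the
 * structure.
 *
 *	struct foo_state {
 *		struct spi_device *spi;
 *		struct mutex lock;
 *		// Transfer buffer, kept cacheline aligned and last.
 *		u8 rx_buf[4] __aligned(IIO_DMA_MINALIGN);
 *	};
 */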

void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);

#define devm_iio_trigger_alloc(parent, fmt, ...) \
	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
__printf(3, 4)
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev: IIO device structure for device
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif

ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);

int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
			int *fract);
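
/*
 * Example: parsing a user supplied fixed point string with micro precision;
 * with a fract_mult of 100000 the first fractional digit is worth 100000, so
 * six fractional digits are honoured overall. The buf/val/val2 names are
 * hypothetical.
 *
 *	int val, val2, ret;
 *
 *	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
 *	if (ret)
 *		return ret;
 *	// For buf == "1.5" this yields val == 1, val2 == 500000,
 *	// matching the IIO_VAL_INT_PLUS_MICRO convention.
 */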

/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Returns the given value converted from degree to rad
 */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Returns the given value converted from rad to degree
 */
#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)

/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Returns the given value converted from g to meter / second**2
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)

/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g
 */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
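
/*
 * Example: using the conversion helpers to report an accelerometer scale in
 * m/s^2 from a datasheet sensitivity given in micro-g per LSB (61 ug/LSB is a
 * made up figure for a +/-2 g, 16-bit part):
 *
 *	case IIO_CHAN_INFO_SCALE:
 *		*val = 0;
 *		*val2 = IIO_G_TO_M_S_2(61);	// ~598 micro m/s^2 per LSB
 *		return IIO_VAL_INT_PLUS_MICRO;
 */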

#endif /* _INDUSTRIAL_IO_H_ */