/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
#include "sysfs.h"
#include "events.h"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static const char * const iio_data_type_name[] = {
	[IIO_RAW] = "raw",
	[IIO_PROCESSED] = "input",
};

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

/* relies on the corresponding info_mask bits being defined in pairs: shared, then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
};

const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/**
 * struct iio_detected_event_list - list element for events that have occurred
 * @list:		linked list header
 * @ev:			the event itself
 */
struct iio_detected_event_list {
	struct list_head		list;
	struct iio_event_data		ev;
};

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @event_list_lock:	mutex to protect the list of detected events
 * @det_events:		list of detected events
 * @max_events:		maximum number of events before new ones are dropped
 * @current_events:	number of events in detected list
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t			wait;
	struct mutex				event_list_lock;
	struct list_head			det_events;
	int					max_events;
	int					current_events;
	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group			group;
};

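/**
 * iio_push_event() - try to add an event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * The event is only queued if userspace currently has the event chrdev open
 * (the busy flag is set); otherwise it is silently dropped.  Events beyond
 * @max_events are also dropped.
 */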
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;

		list_add_tail(&ev->list, &ev_int->det_events);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_push_event);

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

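/*
 * iio_event_chrdev_read() - blocking read of the next queued event.
 * Unless O_NONBLOCK was requested, wait for an event to be queued, then copy
 * the oldest one to userspace and remove it from the list.
 */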
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	size_t len = sizeof(el->ev);
	int ret;

	if (count < len)
		return -EINVAL;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int
							   ->det_events));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events,
			      struct iio_detected_event_list,
			      list);
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:

	return ret;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. With the busy flag cleared
	 * above, iio_push_event() will no longer queue new events.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
		list_del(&el->list);
		kfree(el);
	}
	ev_int->current_events = 0;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

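/*
 * iio_event_getfd() - hand userspace an anonymous fd for the event queue.
 * Only one reader at a time is allowed; the busy flag enforces this and is
 * cleared again when the fd is released.
 */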
static int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	mutex_unlock(&ev_int->event_list_lock);
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
		mutex_lock(&ev_int->event_list_lock);
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		mutex_unlock(&ev_int->event_list_lock);
	}
	return fd;
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
			__FILE__);
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);
		goto error_unregister_bus_type;
	}

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
}

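/*
 * iio_read_channel_info() - sysfs "show" for channel attributes.  Calls the
 * driver's read_raw() callback and formats the result as either a plain
 * integer or a fixed point "integer.fraction" value.
 */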
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
					    &val, &val2, this_attr->address);

	if (ret < 0)
		return ret;

	if (ret == IIO_VAL_INT)
		return sprintf(buf, "%d\n", val);
	else if (ret == IIO_VAL_INT_PLUS_MICRO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%06u\n", val, -val2);
		else
			return sprintf(buf, "%d.%06u\n", val, val2);
	} else if (ret == IIO_VAL_INT_PLUS_NANO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%09u\n", val, -val2);
		else
			return sprintf(buf, "%d.%09u\n", val, val2);
	} else
		return 0;
}

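/*
 * iio_write_channel_info() - sysfs "store" for channel attributes.  Parses a
 * decimal "integer.fraction" string (precision given by write_raw_get_fmt(),
 * defaulting to micro) and hands the two parts to the driver's write_raw().
 */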
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, integer = 0, fract = 0, fract_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	if (buf[0] == '-') {
		negative = true;
		buf++;
	}

	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				fract += fract_mult*(*buf - '0');
				if (fract_mult == 1)
					break;
				fract_mult /= 10;
			}
		} else if (*buf == '\n') {
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	if (negative) {
		if (integer)
			integer = -integer;
		else
			fract = -fract;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

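/*
 * __iio_device_attr_init() - build the sysfs name for a channel attribute and
 * wire up its show/store callbacks.  The name is assembled from the channel
 * direction, type, optional index / second index, optional modifier and
 * extended name, followed by the supplied postfix.
 */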
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   bool generic)
{
	int ret;
	char *name_format, *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && !generic) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan
								    ->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan
								    ->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	if (chan->differential) { /* Differential channels cannot have a modifier */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
		else {
			WARN(1, "Differential channels must be indexed\n");
			ret = -EINVAL;
			goto error_free_full_postfix;
		}
	} else { /* Single ended */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    full_postfix);
		else
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
	}
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}
	kfree(name_format);
	kfree(full_postfix);

	return 0;

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);
error_ret:
	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

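/*
 * __iio_add_chan_devattr() - allocate and initialize one channel device
 * attribute and add it to @attr_list, rejecting duplicates with -EBUSY.
 * Duplicates of generic (shared) attributes are expected and left to the
 * caller to skip; duplicates of specific ones are logged as an error.
 */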
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, i, attrcount = 0;

	if (chan->channel < 0)
		return 0;

	ret = __iio_add_chan_devattr(iio_data_type_name[chan->processed_val],
				     chan,
				     &iio_read_channel_info,
				     (chan->output ?
				      &iio_write_channel_info : NULL),
				     0,
				     0,
				     &indio_dev->dev,
				     &indio_dev->channel_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;

	for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i/2,
					     !(i%2),
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		if (ret == -EBUSY && (i%2 == 0)) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_device_remove_and_free_read_attr(struct iio_dev *indio_dev,
						 struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);

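/*
 * iio_device_register_sysfs() - build the device's main attribute group from
 * any driver supplied attributes, the attributes generated for each entry in
 * the channels array, and the standard "name" attribute.
 */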
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p, *n;
	struct attribute **attr;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact that a group
	 * does not need to be initialized if its name is NULL.
	 */
	INIT_LIST_HEAD(&indio_dev->channel_attr_list);
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			ret = iio_device_add_channel_sysfs(indio_dev,
							   &indio_dev
							   ->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (indio_dev->name)
		attrcount++;

	indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
						   sizeof(indio_dev->chan_attr_group.attrs[0]),
						   GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	list_for_each_entry_safe(p, n,
				 &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{

	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &indio_dev->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(indio_dev, p);
	}
	kfree(indio_dev->chan_attr_group.attrs);
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned long val;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}

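/*
 * iio_device_add_event_sysfs() - for every bit set in a channel's event_mask,
 * create a matching pair of "<type>_<direction>_en" and
 * "<type>_<direction>_value" attributes, encoding the event in the attribute
 * address so the driver callbacks can identify it.
 */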
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(indio_dev);

	return ret;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++)
		if (indio_dev->channels[j].event_mask != 0)
			return true;
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";
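/*
 * iio_device_register_eventset() - allocate the event interface and build its
 * "events" attribute group from any driver supplied event attributes plus the
 * per-channel enable/value attributes.  Nothing is created for devices that
 * declare no events.
 */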
static int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = container_of(device, struct iio_dev, dev);
	cdev_del(&indio_dev->chrdev);
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

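/**
 * iio_allocate_device() - allocate an iio_dev from a driver
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * The private data, if any, is placed after the struct iio_dev itself,
 * aligned to IIO_ALIGN.
 */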
struct iio_dev *iio_allocate_device(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			printk(KERN_ERR "Failed to get id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev) {
		ida_simple_remove(&iio_ida, dev->id);
		kfree(dev);
	}
}
EXPORT_SYMBOL(iio_free_device);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						struct iio_dev, chrdev);

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
		return -EBUSY;

	filp->private_data = indio_dev;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 **/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						struct iio_dev, chrdev);
	clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
	return 0;
}

/* Somewhat of a cross-file organization violation - the ioctls here are
 * actually event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};

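/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:		Device structure filled by the device driver
 *
 * Sets up the sysfs interfaces, the event interface if any, the trigger
 * consumer for triggered-buffer devices and finally the character device.
 */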
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(indio_dev);

	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;
	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&indio_dev->dev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
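
/*
 * Typical driver usage of the interfaces above - an illustrative sketch only;
 * the "foo" names, the channel array and the iio_info contents are
 * hypothetical and not part of this file:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = iio_allocate_device(sizeof(struct foo_state));
 *		if (indio_dev == NULL)
 *			return -ENOMEM;
 *
 *		indio_dev->dev.parent = &pdev->dev;
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;	(struct iio_info with read_raw etc.)
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *
 *		ret = iio_device_register(indio_dev);
 *		if (ret)
 *			iio_free_device(indio_dev);
 *		return ret;
 *	}
 *
 * On removal the driver calls iio_device_unregister() followed by
 * iio_free_device().
 */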

void iio_device_unregister(struct iio_dev *indio_dev)
{
	device_unregister(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");