/*
 * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk>
 *
 */

#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/poll.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "../ring_hw.h"
#include "sca3000.h"

/* RFC / future work
 *
 * The internal ring buffer doesn't actually change what it holds depending
 * on which signals are enabled etc, merely whether you can read them.
 * As such the scan mode selection is somewhat different from that of a
 * software ring buffer, and changing it also applies to any data already
 * in the buffer.  Currently scan elements aren't configured, so it doesn't
 * matter.
 */

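/*
 * sca3000_read_data() - read a block of data starting at a given register
 *
 * Builds a two transfer SPI message: the first transfer clocks out the read
 * command for reg_address_high, the second clocks len bytes back into a
 * freshly allocated buffer returned through rx_p.  The caller is responsible
 * for freeing that buffer.
 */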
static int sca3000_read_data(struct sca3000_state *st,
			     uint8_t reg_address_high,
			     u8 **rx_p,
			     int len)
{
	int ret;
	struct spi_message msg;
	struct spi_transfer xfer[2] = {
		{
			.len = 1,
			.tx_buf = st->tx,
		}, {
			.len = len,
		}
	};

	*rx_p = kmalloc(len, GFP_KERNEL);
	if (*rx_p == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	xfer[1].rx_buf = *rx_p;
	st->tx[0] = SCA3000_READ_REG(reg_address_high);
	spi_message_init(&msg);
	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev, "problem reading register\n");
		goto error_free_rx;
	}

	return 0;
error_free_rx:
	kfree(*rx_p);
error_ret:
	return ret;
}

/**
 * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
 * @r:			the ring
 * @count:		number of bytes to try to pull from the hw ring
 * @buf:		userspace buffer into which the samples are copied
 *
 * Currently does not provide timestamps.  As the hardware doesn't add them,
 * they can only be inferred approximately from ring buffer events such as
 * 50% full and knowledge of when the buffer was last emptied.  This is left
 * to userspace.
 **/
static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
				      size_t count, char __user *buf)
{
	struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
	struct iio_dev *indio_dev = hw_ring->private;
	struct sca3000_state *st = iio_priv(indio_dev);
	u8 *rx;
	int ret, i, num_available, num_read = 0;
	int bytes_per_sample = 1;

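	/*
	 * Each sample occupies two bytes in the hardware ring when 11 bit
	 * resolution is selected, and a single byte otherwise.
	 */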
	if (st->bpse == 11)
		bytes_per_sample = 2;

	mutex_lock(&st->lock);
	if (count % bytes_per_sample) {
		ret = -EINVAL;
		goto error_ret;
	}

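	/* Ask the device how many samples its ring currently holds. */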
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
	if (ret)
		goto error_ret;
	num_available = st->rx[0];
	/*
	 * num_available is the total number of samples available,
	 * i.e. number of time points * number of channels.
	 */
	if (count > num_available * bytes_per_sample)
		num_read = num_available * bytes_per_sample;
	else
		num_read = count;

	ret = sca3000_read_data(st,
				SCA3000_REG_ADDR_RING_OUT,
				&rx, num_read);
	if (ret)
		goto error_ret;

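	/*
	 * Samples come out of the ring most significant byte first; convert
	 * each 16 bit word to cpu order in place before copying it out.
	 */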
	for (i = 0; i < num_read / sizeof(u16); i++)
		*(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);

	if (copy_to_user(buf, rx, num_read))
		ret = -EFAULT;
	kfree(rx);
	r->stufftoread = 0;
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : num_read;
}

/* This is only valid with all 3 elements enabled */
static int sca3000_ring_get_length(struct iio_buffer *r)
{
	return 64;
}

/* only valid if resolution is kept at 11bits */
static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
{
	return 6;
}

static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;

/**
 * sca3000_query_ring_int() - query whether the ring status interrupt is enabled
 **/
static ssize_t sca3000_query_ring_int(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, val;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct sca3000_state *st = iio_priv(indio_dev);

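	/*
	 * The attribute's address field holds the interrupt mask bit this
	 * sysfs file reports on.
	 */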
	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	val = st->rx[0];
	mutex_unlock(&st->lock);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", !!(val & this_attr->address));
}

/**
 * sca3000_set_ring_int() - set the state of the ring status interrupt
 **/
static ssize_t sca3000_set_ring_int(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct sca3000_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	long val;
	int ret;

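	/*
	 * Read-modify-write the interrupt mask register so that only the
	 * bit belonging to this attribute is changed.
	 */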
	mutex_lock(&st->lock);
	ret = strict_strtol(buf, 10, &val);
	if (ret)
		goto error_ret;
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
	if (ret)
		goto error_ret;
	if (val)
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] | this_attr->address);
	else
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_INT_MASK,
					st->rx[0] & ~this_attr->address);
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : len;
}

static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_HALF);

static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
		       sca3000_query_ring_int,
		       sca3000_set_ring_int,
		       SCA3000_INT_MASK_RING_THREE_QUARTER);

static ssize_t sca3000_show_buffer_scale(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct sca3000_state *st = iio_priv(indio_dev);

	return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
}

static IIO_DEVICE_ATTR(in_accel_scale,
		       S_IRUGO,
		       sca3000_show_buffer_scale,
		       NULL,
		       0);

/*
 * Ring buffer attributes
 * This device is a bit unusual in that the sampling frequency and bpse
 * only apply to the ring buffer.  Full rate and accuracy are available
 * at all times via direct reading from the registers.
 */
static struct attribute *sca3000_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&iio_dev_attr_50_percent.dev_attr.attr,
	&iio_dev_attr_75_percent.dev_attr.attr,
	&iio_dev_attr_in_accel_scale.dev_attr.attr,
	NULL,
};

static struct attribute_group sca3000_ring_attr = {
	.attrs = sca3000_ring_attributes,
	.name = "buffer",
};

static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_buffer *buf;
	struct iio_hw_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->private = indio_dev;
	buf = &ring->buf;
	buf->stufftoread = 0;
	buf->attrs = &sca3000_ring_attr;
	iio_buffer_init(buf);

	return buf;
}

static inline void sca3000_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_hw_buf(r));
}

static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
	.read_first_n = &sca3000_read_first_n_hw_rb,
	.get_length = &sca3000_ring_get_length,
	.get_bytes_per_datum = &sca3000_ring_get_bytes_per_datum,
};

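/*
 * Allocate the hardware backed buffer for the device and attach the ring
 * access functions to it.
 */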
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
	indio_dev->buffer = sca3000_rb_allocate(indio_dev);
	if (indio_dev->buffer == NULL)
		return -ENOMEM;
	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	indio_dev->buffer->access = &sca3000_ring_access_funcs;

	return 0;
}

void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
{
	sca3000_rb_free(indio_dev->buffer);
}

static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
	struct sca3000_state *st = iio_priv(indio_dev);
	int ret;

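	/*
	 * Read-modify-write the mode register, flipping only the ring
	 * buffer enable bit.
	 */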
	mutex_lock(&st->lock);
	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
	if (ret)
		goto error_ret;
	if (state) {
		printk(KERN_INFO "supposedly enabling ring buffer\n");
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] | SCA3000_RING_BUF_ENABLE));
	} else {
		ret = sca3000_write_reg(st,
					SCA3000_REG_ADDR_MODE,
					(st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
	}
error_ret:
	mutex_unlock(&st->lock);

	return ret;
}
/**
 * sca3000_hw_ring_preenable() - hw ring buffer preenable function
 *
 * Very simple enable function as the chip allows normal reads during ring
 * buffer operation, so as long as the ring is indeed running before we
 * notify the core, the precise ordering does not matter.
 **/
static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 1);
}

static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
{
	return __sca3000_hw_ring_state_set(indio_dev, 0);
}

static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
	.preenable = &sca3000_hw_ring_preenable,
	.postdisable = &sca3000_hw_ring_postdisable,
};

void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
	indio_dev->setup_ops = &sca3000_ring_setup_ops;
}

/**
 * sca3000_ring_int_process() - ring specific interrupt handling
 *
 * This is only split from the main interrupt handler so as to
 * reduce the amount of code if the ring buffer is not enabled.
 **/
void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
{
	if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
		   SCA3000_INT_STATUS_HALF)) {
		ring->stufftoread = true;
		wake_up_interruptible(&ring->pollq);
	}
}