Lines Matching +full:multi +full:- +full:attr

1 // SPDX-License-Identifier: GPL-2.0-only
9 * - Better memory allocation techniques?
10 * - Alternative access techniques?
24 #include <linux/iio/iio-opaque.h>
38 return !list_empty(&buf->buffer_list); in iio_buffer_is_active()
43 return buf->access->data_available(buf); in iio_buffer_data_available()
49 if (!indio_dev->info->hwfifo_flush_to_buffer) in iio_buffer_flush_hwfifo()
50 return -ENODEV; in iio_buffer_flush_hwfifo()
52 return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required); in iio_buffer_flush_hwfifo()
62 if (!indio_dev->info) in iio_buffer_ready()
74 /* force a flush for non-blocking reads */ in iio_buffer_ready()
77 to_flush - avail); in iio_buffer_ready()
83 to_wait - avail); in iio_buffer_ready()
94 * iio_buffer_read() - chrdev read for buffer access
109 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_read()
110 struct iio_buffer *rb = ib->buffer; in iio_buffer_read()
111 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_read()
117 if (!indio_dev->info) in iio_buffer_read()
118 return -ENODEV; in iio_buffer_read()
120 if (!rb || !rb->access->read) in iio_buffer_read()
121 return -EINVAL; in iio_buffer_read()
123 if (rb->direction != IIO_BUFFER_DIRECTION_IN) in iio_buffer_read()
124 return -EPERM; in iio_buffer_read()
126 datum_size = rb->bytes_per_datum; in iio_buffer_read()
135 if (filp->f_flags & O_NONBLOCK) in iio_buffer_read()
138 to_wait = min_t(size_t, n / datum_size, rb->watermark); in iio_buffer_read()
140 add_wait_queue(&rb->pollq, &wait); in iio_buffer_read()
142 if (!indio_dev->info) { in iio_buffer_read()
143 ret = -ENODEV; in iio_buffer_read()
149 ret = -ERESTARTSYS; in iio_buffer_read()
158 ret = rb->access->read(rb, n, buf); in iio_buffer_read()
159 if (ret == 0 && (filp->f_flags & O_NONBLOCK)) in iio_buffer_read()
160 ret = -EAGAIN; in iio_buffer_read()
162 remove_wait_queue(&rb->pollq, &wait); in iio_buffer_read()
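The fragments above form the chrdev read path: iio_buffer_read() sleeps on rb->pollq until iio_buffer_ready() sees min(n / datum_size, watermark) samples, forcing a hwfifo flush for O_NONBLOCK readers instead of sleeping. From userspace this is an ordinary read() on the buffer character device. A minimal consumer sketch, assuming a hypothetical /dev/iio:device0 whose buffer has already been configured and enabled through sysfs:

```c
/* Minimal blocking reader for an IIO buffer chrdev.
 * The device path is illustrative; the buffer is assumed to be
 * already enabled (scan_elements + buffer/enable in sysfs). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/iio:device0", O_RDONLY);	/* blocking mode */

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* read() sleeps until the watermark is met, then returns a
	 * whole number of scan records (multiples of bytes_per_datum). */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		printf("got %zd bytes of scan data\n", n);
	if (n < 0)
		perror("read");
	close(fd);
	return 0;
}
```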
169 if (buf->access->space_available) in iio_buffer_space_available()
170 return buf->access->space_available(buf); in iio_buffer_space_available()
178 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_write()
179 struct iio_buffer *rb = ib->buffer; in iio_buffer_write()
180 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_write()
185 if (!indio_dev->info) in iio_buffer_write()
186 return -ENODEV; in iio_buffer_write()
188 if (!rb || !rb->access->write) in iio_buffer_write()
189 return -EINVAL; in iio_buffer_write()
191 if (rb->direction != IIO_BUFFER_DIRECTION_OUT) in iio_buffer_write()
192 return -EPERM; in iio_buffer_write()
195 add_wait_queue(&rb->pollq, &wait); in iio_buffer_write()
197 if (!indio_dev->info) in iio_buffer_write()
198 return -ENODEV; in iio_buffer_write()
202 ret = -ERESTARTSYS; in iio_buffer_write()
206 if (filp->f_flags & O_NONBLOCK) { in iio_buffer_write()
208 ret = -EAGAIN; in iio_buffer_write()
217 ret = rb->access->write(rb, n - written, buf + written); in iio_buffer_write()
224 remove_wait_queue(&rb->pollq, &wait); in iio_buffer_write()
230 * iio_buffer_poll() - poll the buffer to find out if it has data
241 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_poll()
242 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll()
243 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_poll()
245 if (!indio_dev->info || !rb) in iio_buffer_poll()
248 poll_wait(filp, &rb->pollq, wait); in iio_buffer_poll()
250 switch (rb->direction) { in iio_buffer_poll()
252 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) in iio_buffer_poll()
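iio_buffer_poll() reports EPOLLIN | EPOLLRDNORM once an input buffer holds at least watermark samples (output buffers signal writability instead), so userspace can wait for the watermark without committing to a read size. A sketch, with fd assumed to be an open IIO buffer chrdev as in the previous example:

```c
/* Wait for the buffer to reach its watermark before reading. */
#include <poll.h>
#include <unistd.h>

static ssize_t read_when_ready(int fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* Blocks until iio_buffer_ready() reports watermark samples. */
	if (poll(&pfd, 1, -1) < 0)
		return -1;
	return read(fd, buf, len);	/* should now return promptly */
}
```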
267 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_read_wrapper()
268 struct iio_buffer *rb = ib->buffer; in iio_buffer_read_wrapper()
271 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_read_wrapper()
272 return -EBUSY; in iio_buffer_read_wrapper()
280 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_write_wrapper()
281 struct iio_buffer *rb = ib->buffer; in iio_buffer_write_wrapper()
284 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_write_wrapper()
285 return -EBUSY; in iio_buffer_write_wrapper()
293 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_poll_wrapper()
294 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll_wrapper()
297 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_poll_wrapper()
304 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
316 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { in iio_buffer_wakeup_poll()
317 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffer_wakeup_poll()
318 wake_up(&buffer->pollq); in iio_buffer_wakeup_poll()
324 if (!buffer || !buffer->access || !buffer->access->remove_from) in iio_pop_from_buffer()
325 return -EINVAL; in iio_pop_from_buffer()
327 return buffer->access->remove_from(buffer, data); in iio_pop_from_buffer()
333 INIT_LIST_HEAD(&buffer->demux_list); in iio_buffer_init()
334 INIT_LIST_HEAD(&buffer->buffer_list); in iio_buffer_init()
335 init_waitqueue_head(&buffer->pollq); in iio_buffer_init()
336 kref_init(&buffer->ref); in iio_buffer_init()
337 if (!buffer->watermark) in iio_buffer_init()
338 buffer->watermark = 1; in iio_buffer_init()
348 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { in iio_device_detach_buffers()
349 buffer = iio_dev_opaque->attached_buffers[i]; in iio_device_detach_buffers()
353 kfree(iio_dev_opaque->attached_buffers); in iio_device_detach_buffers()
357 struct device_attribute *attr, in iio_show_scan_index() argument
360 return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); in iio_show_scan_index()
364 struct device_attribute *attr, in iio_show_fixed_type() argument
367 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); in iio_show_fixed_type()
368 u8 type = this_attr->c->scan_type.endianness; in iio_show_fixed_type()
377 if (this_attr->c->scan_type.repeat > 1) in iio_show_fixed_type()
380 this_attr->c->scan_type.sign, in iio_show_fixed_type()
381 this_attr->c->scan_type.realbits, in iio_show_fixed_type()
382 this_attr->c->scan_type.storagebits, in iio_show_fixed_type()
383 this_attr->c->scan_type.repeat, in iio_show_fixed_type()
384 this_attr->c->scan_type.shift); in iio_show_fixed_type()
388 this_attr->c->scan_type.sign, in iio_show_fixed_type()
389 this_attr->c->scan_type.realbits, in iio_show_fixed_type()
390 this_attr->c->scan_type.storagebits, in iio_show_fixed_type()
391 this_attr->c->scan_type.shift); in iio_show_fixed_type()
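iio_show_fixed_type() renders a channel's scan_type as the sysfs *_type string, of the form [be|le]:[s|u]realbits/storagebits[Xrepeat]>>shift, which userspace parses to unpack raw scan data. A hypothetical channel declaration and the string it would produce:

```c
/* Illustrative channel: 12 valid bits stored in 16, right-shifted
 * by 4, signed, little-endian. Names and values are examples only. */
#include <linux/iio/iio.h>

static const struct iio_chan_spec demo_chan = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.scan_index = 0,
	.scan_type = {
		.sign = 's',
		.realbits = 12,
		.storagebits = 16,
		.shift = 4,
		.endianness = IIO_LE,
	},
};
/* Appears in sysfs as in_voltage0_type = "le:s12/16>>4" */
```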
395 struct device_attribute *attr, in iio_scan_el_show() argument
399 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_show()
402 ret = !!test_bit(to_iio_dev_attr(attr)->address, in iio_scan_el_show()
403 buffer->scan_mask); in iio_scan_el_show()
417 * The condition here does not handle multi-long masks correctly. in iio_scan_mask_match()
424 * available_scan_masks is a zero-terminated array of longs - and in iio_scan_mask_match()
425 * using the proper bitmap_empty() check for multi-long wide masks in iio_scan_mask_match()
426 * would require the array to be terminated with multiple zero longs - in iio_scan_mask_match()
429 * As of this writing, no multi-long wide masks were found in-tree, so in iio_scan_mask_match()
448 if (!indio_dev->setup_ops->validate_scan_mask) in iio_validate_scan_mask()
451 return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); in iio_validate_scan_mask()
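iio_scan_mask_match() (see the comment above) walks indio_dev->available_scan_masks, a zero-terminated array, and returns the first entry that covers every requested bit. A driver that only supports certain channel combinations declares such an array at probe time; a sketch with hypothetical channel numbering:

```c
/* Hypothetical driver restricting scans to "both channels or nothing".
 * The array is zero-terminated, as iio_scan_mask_match() expects;
 * each mask fits in one long, which is all the core handles today. */
#include <linux/bits.h>

static const unsigned long demo_scan_masks[] = {
	BIT(0) | BIT(1),	/* channels 0 and 1 enabled together */
	0,			/* terminator */
};

/* in probe(): */
indio_dev->available_scan_masks = demo_scan_masks;
```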
455 * iio_scan_mask_set() - set particular bit in the scan mask
470 if (!indio_dev->masklength) { in iio_scan_mask_set()
472 return -EINVAL; in iio_scan_mask_set()
475 trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL); in iio_scan_mask_set()
477 return -ENOMEM; in iio_scan_mask_set()
478 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); in iio_scan_mask_set()
484 if (indio_dev->available_scan_masks) { in iio_scan_mask_set()
485 mask = iio_scan_mask_match(indio_dev->available_scan_masks, in iio_scan_mask_set()
486 indio_dev->masklength, in iio_scan_mask_set()
491 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); in iio_scan_mask_set()
499 return -EINVAL; in iio_scan_mask_set()
504 clear_bit(bit, buffer->scan_mask); in iio_scan_mask_clear()
511 if (bit > indio_dev->masklength) in iio_scan_mask_query()
512 return -EINVAL; in iio_scan_mask_query()
514 if (!buffer->scan_mask) in iio_scan_mask_query()
518 return !!test_bit(bit, buffer->scan_mask); in iio_scan_mask_query()
522 struct device_attribute *attr, in iio_scan_el_store() argument
530 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); in iio_scan_el_store()
531 struct iio_buffer *buffer = this_attr->buffer; in iio_scan_el_store()
536 mutex_lock(&iio_dev_opaque->mlock); in iio_scan_el_store()
538 ret = -EBUSY; in iio_scan_el_store()
541 ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
545 ret = iio_scan_mask_clear(buffer, this_attr->address); in iio_scan_el_store()
549 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
555 mutex_unlock(&iio_dev_opaque->mlock); in iio_scan_el_store()
561 struct device_attribute *attr, in iio_scan_el_ts_show() argument
564 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_show()
566 return sysfs_emit(buf, "%d\n", buffer->scan_timestamp); in iio_scan_el_ts_show()
570 struct device_attribute *attr, in iio_scan_el_ts_store() argument
577 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_store()
584 mutex_lock(&iio_dev_opaque->mlock); in iio_scan_el_ts_store()
586 ret = -EBUSY; in iio_scan_el_ts_store()
589 buffer->scan_timestamp = state; in iio_scan_el_ts_store()
591 mutex_unlock(&iio_dev_opaque->mlock); in iio_scan_el_ts_store()
608 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
610 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
620 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
622 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
626 if (chan->type != IIO_TIMESTAMP) in iio_buffer_add_channel_sysfs()
631 chan->scan_index, in iio_buffer_add_channel_sysfs()
633 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
635 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
641 chan->scan_index, in iio_buffer_add_channel_sysfs()
643 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
645 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
653 static ssize_t length_show(struct device *dev, struct device_attribute *attr, in length_show() argument
656 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_show()
658 return sysfs_emit(buf, "%d\n", buffer->length); in length_show()
661 static ssize_t length_store(struct device *dev, struct device_attribute *attr, in length_store() argument
666 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_store()
674 if (val == buffer->length) in length_store()
677 mutex_lock(&iio_dev_opaque->mlock); in length_store()
679 ret = -EBUSY; in length_store()
681 buffer->access->set_length(buffer, val); in length_store()
686 if (buffer->length && buffer->length < buffer->watermark) in length_store()
687 buffer->watermark = buffer->length; in length_store()
689 mutex_unlock(&iio_dev_opaque->mlock); in length_store()
694 static ssize_t enable_show(struct device *dev, struct device_attribute *attr, in enable_show() argument
697 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_show()
709 bytes = ch->scan_type.storagebits / 8; in iio_storage_bytes_for_si()
710 if (ch->scan_type.repeat > 1) in iio_storage_bytes_for_si()
711 bytes *= ch->scan_type.repeat; in iio_storage_bytes_for_si()
720 iio_dev_opaque->scan_index_timestamp); in iio_storage_bytes_for_timestamp()
731 indio_dev->masklength) { in iio_compute_scan_bytes()
755 list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); in iio_buffer_activate()
760 list_del_init(&buffer->buffer_list); in iio_buffer_deactivate()
761 wake_up_interruptible(&buffer->pollq); in iio_buffer_deactivate()
771 &iio_dev_opaque->buffer_list, buffer_list) in iio_buffer_deactivate_all()
778 if (!buffer->access->enable) in iio_buffer_enable()
780 return buffer->access->enable(buffer, indio_dev); in iio_buffer_enable()
786 if (!buffer->access->disable) in iio_buffer_disable()
788 return buffer->access->disable(buffer, indio_dev); in iio_buffer_disable()
796 if (!buffer->access->set_bytes_per_datum) in iio_buffer_update_bytes_per_datum()
799 bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, in iio_buffer_update_bytes_per_datum()
800 buffer->scan_timestamp); in iio_buffer_update_bytes_per_datum()
802 buffer->access->set_bytes_per_datum(buffer, bytes); in iio_buffer_update_bytes_per_datum()
811 if (buffer->access->request_update) { in iio_buffer_request_update()
812 ret = buffer->access->request_update(buffer); in iio_buffer_request_update()
814 dev_dbg(&indio_dev->dev, in iio_buffer_request_update()
828 if (!indio_dev->available_scan_masks) in iio_free_scan_mask()
854 bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) { in iio_verify_update()
855 dev_dbg(&indio_dev->dev, in iio_verify_update()
857 return -EINVAL; in iio_verify_update()
861 config->watermark = ~0; in iio_verify_update()
868 list_is_singular(&iio_dev_opaque->buffer_list)) in iio_verify_update()
871 modes = indio_dev->modes; in iio_verify_update()
873 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
876 modes &= buffer->access->modes; in iio_verify_update()
877 config->watermark = min(config->watermark, buffer->watermark); in iio_verify_update()
881 modes &= insert_buffer->access->modes; in iio_verify_update()
882 config->watermark = min(config->watermark, in iio_verify_update()
883 insert_buffer->watermark); in iio_verify_update()
887 if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) { in iio_verify_update()
888 config->mode = INDIO_BUFFER_TRIGGERED; in iio_verify_update()
894 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) in iio_verify_update()
895 return -EINVAL; in iio_verify_update()
896 config->mode = INDIO_BUFFER_HARDWARE; in iio_verify_update()
899 config->mode = INDIO_BUFFER_SOFTWARE; in iio_verify_update()
902 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) in iio_verify_update()
903 dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); in iio_verify_update()
904 return -EINVAL; in iio_verify_update()
908 compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL); in iio_verify_update()
910 return -ENOMEM; in iio_verify_update()
914 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
917 bitmap_or(compound_mask, compound_mask, buffer->scan_mask, in iio_verify_update()
918 indio_dev->masklength); in iio_verify_update()
919 scan_timestamp |= buffer->scan_timestamp; in iio_verify_update()
924 insert_buffer->scan_mask, indio_dev->masklength); in iio_verify_update()
925 scan_timestamp |= insert_buffer->scan_timestamp; in iio_verify_update()
928 if (indio_dev->available_scan_masks) { in iio_verify_update()
929 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, in iio_verify_update()
930 indio_dev->masklength, in iio_verify_update()
935 return -EINVAL; in iio_verify_update()
940 config->scan_bytes = iio_compute_scan_bytes(indio_dev, in iio_verify_update()
942 config->scan_mask = scan_mask; in iio_verify_update()
943 config->scan_timestamp = scan_timestamp; in iio_verify_update()
949 * struct iio_demux_table - table describing demux memcpy ops
966 list_for_each_entry_safe(p, q, &buffer->demux_list, l) { in iio_buffer_demux_free()
967 list_del(&p->l); in iio_buffer_demux_free()
977 if (*p && (*p)->from + (*p)->length == in_loc && in iio_buffer_add_demux()
978 (*p)->to + (*p)->length == out_loc) { in iio_buffer_add_demux()
979 (*p)->length += length; in iio_buffer_add_demux()
983 return -ENOMEM; in iio_buffer_add_demux()
984 (*p)->from = in_loc; in iio_buffer_add_demux()
985 (*p)->to = out_loc; in iio_buffer_add_demux()
986 (*p)->length = length; in iio_buffer_add_demux()
987 list_add_tail(&(*p)->l, &buffer->demux_list); in iio_buffer_add_demux()
996 int ret, in_ind = -1, out_ind, length; in iio_buffer_update_demux()
1002 kfree(buffer->demux_bounce); in iio_buffer_update_demux()
1003 buffer->demux_bounce = NULL; in iio_buffer_update_demux()
1006 if (bitmap_equal(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1007 buffer->scan_mask, in iio_buffer_update_demux()
1008 indio_dev->masklength)) in iio_buffer_update_demux()
1013 buffer->scan_mask, in iio_buffer_update_demux()
1014 indio_dev->masklength) { in iio_buffer_update_demux()
1015 in_ind = find_next_bit(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1016 indio_dev->masklength, in iio_buffer_update_demux()
1022 in_ind = find_next_bit(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1023 indio_dev->masklength, in iio_buffer_update_demux()
1036 if (buffer->scan_timestamp) { in iio_buffer_update_demux()
1045 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); in iio_buffer_update_demux()
1046 if (!buffer->demux_bounce) { in iio_buffer_update_demux()
1047 ret = -ENOMEM; in iio_buffer_update_demux()
1064 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_update_demux()
1072 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) in iio_update_demux()
1085 indio_dev->active_scan_mask = config->scan_mask; in iio_enable_buffers()
1086 indio_dev->scan_timestamp = config->scan_timestamp; in iio_enable_buffers()
1087 indio_dev->scan_bytes = config->scan_bytes; in iio_enable_buffers()
1088 iio_dev_opaque->currentmode = config->mode; in iio_enable_buffers()
1093 if (indio_dev->setup_ops->preenable) { in iio_enable_buffers()
1094 ret = indio_dev->setup_ops->preenable(indio_dev); in iio_enable_buffers()
1096 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1102 if (indio_dev->info->update_scan_mode) { in iio_enable_buffers()
1103 ret = indio_dev->info in iio_enable_buffers()
1104 ->update_scan_mode(indio_dev, in iio_enable_buffers()
1105 indio_dev->active_scan_mask); in iio_enable_buffers()
1107 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1114 if (indio_dev->info->hwfifo_set_watermark) in iio_enable_buffers()
1115 indio_dev->info->hwfifo_set_watermark(indio_dev, in iio_enable_buffers()
1116 config->watermark); in iio_enable_buffers()
1118 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_enable_buffers()
1126 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_enable_buffers()
1127 ret = iio_trigger_attach_poll_func(indio_dev->trig, in iio_enable_buffers()
1128 indio_dev->pollfunc); in iio_enable_buffers()
1133 if (indio_dev->setup_ops->postenable) { in iio_enable_buffers()
1134 ret = indio_dev->setup_ops->postenable(indio_dev); in iio_enable_buffers()
1136 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1145 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_enable_buffers()
1146 iio_trigger_detach_poll_func(indio_dev->trig, in iio_enable_buffers()
1147 indio_dev->pollfunc); in iio_enable_buffers()
1150 buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); in iio_enable_buffers()
1151 list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, in iio_enable_buffers()
1155 if (indio_dev->setup_ops->postdisable) in iio_enable_buffers()
1156 indio_dev->setup_ops->postdisable(indio_dev); in iio_enable_buffers()
1158 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; in iio_enable_buffers()
1159 indio_dev->active_scan_mask = NULL; in iio_enable_buffers()
1171 /* Wind down existing buffers - iff there are any */ in iio_disable_buffers()
1172 if (list_empty(&iio_dev_opaque->buffer_list)) in iio_disable_buffers()
1182 if (indio_dev->setup_ops->predisable) { in iio_disable_buffers()
1183 ret2 = indio_dev->setup_ops->predisable(indio_dev); in iio_disable_buffers()
1188 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_disable_buffers()
1189 iio_trigger_detach_poll_func(indio_dev->trig, in iio_disable_buffers()
1190 indio_dev->pollfunc); in iio_disable_buffers()
1193 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_disable_buffers()
1199 if (indio_dev->setup_ops->postdisable) { in iio_disable_buffers()
1200 ret2 = indio_dev->setup_ops->postdisable(indio_dev); in iio_disable_buffers()
1205 iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); in iio_disable_buffers()
1206 indio_dev->active_scan_mask = NULL; in iio_disable_buffers()
1207 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; in iio_disable_buffers()
1241 if (list_empty(&iio_dev_opaque->buffer_list)) in __iio_update_buffers()
1277 insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT) in iio_update_buffers()
1278 return -EINVAL; in iio_update_buffers()
1280 mutex_lock(&iio_dev_opaque->info_exist_lock); in iio_update_buffers()
1281 mutex_lock(&iio_dev_opaque->mlock); in iio_update_buffers()
1294 if (!indio_dev->info) { in iio_update_buffers()
1295 ret = -ENODEV; in iio_update_buffers()
1302 mutex_unlock(&iio_dev_opaque->mlock); in iio_update_buffers()
1303 mutex_unlock(&iio_dev_opaque->info_exist_lock); in iio_update_buffers()
1315 static ssize_t enable_store(struct device *dev, struct device_attribute *attr, in enable_store() argument
1322 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_store()
1329 mutex_lock(&iio_dev_opaque->mlock); in enable_store()
1343 mutex_unlock(&iio_dev_opaque->mlock); in enable_store()
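enable_store() is the sysfs handler behind the buffer's enable attribute: it takes mlock, rejects changes while capture state cannot be altered, and calls __iio_update_buffers() to start or wind down capture. From userspace the whole dance is a one-byte sysfs write; a sketch with an assumed device and buffer path:

```c
/* Enable or disable buffered capture via sysfs; path illustrative
 * (legacy single-buffer devices expose buffer/enable instead). */
#include <fcntl.h>
#include <unistd.h>

static int demo_buffer_enable(int on)
{
	int ret;
	int fd = open("/sys/bus/iio/devices/iio:device0/buffer0/enable",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	ret = write(fd, on ? "1" : "0", 1);
	close(fd);
	return ret == 1 ? 0 : -1;
}
```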
1347 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr, in watermark_show() argument
1350 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_show()
1352 return sysfs_emit(buf, "%u\n", buffer->watermark); in watermark_show()
1356 struct device_attribute *attr, in watermark_store() argument
1361 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_store()
1369 return -EINVAL; in watermark_store()
1371 mutex_lock(&iio_dev_opaque->mlock); in watermark_store()
1373 if (val > buffer->length) { in watermark_store()
1374 ret = -EINVAL; in watermark_store()
1379 ret = -EBUSY; in watermark_store()
1383 buffer->watermark = val; in watermark_store()
1385 mutex_unlock(&iio_dev_opaque->mlock); in watermark_store()
1391 struct device_attribute *attr, char *buf) in data_available_show() argument
1393 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in data_available_show()
1399 struct device_attribute *attr, in direction_show() argument
1402 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in direction_show()
1404 switch (buffer->direction) { in direction_show()
1410 return -EINVAL; in direction_show()
1429 &dev_attr_length.attr,
1430 &dev_attr_enable.attr,
1431 &dev_attr_watermark.attr,
1432 &dev_attr_data_available.attr,
1433 &dev_attr_direction.attr,
1436 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1439 struct attribute *attr) in iio_buffer_wrap_attr() argument
1441 struct device_attribute *dattr = to_dev_attr(attr); in iio_buffer_wrap_attr()
1448 iio_attr->buffer = buffer; in iio_buffer_wrap_attr()
1449 memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr)); in iio_buffer_wrap_attr()
1450 iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL); in iio_buffer_wrap_attr()
1451 if (!iio_attr->dev_attr.attr.name) { in iio_buffer_wrap_attr()
1456 sysfs_attr_init(&iio_attr->dev_attr.attr); in iio_buffer_wrap_attr()
1458 list_add(&iio_attr->l, &buffer->buffer_attr_list); in iio_buffer_wrap_attr()
1460 return &iio_attr->dev_attr.attr; in iio_buffer_wrap_attr()
1475 return -ENOMEM; in iio_buffer_register_legacy_sysfs_groups()
1479 group = &iio_dev_opaque->legacy_buffer_group; in iio_buffer_register_legacy_sysfs_groups()
1480 group->attrs = attrs; in iio_buffer_register_legacy_sysfs_groups()
1481 group->name = "buffer"; in iio_buffer_register_legacy_sysfs_groups()
1489 ret = -ENOMEM; in iio_buffer_register_legacy_sysfs_groups()
1496 group = &iio_dev_opaque->legacy_scan_el_group; in iio_buffer_register_legacy_sysfs_groups()
1497 group->attrs = attrs; in iio_buffer_register_legacy_sysfs_groups()
1498 group->name = "scan_elements"; in iio_buffer_register_legacy_sysfs_groups()
1507 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); in iio_buffer_register_legacy_sysfs_groups()
1509 kfree(iio_dev_opaque->legacy_buffer_group.attrs); in iio_buffer_register_legacy_sysfs_groups()
1518 kfree(iio_dev_opaque->legacy_buffer_group.attrs); in iio_buffer_unregister_legacy_sysfs_groups()
1519 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); in iio_buffer_unregister_legacy_sysfs_groups()
1524 struct iio_dev_buffer_pair *ib = filep->private_data; in iio_buffer_chrdev_release()
1525 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_chrdev_release()
1526 struct iio_buffer *buffer = ib->buffer; in iio_buffer_chrdev_release()
1528 wake_up(&buffer->pollq); in iio_buffer_chrdev_release()
1531 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_buffer_chrdev_release()
1555 return -EFAULT; in iio_device_buffer_getfd()
1557 if (idx >= iio_dev_opaque->attached_buffers_cnt) in iio_device_buffer_getfd()
1558 return -ENODEV; in iio_device_buffer_getfd()
1562 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_device_buffer_getfd()
1564 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) { in iio_device_buffer_getfd()
1565 ret = -EBUSY; in iio_device_buffer_getfd()
1571 ret = -ENOMEM; in iio_device_buffer_getfd()
1575 ib->indio_dev = indio_dev; in iio_device_buffer_getfd()
1576 ib->buffer = buffer; in iio_device_buffer_getfd()
1596 return -EFAULT; in iio_device_buffer_getfd()
1604 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_device_buffer_getfd()
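iio_device_buffer_getfd() backs the IIO_BUFFER_GET_FD_IOCTL uapi: userspace passes a buffer index in an int, the kernel marks that buffer busy, wraps an iio_dev_buffer_pair in an anonymous fd, and writes the new fd back through the same pointer. A userspace sketch (device path and index are illustrative):

```c
/* Obtain a chrdev fd for an extra attached buffer via the ioctl. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/buffer.h>	/* IIO_BUFFER_GET_FD_IOCTL */

static int open_iio_buffer(const char *dev_path, int index)
{
	int devfd = open(dev_path, O_RDONLY);
	int arg = index;	/* in: buffer index, out: new fd */

	if (devfd < 0)
		return -1;
	if (ioctl(devfd, IIO_BUFFER_GET_FD_IOCTL, &arg) < 0) {
		close(devfd);
		return -1;
	}
	/* The anon fd holds its own device reference, so the
	 * original device fd can be closed. Buffer 0 may also be
	 * read directly from the device chrdev. */
	close(devfd);
	return arg;
}
```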
1628 struct attribute **attr; in __iio_buffer_alloc_sysfs_and_mask() local
1633 if (buffer->attrs) { in __iio_buffer_alloc_sysfs_and_mask()
1634 while (buffer->attrs[buffer_attrcount]) in __iio_buffer_alloc_sysfs_and_mask()
1640 INIT_LIST_HEAD(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
1641 channels = indio_dev->channels; in __iio_buffer_alloc_sysfs_and_mask()
1644 for (i = 0; i < indio_dev->num_channels; i++) { in __iio_buffer_alloc_sysfs_and_mask()
1652 dev_err(&indio_dev->dev, in __iio_buffer_alloc_sysfs_and_mask()
1657 ret = -EINVAL; in __iio_buffer_alloc_sysfs_and_mask()
1667 iio_dev_opaque->scan_index_timestamp = in __iio_buffer_alloc_sysfs_and_mask()
1670 if (indio_dev->masklength && !buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
1671 buffer->scan_mask = bitmap_zalloc(indio_dev->masklength, in __iio_buffer_alloc_sysfs_and_mask()
1673 if (!buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
1674 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
1681 attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); in __iio_buffer_alloc_sysfs_and_mask()
1682 if (!attr) { in __iio_buffer_alloc_sysfs_and_mask()
1683 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
1687 memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs)); in __iio_buffer_alloc_sysfs_and_mask()
1688 if (!buffer->access->set_length) in __iio_buffer_alloc_sysfs_and_mask()
1689 attr[0] = &dev_attr_length_ro.attr; in __iio_buffer_alloc_sysfs_and_mask()
1691 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) in __iio_buffer_alloc_sysfs_and_mask()
1692 attr[2] = &dev_attr_watermark_ro.attr; in __iio_buffer_alloc_sysfs_and_mask()
1694 if (buffer->attrs) in __iio_buffer_alloc_sysfs_and_mask()
1695 for (i = 0, id_attr = buffer->attrs[i]; in __iio_buffer_alloc_sysfs_and_mask()
1696 (id_attr = buffer->attrs[i]); i++) in __iio_buffer_alloc_sysfs_and_mask()
1697 attr[ARRAY_SIZE(iio_buffer_attrs) + i] = in __iio_buffer_alloc_sysfs_and_mask()
1698 (struct attribute *)&id_attr->dev_attr.attr; in __iio_buffer_alloc_sysfs_and_mask()
1700 buffer->buffer_group.attrs = attr; in __iio_buffer_alloc_sysfs_and_mask()
1705 wrapped = iio_buffer_wrap_attr(buffer, attr[i]); in __iio_buffer_alloc_sysfs_and_mask()
1707 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
1710 attr[i] = wrapped; in __iio_buffer_alloc_sysfs_and_mask()
1714 list_for_each_entry(p, &buffer->buffer_attr_list, l) in __iio_buffer_alloc_sysfs_and_mask()
1715 attr[attrn++] = &p->dev_attr.attr; in __iio_buffer_alloc_sysfs_and_mask()
1717 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index); in __iio_buffer_alloc_sysfs_and_mask()
1718 if (!buffer->buffer_group.name) { in __iio_buffer_alloc_sysfs_and_mask()
1719 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
1723 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group); in __iio_buffer_alloc_sysfs_and_mask()
1731 ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr, in __iio_buffer_alloc_sysfs_and_mask()
1740 kfree(buffer->buffer_group.name); in __iio_buffer_alloc_sysfs_and_mask()
1742 kfree(buffer->buffer_group.attrs); in __iio_buffer_alloc_sysfs_and_mask()
1744 bitmap_free(buffer->scan_mask); in __iio_buffer_alloc_sysfs_and_mask()
1746 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
1757 bitmap_free(buffer->scan_mask); in __iio_buffer_free_sysfs_and_mask()
1758 kfree(buffer->buffer_group.name); in __iio_buffer_free_sysfs_and_mask()
1759 kfree(buffer->buffer_group.attrs); in __iio_buffer_free_sysfs_and_mask()
1760 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_free_sysfs_and_mask()
1771 channels = indio_dev->channels; in iio_buffers_alloc_sysfs_and_mask()
1773 int ml = indio_dev->masklength; in iio_buffers_alloc_sysfs_and_mask()
1775 for (i = 0; i < indio_dev->num_channels; i++) in iio_buffers_alloc_sysfs_and_mask()
1777 indio_dev->masklength = ml; in iio_buffers_alloc_sysfs_and_mask()
1780 if (!iio_dev_opaque->attached_buffers_cnt) in iio_buffers_alloc_sysfs_and_mask()
1783 for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) { in iio_buffers_alloc_sysfs_and_mask()
1784 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
1790 sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_alloc_sysfs_and_mask()
1791 iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL); in iio_buffers_alloc_sysfs_and_mask()
1792 if (!iio_dev_opaque->buffer_ioctl_handler) { in iio_buffers_alloc_sysfs_and_mask()
1793 ret = -ENOMEM; in iio_buffers_alloc_sysfs_and_mask()
1797 iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl; in iio_buffers_alloc_sysfs_and_mask()
1799 iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_alloc_sysfs_and_mask()
1804 while (idx--) { in iio_buffers_alloc_sysfs_and_mask()
1805 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
1817 if (!iio_dev_opaque->attached_buffers_cnt) in iio_buffers_free_sysfs_and_mask()
1820 iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_free_sysfs_and_mask()
1821 kfree(iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_free_sysfs_and_mask()
1823 for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) { in iio_buffers_free_sysfs_and_mask()
1824 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffers_free_sysfs_and_mask()
1830 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1841 return bitmap_weight(mask, indio_dev->masklength) == 1; in iio_validate_scan_mask_onehot()
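iio_validate_scan_mask_onehot() is a ready-made validate_scan_mask callback for hardware that can only capture a single channel per scan; drivers hook it up through their buffer setup ops. A sketch, with the ops structure name hypothetical:

```c
/* Hypothetical setup ops for a one-channel-at-a-time device. */
static const struct iio_buffer_setup_ops demo_setup_ops = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};

/* in probe(): */
indio_dev->setup_ops = &demo_setup_ops;
```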
1850 if (list_empty(&buffer->demux_list)) in iio_demux()
1852 list_for_each_entry(t, &buffer->demux_list, l) in iio_demux()
1853 memcpy(buffer->demux_bounce + t->to, in iio_demux()
1854 datain + t->from, t->length); in iio_demux()
1856 return buffer->demux_bounce; in iio_demux()
1864 ret = buffer->access->store_to(buffer, dataout); in iio_push_to_buffer()
1872 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); in iio_push_to_buffer()
1877 * iio_push_to_buffers() - push to a registered buffer.
1887 list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) { in iio_push_to_buffers()
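iio_push_to_buffer() demuxes one scan into a buffer, stores it, and wakes pollers with EPOLLIN; iio_push_to_buffers() fans the same scan out to every buffer on buffer_list. The usual producer is a triggered-buffer bottom half; a sketch whose sample layout and demo_read_hw() helper are hypothetical:

```c
/* Hypothetical pollfunc bottom half pushing one scan.
 * Layout assumes two 16-bit channels plus an 8-byte-aligned s64
 * timestamp, matching what iio_compute_scan_bytes() would size. */
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

static irqreturn_t demo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 chans[2];
		s64 timestamp __aligned(8);
	} scan = { };	/* zeroed so padding never leaks to userspace */

	demo_read_hw(indio_dev, scan.chans);	/* hypothetical hw read */
	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
```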
1898 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1918 * Conservative estimate - we can always safely copy the minimum in iio_push_to_buffers_with_ts_unaligned()
1924 data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz); in iio_push_to_buffers_with_ts_unaligned()
1925 if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) { in iio_push_to_buffers_with_ts_unaligned()
1928 bb = devm_krealloc(&indio_dev->dev, in iio_push_to_buffers_with_ts_unaligned()
1929 iio_dev_opaque->bounce_buffer, in iio_push_to_buffers_with_ts_unaligned()
1930 indio_dev->scan_bytes, GFP_KERNEL); in iio_push_to_buffers_with_ts_unaligned()
1932 return -ENOMEM; in iio_push_to_buffers_with_ts_unaligned()
1933 iio_dev_opaque->bounce_buffer = bb; in iio_push_to_buffers_with_ts_unaligned()
1934 iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes; in iio_push_to_buffers_with_ts_unaligned()
1936 memcpy(iio_dev_opaque->bounce_buffer, data, data_sz); in iio_push_to_buffers_with_ts_unaligned()
1938 iio_dev_opaque->bounce_buffer, in iio_push_to_buffers_with_ts_unaligned()
1944 * iio_buffer_release() - Free a buffer's resources
1956 buffer->access->release(buffer); in iio_buffer_release()
1960 * iio_buffer_get() - Grab a reference to the buffer
1968 kref_get(&buffer->ref); in iio_buffer_get()
1975 * iio_buffer_put() - Release the reference to the buffer
1981 kref_put(&buffer->ref, iio_buffer_release); in iio_buffer_put()
1986 * iio_device_attach_buffer - Attach a buffer to an IIO device
1994 * buffer will also be assigned to 'indio_dev->buffer'.
2002 struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers; in iio_device_attach_buffer()
2003 unsigned int cnt = iio_dev_opaque->attached_buffers_cnt; in iio_device_attach_buffer()
2009 return -ENOMEM; in iio_device_attach_buffer()
2010 iio_dev_opaque->attached_buffers = new; in iio_device_attach_buffer()
2015 if (!indio_dev->buffer) in iio_device_attach_buffer()
2016 indio_dev->buffer = buffer; in iio_device_attach_buffer()
2018 iio_dev_opaque->attached_buffers[cnt - 1] = buffer; in iio_device_attach_buffer()
2019 iio_dev_opaque->attached_buffers_cnt = cnt; in iio_device_attach_buffer()
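iio_device_attach_buffer() reallocates the attached_buffers array, takes a reference on the buffer, and makes the first attached buffer the legacy indio_dev->buffer. Drivers rarely call it directly; the kfifo and triggered-buffer setup helpers do it internally. A probe-time sketch reusing the demo_setup_ops above; devm_iio_kfifo_buffer_setup() is a real helper but its signature has varied across kernel versions, so treat the call as illustrative:

```c
/* Sketch: attach a kfifo buffer during probe via the devm helper,
 * which allocates the buffer and calls iio_device_attach_buffer()
 * internally. Function names besides the helpers are hypothetical. */
#include <linux/iio/kfifo_buf.h>

static int demo_probe(struct device *dev, struct iio_dev *indio_dev)
{
	int ret;

	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
					  &demo_setup_ops);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}
```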