Lines matching full:buffer in drivers/iio/industrialio-buffer.c (Linux kernel IIO core).
Each entry shows the line number within that file, the matching source line, and the enclosing function; the trailing 'argument' and 'local' tags mark matches on a function parameter or a local variable.
6 * Handling of buffer allocation / resizing.
28 #include <linux/iio/buffer.h>
65 /* drain the buffer if it was disabled */ in iio_buffer_ready()
94 * iio_buffer_read() - chrdev read for buffer access
96 * @buf: Destination buffer for iio buffer read
100 * This function relies on all buffer implementations having an
110 struct iio_buffer *rb = ib->buffer; in iio_buffer_read()
130 * buffer, so signal end of file now. in iio_buffer_read()
179 struct iio_buffer *rb = ib->buffer; in iio_buffer_write()
230 * iio_buffer_poll() - poll the buffer to find out if it has data
242 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll()
268 struct iio_buffer *rb = ib->buffer; in iio_buffer_read_wrapper()
270 /* check if buffer was opened through new API */ in iio_buffer_read_wrapper()
281 struct iio_buffer *rb = ib->buffer; in iio_buffer_write_wrapper()
283 /* check if buffer was opened through new API */ in iio_buffer_write_wrapper()
294 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll_wrapper()
296 /* check if buffer was opened through new API */ in iio_buffer_poll_wrapper()
304 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
313 struct iio_buffer *buffer; in iio_buffer_wakeup_poll() local
317 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffer_wakeup_poll()
318 wake_up(&buffer->pollq); in iio_buffer_wakeup_poll()
322 int iio_pop_from_buffer(struct iio_buffer *buffer, void *data) in iio_pop_from_buffer() argument
324 if (!buffer || !buffer->access || !buffer->access->remove_from) in iio_pop_from_buffer()
327 return buffer->access->remove_from(buffer, data); in iio_pop_from_buffer()
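Note: iio_pop_from_buffer() simply dispatches to the buffer implementation's remove_from() callback and fails when none is provided. A minimal sketch of the consumer side, assuming an output-direction (IIO_BUFFER_DIRECTION_OUT) buffer and a hypothetical my_tx_one_scan() helper; iio_pop_from_buffer() itself is the exported helper shown above, declared in <linux/iio/buffer.h>:

    #include <linux/iio/buffer.h>

    /* Pop one scan that userspace pushed into an output buffer and hand it
     * to the hardware. 'scan' must be at least bytes_per_datum in size. */
    static int my_tx_one_scan(struct iio_buffer *buffer, void *scan)
    {
            int ret;

            /* Fails if the implementation has no remove_from() callback,
             * as the check above shows. */
            ret = iio_pop_from_buffer(buffer, scan);
            if (ret)
                    return ret;

            /* ...write 'scan' to the device FIFO here... */
            return 0;
    }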
331 void iio_buffer_init(struct iio_buffer *buffer) in iio_buffer_init() argument
333 INIT_LIST_HEAD(&buffer->demux_list); in iio_buffer_init()
334 INIT_LIST_HEAD(&buffer->buffer_list); in iio_buffer_init()
335 init_waitqueue_head(&buffer->pollq); in iio_buffer_init()
336 kref_init(&buffer->ref); in iio_buffer_init()
337 if (!buffer->watermark) in iio_buffer_init()
338 buffer->watermark = 1; in iio_buffer_init()
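Note: a sketch of how a buffer implementation typically calls iio_buffer_init() on an embedded struct iio_buffer before handing it to the core. struct my_buffer and my_buffer_alloc() are hypothetical; struct iio_buffer and iio_buffer_init() come from <linux/iio/buffer_impl.h>:

    #include <linux/slab.h>
    #include <linux/iio/buffer_impl.h>

    struct my_buffer {
            struct iio_buffer buffer;       /* embedded; refcounted by the core */
            /* implementation-private state would follow */
    };

    static struct iio_buffer *my_buffer_alloc(void)
    {
            struct my_buffer *mb = kzalloc(sizeof(*mb), GFP_KERNEL);

            if (!mb)
                    return NULL;

            /* Initializes demux_list, buffer_list, pollq and the kref, and
             * defaults the watermark to 1, as the lines above show. */
            iio_buffer_init(&mb->buffer);

            return &mb->buffer;
    }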
345 struct iio_buffer *buffer; in iio_device_detach_buffers() local
349 buffer = iio_dev_opaque->attached_buffers[i]; in iio_device_detach_buffers()
350 iio_buffer_put(buffer); in iio_device_detach_buffers()
399 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_show() local
403 buffer->scan_mask); in iio_scan_el_show()
457 * @buffer: the buffer whose scan mask we are interested in
465 struct iio_buffer *buffer, int bit) in iio_scan_mask_set() argument
471 WARN(1, "Trying to set scanmask prior to registering buffer\n"); in iio_scan_mask_set()
478 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); in iio_scan_mask_set()
491 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); in iio_scan_mask_set()
502 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) in iio_scan_mask_clear() argument
504 clear_bit(bit, buffer->scan_mask); in iio_scan_mask_clear()
509 struct iio_buffer *buffer, int bit) in iio_scan_mask_query() argument
514 if (!buffer->scan_mask) in iio_scan_mask_query()
518 return !!test_bit(bit, buffer->scan_mask); in iio_scan_mask_query()
531 struct iio_buffer *buffer = this_attr->buffer; in iio_scan_el_store() local
537 if (iio_buffer_is_active(buffer)) { in iio_scan_el_store()
541 ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
545 ret = iio_scan_mask_clear(buffer, this_attr->address); in iio_scan_el_store()
549 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
564 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_show() local
566 return sysfs_emit(buf, "%d\n", buffer->scan_timestamp); in iio_scan_el_ts_show()
577 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_store() local
585 if (iio_buffer_is_active(buffer)) { in iio_scan_el_ts_store()
589 buffer->scan_timestamp = state; in iio_scan_el_ts_store()
597 struct iio_buffer *buffer, in iio_buffer_add_channel_sysfs() argument
609 buffer, in iio_buffer_add_channel_sysfs()
610 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
621 buffer, in iio_buffer_add_channel_sysfs()
622 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
634 buffer, in iio_buffer_add_channel_sysfs()
635 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
644 buffer, in iio_buffer_add_channel_sysfs()
645 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
656 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_show() local
658 return sysfs_emit(buf, "%d\n", buffer->length); in length_show()
666 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_store() local
674 if (val == buffer->length) in length_store()
678 if (iio_buffer_is_active(buffer)) { in length_store()
681 buffer->access->set_length(buffer, val); in length_store()
686 if (buffer->length && buffer->length < buffer->watermark) in length_store()
687 buffer->watermark = buffer->length; in length_store()
697 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_show() local
699 return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer)); in enable_show()
750 struct iio_buffer *buffer) in iio_buffer_activate() argument
754 iio_buffer_get(buffer); in iio_buffer_activate()
755 list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); in iio_buffer_activate()
758 static void iio_buffer_deactivate(struct iio_buffer *buffer) in iio_buffer_deactivate() argument
760 list_del_init(&buffer->buffer_list); in iio_buffer_deactivate()
761 wake_up_interruptible(&buffer->pollq); in iio_buffer_deactivate()
762 iio_buffer_put(buffer); in iio_buffer_deactivate()
768 struct iio_buffer *buffer, *_buffer; in iio_buffer_deactivate_all() local
770 list_for_each_entry_safe(buffer, _buffer, in iio_buffer_deactivate_all()
772 iio_buffer_deactivate(buffer); in iio_buffer_deactivate_all()
775 static int iio_buffer_enable(struct iio_buffer *buffer, in iio_buffer_enable() argument
778 if (!buffer->access->enable) in iio_buffer_enable()
780 return buffer->access->enable(buffer, indio_dev); in iio_buffer_enable()
783 static int iio_buffer_disable(struct iio_buffer *buffer, in iio_buffer_disable() argument
786 if (!buffer->access->disable) in iio_buffer_disable()
788 return buffer->access->disable(buffer, indio_dev); in iio_buffer_disable()
792 struct iio_buffer *buffer) in iio_buffer_update_bytes_per_datum() argument
796 if (!buffer->access->set_bytes_per_datum) in iio_buffer_update_bytes_per_datum()
799 bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, in iio_buffer_update_bytes_per_datum()
800 buffer->scan_timestamp); in iio_buffer_update_bytes_per_datum()
802 buffer->access->set_bytes_per_datum(buffer, bytes); in iio_buffer_update_bytes_per_datum()
806 struct iio_buffer *buffer) in iio_buffer_request_update() argument
810 iio_buffer_update_bytes_per_datum(indio_dev, buffer); in iio_buffer_request_update()
811 if (buffer->access->request_update) { in iio_buffer_request_update()
812 ret = buffer->access->request_update(buffer); in iio_buffer_request_update()
815 "Buffer not started: buffer parameter update failed (%d)\n", in iio_buffer_request_update()
849 struct iio_buffer *buffer; in iio_verify_update() local
864 * If there is just one buffer and we are removing it there is nothing in iio_verify_update()
873 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
874 if (buffer == remove_buffer) in iio_verify_update()
876 modes &= buffer->access->modes; in iio_verify_update()
877 config->watermark = min(config->watermark, buffer->watermark); in iio_verify_update()
891 * Keep things simple for now and only allow a single buffer to in iio_verify_update()
901 /* Can only occur on first buffer */ in iio_verify_update()
903 dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); in iio_verify_update()
914 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
915 if (buffer == remove_buffer) in iio_verify_update()
917 bitmap_or(compound_mask, compound_mask, buffer->scan_mask, in iio_verify_update()
919 scan_timestamp |= buffer->scan_timestamp; in iio_verify_update()
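Note: iio_verify_update() ORs the scan_mask of every buffer that stays enabled into a compound mask; after enable, drivers see the result (possibly widened to one of the driver's available_scan_masks) as indio_dev->active_scan_mask. A sketch of the usual consumer pattern in a driver's update_scan_mode() callback; my_chip_enable_channel() is hypothetical, and indio_dev->masklength is assumed to still be a public field, as it is in the version of the core shown here:

    #include <linux/bitops.h>
    #include <linux/iio/iio.h>

    static int my_update_scan_mode(struct iio_dev *indio_dev,
                                   const unsigned long *scan_mask)
    {
            unsigned int bit;

            /* scan_mask is indio_dev->active_scan_mask, i.e. the compound
             * mask built in iio_verify_update() above. */
            for_each_set_bit(bit, scan_mask, indio_dev->masklength) {
                    /* e.g. my_chip_enable_channel(indio_dev, bit); */
            }

            return 0;
    }

    static const struct iio_info my_info = {
            .update_scan_mode = my_update_scan_mode,
    };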
962 static void iio_buffer_demux_free(struct iio_buffer *buffer) in iio_buffer_demux_free() argument
966 list_for_each_entry_safe(p, q, &buffer->demux_list, l) { in iio_buffer_demux_free()
972 static int iio_buffer_add_demux(struct iio_buffer *buffer, in iio_buffer_add_demux() argument
987 list_add_tail(&(*p)->l, &buffer->demux_list); in iio_buffer_add_demux()
994 struct iio_buffer *buffer) in iio_buffer_update_demux() argument
1001 iio_buffer_demux_free(buffer); in iio_buffer_update_demux()
1002 kfree(buffer->demux_bounce); in iio_buffer_update_demux()
1003 buffer->demux_bounce = NULL; in iio_buffer_update_demux()
1007 buffer->scan_mask, in iio_buffer_update_demux()
1013 buffer->scan_mask, in iio_buffer_update_demux()
1029 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); in iio_buffer_update_demux()
1036 if (buffer->scan_timestamp) { in iio_buffer_update_demux()
1040 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); in iio_buffer_update_demux()
1045 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); in iio_buffer_update_demux()
1046 if (!buffer->demux_bounce) { in iio_buffer_update_demux()
1053 iio_buffer_demux_free(buffer); in iio_buffer_update_demux()
1061 struct iio_buffer *buffer; in iio_update_demux() local
1064 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_update_demux()
1065 ret = iio_buffer_update_demux(indio_dev, buffer); in iio_update_demux()
1072 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) in iio_update_demux()
1073 iio_buffer_demux_free(buffer); in iio_update_demux()
1082 struct iio_buffer *buffer, *tmp = NULL; in iio_enable_buffers() local
1097 "Buffer not started: buffer preenable failed (%d)\n", ret); in iio_enable_buffers()
1108 "Buffer not started: update scan mode failed (%d)\n", in iio_enable_buffers()
1118 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_enable_buffers()
1119 ret = iio_buffer_enable(buffer, indio_dev); in iio_enable_buffers()
1121 tmp = buffer; in iio_enable_buffers()
1137 "Buffer not started: postenable failed (%d)\n", ret); in iio_enable_buffers()
1150 buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); in iio_enable_buffers()
1151 list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, in iio_enable_buffers()
1153 iio_buffer_disable(buffer, indio_dev); in iio_enable_buffers()
1167 struct iio_buffer *buffer; in iio_disable_buffers() local
1193 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_disable_buffers()
1194 ret2 = iio_buffer_disable(buffer, indio_dev); in iio_disable_buffers()
1322 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_store() local
1332 inlist = iio_buffer_is_active(buffer); in enable_store()
1338 ret = __iio_update_buffers(indio_dev, buffer, NULL); in enable_store()
1340 ret = __iio_update_buffers(indio_dev, NULL, buffer); in enable_store()
1350 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_show() local
1352 return sysfs_emit(buf, "%u\n", buffer->watermark); in watermark_show()
1361 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_store() local
1373 if (val > buffer->length) { in watermark_store()
1378 if (iio_buffer_is_active(buffer)) { in watermark_store()
1383 buffer->watermark = val; in watermark_store()
1393 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in data_available_show() local
1395 return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer)); in data_available_show()
1402 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in direction_show() local
1404 switch (buffer->direction) { in direction_show()
1438 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer, in iio_buffer_wrap_attr() argument
1448 iio_attr->buffer = buffer; in iio_buffer_wrap_attr()
1458 list_add(&iio_attr->l, &buffer->buffer_attr_list); in iio_buffer_wrap_attr()
1481 group->name = "buffer"; in iio_buffer_register_legacy_sysfs_groups()
1526 struct iio_buffer *buffer = ib->buffer; in iio_buffer_chrdev_release() local
1528 wake_up(&buffer->pollq); in iio_buffer_chrdev_release()
1531 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_buffer_chrdev_release()
1551 struct iio_buffer *buffer; in iio_device_buffer_getfd() local
1562 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_device_buffer_getfd()
1564 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) { in iio_device_buffer_getfd()
1576 ib->buffer = buffer; in iio_device_buffer_getfd()
1578 fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops, in iio_device_buffer_getfd()
1604 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_device_buffer_getfd()
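Note: iio_device_buffer_getfd() implements the IIO_BUFFER_GET_FD_IOCTL: userspace passes a buffer index on the /dev/iio:deviceX character device and the kernel writes a dedicated file descriptor for that buffer back over the index. A minimal userspace sketch, assuming device0 has a second buffer (buffer1) that has already been configured and enabled via sysfs; the device path is an example:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/iio/buffer.h>   /* IIO_BUFFER_GET_FD_IOCTL */

    int main(void)
    {
            int dev_fd, idx_or_fd = 1;      /* ask for buffer1 */
            char scan[64];
            ssize_t n;

            dev_fd = open("/dev/iio:device0", O_RDONLY);
            if (dev_fd < 0)
                    return 1;

            /* On success the kernel replaces the index with the new anon fd. */
            if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &idx_or_fd) < 0)
                    return 1;

            n = read(idx_or_fd, scan, sizeof(scan));
            printf("read %zd bytes from buffer1\n", n);

            close(idx_or_fd);
            close(dev_fd);
            return 0;
    }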
1621 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, in __iio_buffer_alloc_sysfs_and_mask() argument
1633 if (buffer->attrs) { in __iio_buffer_alloc_sysfs_and_mask()
1634 while (buffer->attrs[buffer_attrcount]) in __iio_buffer_alloc_sysfs_and_mask()
1640 INIT_LIST_HEAD(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
1661 ret = iio_buffer_add_channel_sysfs(indio_dev, buffer, in __iio_buffer_alloc_sysfs_and_mask()
1670 if (indio_dev->masklength && !buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
1671 buffer->scan_mask = bitmap_zalloc(indio_dev->masklength, in __iio_buffer_alloc_sysfs_and_mask()
1673 if (!buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
1688 if (!buffer->access->set_length) in __iio_buffer_alloc_sysfs_and_mask()
1691 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) in __iio_buffer_alloc_sysfs_and_mask()
1694 if (buffer->attrs) in __iio_buffer_alloc_sysfs_and_mask()
1695 for (i = 0, id_attr = buffer->attrs[i]; in __iio_buffer_alloc_sysfs_and_mask()
1696 (id_attr = buffer->attrs[i]); i++) in __iio_buffer_alloc_sysfs_and_mask()
1700 buffer->buffer_group.attrs = attr; in __iio_buffer_alloc_sysfs_and_mask()
1705 wrapped = iio_buffer_wrap_attr(buffer, attr[i]); in __iio_buffer_alloc_sysfs_and_mask()
1714 list_for_each_entry(p, &buffer->buffer_attr_list, l) in __iio_buffer_alloc_sysfs_and_mask()
1717 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index); in __iio_buffer_alloc_sysfs_and_mask()
1718 if (!buffer->buffer_group.name) { in __iio_buffer_alloc_sysfs_and_mask()
1723 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group); in __iio_buffer_alloc_sysfs_and_mask()
1727 /* we only need to register the legacy groups for the first buffer */ in __iio_buffer_alloc_sysfs_and_mask()
1740 kfree(buffer->buffer_group.name); in __iio_buffer_alloc_sysfs_and_mask()
1742 kfree(buffer->buffer_group.attrs); in __iio_buffer_alloc_sysfs_and_mask()
1744 bitmap_free(buffer->scan_mask); in __iio_buffer_alloc_sysfs_and_mask()
1746 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
1751 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer, in __iio_buffer_free_sysfs_and_mask() argument
1757 bitmap_free(buffer->scan_mask); in __iio_buffer_free_sysfs_and_mask()
1758 kfree(buffer->buffer_group.name); in __iio_buffer_free_sysfs_and_mask()
1759 kfree(buffer->buffer_group.attrs); in __iio_buffer_free_sysfs_and_mask()
1760 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_free_sysfs_and_mask()
1767 struct iio_buffer *buffer; in iio_buffers_alloc_sysfs_and_mask() local
1784 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
1785 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx); in iio_buffers_alloc_sysfs_and_mask()
1805 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
1806 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx); in iio_buffers_alloc_sysfs_and_mask()
1814 struct iio_buffer *buffer; in iio_buffers_free_sysfs_and_mask() local
1824 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffers_free_sysfs_and_mask()
1825 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i); in iio_buffers_free_sysfs_and_mask()
1845 static const void *iio_demux(struct iio_buffer *buffer, in iio_demux() argument
1850 if (list_empty(&buffer->demux_list)) in iio_demux()
1852 list_for_each_entry(t, &buffer->demux_list, l) in iio_demux()
1853 memcpy(buffer->demux_bounce + t->to, in iio_demux()
1856 return buffer->demux_bounce; in iio_demux()
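Note: iio_demux() walks buffer->demux_list and copies each region of the incoming device scan into buffer->demux_bounce, one contiguous copy per node. A reconstruction of the per-entry layout implied by that memcpy(); the real definition sits near the top of this file, and the field names follow the t->from / t->to / t->length usage above:

    #include <linux/list.h>

    /* One demux step: copy 'length' bytes from offset 'from' in the device
     * scan to offset 'to' in the buffer's bounce buffer. */
    struct iio_demux_table {
            unsigned int from;
            unsigned int to;
            unsigned int length;
            struct list_head l;     /* linked on buffer->demux_list */
    };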
1859 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) in iio_push_to_buffer() argument
1861 const void *dataout = iio_demux(buffer, data); in iio_push_to_buffer()
1864 ret = buffer->access->store_to(buffer, dataout); in iio_push_to_buffer()
1872 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); in iio_push_to_buffer()
1877 * iio_push_to_buffers() - push to a registered buffer.
1898 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1919 * of either the data provided or the length of the destination buffer. in iio_push_to_buffers_with_ts_unaligned()
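Note: iio_push_to_buffers() is the producer-side entry point that the demux/store_to machinery above serves; its usual caller is a triggered-buffer handler like the sketch below. struct my_state, the 4-channel scan layout and the my_read_scan step are hypothetical; iio_push_to_buffers_with_timestamp() is the standard wrapper from <linux/iio/buffer.h> that appends the timestamp to the scan before pushing:

    #include <linux/interrupt.h>
    #include <linux/iio/iio.h>
    #include <linux/iio/buffer.h>
    #include <linux/iio/trigger.h>
    #include <linux/iio/trigger_consumer.h>

    struct my_state {
            struct {
                    s16 channels[4];
                    s64 timestamp __aligned(8);
            } scan;
    };

    static irqreturn_t my_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;
            struct my_state *st = iio_priv(indio_dev);

            /* my_read_scan() would fill st->scan.channels here */

            iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
                                               iio_get_time_ns(indio_dev));

            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }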
1944 * iio_buffer_release() - Free a buffer's resources
1947 * This function is called when the last reference to the buffer has been
1948 * dropped. It will typically free all resources allocated by the buffer. Do not
1950 * buffer.
1954 struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref); in iio_buffer_release() local
1956 buffer->access->release(buffer); in iio_buffer_release()
1960 * iio_buffer_get() - Grab a reference to the buffer
1961 * @buffer: The buffer to grab a reference for, may be NULL
1963 * Returns the pointer to the buffer that was passed into the function.
1965 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer) in iio_buffer_get() argument
1967 if (buffer) in iio_buffer_get()
1968 kref_get(&buffer->ref); in iio_buffer_get()
1970 return buffer; in iio_buffer_get()
1975 * iio_buffer_put() - Release the reference to the buffer
1976 * @buffer: The buffer to release the reference for, may be NULL
1978 void iio_buffer_put(struct iio_buffer *buffer) in iio_buffer_put() argument
1980 if (buffer) in iio_buffer_put()
1981 kref_put(&buffer->ref, iio_buffer_release); in iio_buffer_put()
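Note: a sketch of the reference discipline these helpers enforce: code that stores a long-lived pointer to a buffer takes a reference with iio_buffer_get() and drops it with iio_buffer_put(); the final put lands in iio_buffer_release(), which calls the implementation's release() callback. The holder below is hypothetical:

    #include <linux/iio/buffer_impl.h>

    static struct iio_buffer *my_saved_buffer;

    static void my_hold(struct iio_buffer *buffer)
    {
            my_saved_buffer = iio_buffer_get(buffer);   /* +1 on buffer->ref */
    }

    static void my_release_hold(void)
    {
            iio_buffer_put(my_saved_buffer);    /* last put -> iio_buffer_release() */
            my_saved_buffer = NULL;
    }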
1986 * iio_device_attach_buffer - Attach a buffer to a IIO device
1987 * @indio_dev: The device the buffer should be attached to
1988 * @buffer: The buffer to attach to the device
1992 * This function attaches a buffer to a IIO device. The buffer stays attached to
1994 * buffer will also be assigned to 'indio_dev->buffer'.
1999 struct iio_buffer *buffer) in iio_device_attach_buffer() argument
2012 buffer = iio_buffer_get(buffer); in iio_device_attach_buffer()
2014 /* first buffer is legacy; attach it to the IIO device directly */ in iio_device_attach_buffer()
2015 if (!indio_dev->buffer) in iio_device_attach_buffer()
2016 indio_dev->buffer = buffer; in iio_device_attach_buffer()
2018 iio_dev_opaque->attached_buffers[cnt - 1] = buffer; in iio_device_attach_buffer()
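Note: a sketch of the attach path as a driver would exercise it during probe, using the kfifo buffer as an example. iio_kfifo_allocate()/iio_kfifo_free() come from <linux/iio/kfifo_buf.h> and iio_device_attach_buffer() from <linux/iio/buffer_impl.h>; teardown of the allocation reference on the success path is omitted here, and real drivers normally let the devm_iio_kfifo_buffer_setup() helpers handle this whole sequence:

    #include <linux/iio/iio.h>
    #include <linux/iio/buffer_impl.h>
    #include <linux/iio/kfifo_buf.h>

    static int my_setup_buffer(struct iio_dev *indio_dev)
    {
            struct iio_buffer *buffer;
            int ret;

            buffer = iio_kfifo_allocate();
            if (!buffer)
                    return -ENOMEM;

            indio_dev->modes |= INDIO_BUFFER_SOFTWARE;

            /* The core takes its own reference (iio_buffer_get() at line 2012
             * above); the first attached buffer also becomes indio_dev->buffer. */
            ret = iio_device_attach_buffer(indio_dev, buffer);
            if (ret)
                    iio_kfifo_free(buffer);

            return ret;
    }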