Lines Matching full:buffer

3  * Generic ring buffer
34 * The "absolute" timestamp in the buffer is only 59 bits.
44 * The ring buffer header is special. We must manually keep it up to date.
66 * The ring buffer is made up of a list of pages. A separate list of pages is
67 * allocated for each CPU. A writer may only write to a buffer that is
69 * from any per cpu buffer.
71 * The reader is special. For each per cpu buffer, the reader has its own
73 * page is swapped with another page in the ring buffer.
77 * again (as long as it is out of the ring buffer).
82 * |reader| RING BUFFER
93 * |reader| RING BUFFER
104 * |reader| RING BUFFER
115 * |buffer| RING BUFFER
127 * and swap that into the ring buffer.
265 /* inline for ring buffer fast paths */
289 #define for_each_buffer_cpu(buffer, cpu) \ argument
290 for_each_cpu(cpu, buffer->cpumask)
292 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
293 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
318 unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
327 * Note, the buffer_page list must be first. The buffer pages
328 * are allocated in cache lines, which means that each buffer
331 * add flags in the list struct pointers, to make the ring buffer
335 struct list_head list; /* list of buffer pages */
345 * The buffer page counters, write and entries, must be reset
409 * ABSOLUTE - the buffer requests all events to have absolute time stamps
445 * head_page == tail_page && head == tail then buffer is empty.
451 struct trace_buffer *buffer; member
486 /* ring buffer pages to update, > 0 to add, < 0 to remove */
534 int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s) in ring_buffer_print_page_header() argument
558 (unsigned int)buffer->subbuf_size, in ring_buffer_print_page_header()
576 * is on the buffer that it passed in.
633 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
637 * @buffer: The buffer that the event is on
641 * committed to the ring buffer. And must be called from the same
648 * the max nesting, then the write_stamp of the buffer is returned,
652 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, in ring_buffer_event_time_stamp() argument
655 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
684 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
685 * @buffer: The ring_buffer to get the number of pages from
688 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
690 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
692 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
696 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
697 * @buffer: The ring_buffer to get the number of pages from
700 * Returns the number of pages that have content in the ring buffer.
702 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
708 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
709 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
710 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
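
Taken together, ring_buffer_nr_pages() and ring_buffer_nr_dirty_pages() give a rough occupancy figure for one per-CPU buffer, which is essentially what full_hit() below computes internally. A minimal sketch; the helper name buffer_fill_percent is an invented illustration, not part of the API:

#include <linux/ring_buffer.h>

/* Hypothetical helper: rough fill level of one per-CPU buffer, in percent. */
static int buffer_fill_percent(struct trace_buffer *buffer, int cpu)
{
        size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);
        size_t dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

        if (!nr_pages)
                return 0;
        return (int)(dirty * 100 / nr_pages);
}
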
726 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
728 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
737 * Add one as dirty will never equal nr_pages, as the sub-buffer in full_hit()
741 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
747 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
750 * ring buffer waiters queue.
776 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
777 * @buffer: The ring buffer to wake waiters on
778 * @cpu: The CPU buffer to wake waiters on
780 * In the case that a file representing a ring buffer is closing, in ring_buffer_wake_waiters()
783 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
788 if (!buffer) in ring_buffer_wake_waiters()
794 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
795 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
797 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
799 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
804 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
805 /* The CPU buffer may not have been initialized yet */ in ring_buffer_wake_waiters()
815 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) in rb_watermark_hit() argument
822 return !ring_buffer_empty(buffer); in rb_watermark_hit()
824 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
826 if (!ring_buffer_empty_cpu(buffer, cpu)) { in rb_watermark_hit()
835 ret = !pagebusy && full_hit(buffer, cpu, full); in rb_watermark_hit()
846 * ring_buffer_wait - wait for input to the ring buffer
847 * @buffer: buffer to wait on
848 * @cpu: the cpu buffer to wait on
852 * as data is added to any of the @buffer's cpu buffers. Otherwise
853 * it will wait for data to be added to a specific cpu buffer.
855 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
864 * data in any cpu buffer, or a specific buffer, put the in ring_buffer_wait()
868 work = &buffer->irq_work; in ring_buffer_wait()
872 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
874 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
890 * We don't clear it even if the buffer is no longer in ring_buffer_wait()
908 if (rb_watermark_hit(buffer, cpu, full)) in ring_buffer_wait()
923 if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current)) in ring_buffer_wait()
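
A minimal sketch of a blocking reader built on ring_buffer_wait(), assuming the three-argument form shown above (buffer, cpu, full); wait_for_trace_data() is an invented wrapper name:

#include <linux/ring_buffer.h>

/* Block until any per-CPU buffer has data; returns 0 or a -errno value. */
static int wait_for_trace_data(struct trace_buffer *buffer)
{
        /*
         * full == 0: wake up as soon as any data is present.
         * Passing a specific cpu and e.g. full == 50 would instead wait
         * until that CPU's buffer is about half full.
         */
        return ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0);
}
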
930 * ring_buffer_poll_wait - poll on buffer input
931 * @buffer: buffer to wait on
932 * @cpu: the cpu buffer to wait on
938 * as data is added to any of the @buffer's cpu buffers. Otherwise
939 * it will wait for data to be added to a specific cpu buffer.
944 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
951 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
954 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
957 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
979 * checking if the ring buffer is empty. Once the waiters_pending bit in ring_buffer_poll_wait()
986 * the buffer goes from empty to having content. But as this race is in ring_buffer_poll_wait()
993 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
995 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
996 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
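
For poll()/epoll() support, a file's ->poll handler can delegate to ring_buffer_poll_wait(). A sketch, assuming the five-argument form with a trailing "full" watermark; my_file_to_buffer() and my_file_to_cpu() are hypothetical accessors tied to the open file:

#include <linux/poll.h>
#include <linux/ring_buffer.h>

static __poll_t my_trace_poll(struct file *filp, poll_table *wait)
{
        struct trace_buffer *buffer = my_file_to_buffer(filp); /* hypothetical */
        int cpu = my_file_to_cpu(filp);         /* or RING_BUFFER_ALL_CPUS */

        /* full == 0: report EPOLLIN as soon as any data is available */
        return ring_buffer_poll_wait(buffer, cpu, filp, wait, 0);
}
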
1001 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1009 atomic_inc(&__b->buffer->record_disabled); \
1020 static inline u64 rb_time_stamp(struct trace_buffer *buffer) in rb_time_stamp() argument
1025 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1028 ts = buffer->clock(); in rb_time_stamp()
1034 u64 ring_buffer_time_stamp(struct trace_buffer *buffer) in ring_buffer_time_stamp() argument
1039 time = rb_time_stamp(buffer); in ring_buffer_time_stamp()
1046 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, in ring_buffer_normalize_time_stamp() argument
1055 * Making the ring buffer lockless makes things tricky.
1060 * The reader page is always off the ring buffer, but when the
1062 * a new one from the buffer. The reader needs to take from
1112 * the reader page with a page in the buffer, but before it
1409 * rb_check_pages - integrity check of buffer pages
1410 * @cpu_buffer: CPU buffer with pages to test
1489 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
1493 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
1526 * The ring buffer page list is a circular list that does not in rb_allocate_pages()
1541 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1554 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1556 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1573 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
1630 * @flags: attributes to set for the ring buffer.
1631 * @key: ring buffer reader_lock_key.
1634 * flag. This flag means that the buffer will overwrite old data
1635 * when the buffer wraps. If this flag is not set, the buffer will
1641 struct trace_buffer *buffer; in __ring_buffer_alloc() local
1648 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), in __ring_buffer_alloc()
1650 if (!buffer) in __ring_buffer_alloc()
1653 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1656 /* Default buffer page size - one system page */ in __ring_buffer_alloc()
1657 buffer->subbuf_order = 0; in __ring_buffer_alloc()
1658 buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE; in __ring_buffer_alloc()
1660 /* Max payload is buffer page size - header (8 bytes) */ in __ring_buffer_alloc()
1661 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); in __ring_buffer_alloc()
1663 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in __ring_buffer_alloc()
1664 buffer->flags = flags; in __ring_buffer_alloc()
1665 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1666 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1668 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1669 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1675 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1678 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1680 if (!buffer->buffers) in __ring_buffer_alloc()
1684 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1685 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1686 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1689 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1693 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1695 return buffer; in __ring_buffer_alloc()
1698 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1699 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1700 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1702 kfree(buffer->buffers); in __ring_buffer_alloc()
1705 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1708 kfree(buffer); in __ring_buffer_alloc()
1714 * ring_buffer_free - free a ring buffer.
1715 * @buffer: the buffer to free.
1718 ring_buffer_free(struct trace_buffer *buffer) in ring_buffer_free() argument
1722 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1724 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1726 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1727 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1729 kfree(buffer->buffers); in ring_buffer_free()
1730 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
1732 kfree(buffer); in ring_buffer_free()
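
Allocation and teardown pair up as shown in this sketch of a hypothetical module; ring_buffer_alloc() is the macro wrapper around __ring_buffer_alloc() above, and the size is rounded up to whole sub-buffers per CPU:

#include <linux/module.h>
#include <linux/ring_buffer.h>

static struct trace_buffer *my_buffer;          /* hypothetical module state */

static int __init my_init(void)
{
        /* ~1 MB per CPU; overwrite the oldest data when a buffer fills up */
        my_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
        return my_buffer ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
        ring_buffer_free(my_buffer);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");  /* the ring buffer symbols are GPL-only exports */
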
1736 void ring_buffer_set_clock(struct trace_buffer *buffer, in ring_buffer_set_clock() argument
1739 buffer->clock = clock; in ring_buffer_set_clock()
1742 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) in ring_buffer_set_time_stamp_abs() argument
1744 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1747 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) in ring_buffer_time_stamp_abs() argument
1749 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
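
A short sketch of the two knobs above, assuming trace_clock_global() from linux/trace_clock.h as the replacement clock:

#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>

static void my_configure_buffer(struct trace_buffer *buffer)
{
        /* Use the global trace clock instead of the default local clock. */
        ring_buffer_set_clock(buffer, trace_clock_global);

        /* Request absolute timestamps on every event instead of deltas. */
        ring_buffer_set_time_stamp_abs(buffer, true);
}
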
1791 * from the ring buffer in rb_remove_pages()
1820 /* make sure pages points to a valid page in the ring buffer */ in rb_remove_pages()
1834 /* last buffer page to remove */ in rb_remove_pages()
1851 * bytes consumed in ring buffer from here. in rb_remove_pages()
1885 * in the ring buffer. Now we are racing with the writer trying to in rb_insert_pages()
1977 * ring_buffer_resize - resize the ring buffer
1978 * @buffer: the buffer to resize.
1980 * @cpu_id: the cpu buffer to resize
1982 * Minimum size is 2 * buffer->subbuf_size.
1986 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, in ring_buffer_resize() argument
1994 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
1996 if (!buffer) in ring_buffer_resize()
1999 /* Make sure the requested buffer exists */ in ring_buffer_resize()
2001 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2004 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in ring_buffer_resize()
2010 /* prevent another thread from changing buffer sizes */ in ring_buffer_resize()
2011 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2012 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2017 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
2020 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2029 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2058 * since we can change their buffer sizes without any race. in ring_buffer_resize()
2060 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2061 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2084 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2085 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2096 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2103 * manipulating the ring buffer and is expecting a sane state while in ring_buffer_resize()
2147 * The ring buffer resize can happen with the ring buffer in ring_buffer_resize()
2149 * as possible. But if the buffer is disabled, we do not need in ring_buffer_resize()
2151 * that the buffer is not corrupt. in ring_buffer_resize()
2153 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2154 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2156 * Even though the buffer was disabled, we must make sure in ring_buffer_resize()
2162 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2163 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2166 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2169 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2170 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2174 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2190 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2191 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
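
Resizing applies either to one CPU buffer or, with RING_BUFFER_ALL_CPUS, to all of them; a minimal sketch (the 4 MB figure is only an example):

#include <linux/ring_buffer.h>

static int my_grow_buffer(struct trace_buffer *buffer)
{
        /*
         * Size is in bytes per CPU, rounded up to whole sub-buffers;
         * the effective minimum is 2 * buffer->subbuf_size.
         */
        return ring_buffer_resize(buffer, 4 << 20, RING_BUFFER_ALL_CPUS);
}
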
2196 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) in ring_buffer_change_overwrite() argument
2198 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2200 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2202 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2203 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2295 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
2486 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
2555 /* Set write to end of buffer */ in rb_reset_tail()
2571 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail() local
2581 * it all the way around the buffer, bail, and warn in rb_move_tail()
2594 * page with the buffer head. in rb_move_tail()
2600 * the buffer, unless the commit page is still on the in rb_move_tail()
2614 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2630 * page. We could have a small buffer, and in rb_move_tail()
2631 * have filled up the buffer with events in rb_move_tail()
2729 * is added to the buffer, it will lose those bits. in rb_add_timestamp()
2745 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2760 * @cpu_buffer: The per cpu buffer of the @event
2765 * is the actual size that is written to the ring buffer,
2840 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3010 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3012 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3013 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3015 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3035 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3100 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3152 * @buffer: The ring buffer to modify
3154 * The ring buffer has a safety mechanism to prevent recursion.
3163 void ring_buffer_nest_start(struct trace_buffer *buffer) in ring_buffer_nest_start() argument
3171 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3178 * @buffer: The ring buffer to modify
3183 void ring_buffer_nest_end(struct trace_buffer *buffer) in ring_buffer_nest_end() argument
3190 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3198 * @buffer: The buffer to commit to
3200 * This commits the data to the ring buffer, and releases any locks held.
3204 int ring_buffer_unlock_commit(struct trace_buffer *buffer) in ring_buffer_unlock_commit() argument
3209 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3213 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3370 * the buffer page.
3476 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3487 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
3510 /* See if we shot past the end of this buffer page */ in __rb_reserve_next()
3511 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
3546 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3583 /* We reserved something on the buffer */ in __rb_reserve_next()
3604 rb_reserve_next_event(struct trace_buffer *buffer, in rb_reserve_next_event() argument
3613 /* ring buffer does cmpxchg, make sure it is safe in NMI context */ in rb_reserve_next_event()
3624 * Due to the ability to swap a cpu buffer from a buffer in rb_reserve_next_event()
3630 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3639 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3642 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
3680 * ring_buffer_lock_reserve - reserve a part of the buffer
3681 * @buffer: the ring buffer to reserve from
3684 * Returns a reserved event on the ring buffer to copy directly to.
3695 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) in ring_buffer_lock_reserve() argument
3704 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3709 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3712 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3717 if (unlikely(length > buffer->max_data_size)) in ring_buffer_lock_reserve()
3723 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
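
The reserve/commit pair is the normal write path: reserve space, fill it in place via ring_buffer_event_data(), then commit. A sketch with an invented u64 payload:

#include <linux/ring_buffer.h>

static int my_log_value(struct trace_buffer *buffer, u64 value)
{
        struct ring_buffer_event *event;
        u64 *entry;

        /* Preemption stays disabled from the reserve until the commit. */
        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;  /* recording disabled or no space */

        entry = ring_buffer_event_data(event);
        *entry = value;

        return ring_buffer_unlock_commit(buffer);
}
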
3751 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
3773 /* commit not part of this buffer?? */ in rb_decrement_entry()
3779 * @buffer: the ring buffer
3782 * Sometimes an event that is in the ring buffer needs to be ignored.
3783 * This function lets the user discard an event in the ring buffer
3787 * committed. It will try to free the event from the ring buffer
3796 void ring_buffer_discard_commit(struct trace_buffer *buffer, in ring_buffer_discard_commit() argument
3806 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3813 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
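
If, after reserving, the event turns out to be unwanted, it is discarded instead of committed. Continuing the previous sketch; the "drop" decision here is hypothetical:

static int my_log_maybe(struct trace_buffer *buffer, u64 value, bool drop)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, sizeof(value));
        if (!event)
                return -EBUSY;

        *(u64 *)ring_buffer_event_data(event) = value;

        if (drop) {
                /* Must happen before the commit; frees the slot when possible. */
                ring_buffer_discard_commit(buffer, event);
                return 0;
        }
        return ring_buffer_unlock_commit(buffer);
}
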
3830 * ring_buffer_write - write data to the buffer without reserving
3831 * @buffer: The ring buffer to write to.
3833 * @data: The data to write to the buffer.
3836 * one function. If you already have the data to write to the buffer, it
3842 int ring_buffer_write(struct trace_buffer *buffer, in ring_buffer_write() argument
3854 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3859 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3862 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3867 if (length > buffer->max_data_size) in ring_buffer_write()
3873 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3883 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
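
When the payload already exists in memory, ring_buffer_write() does the reserve, copy and commit in one call; a one-function sketch:

#include <linux/ring_buffer.h>

static int my_log_blob(struct trace_buffer *buffer, void *data, unsigned long len)
{
        return ring_buffer_write(buffer, len, data);
}
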
3913 * committed content has been read, the ring buffer is empty. in rb_per_cpu_empty()
3934 * ring_buffer_record_disable - stop all writes into the buffer
3935 * @buffer: The ring buffer to stop writes to.
3937 * This prevents all writes to the buffer. Any attempt to write
3938 * to the buffer after this will fail and return NULL.
3942 void ring_buffer_record_disable(struct trace_buffer *buffer) in ring_buffer_record_disable() argument
3944 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3949 * ring_buffer_record_enable - enable writes to the buffer
3950 * @buffer: The ring buffer to enable writes
3955 void ring_buffer_record_enable(struct trace_buffer *buffer) in ring_buffer_record_enable() argument
3957 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
3962 * ring_buffer_record_off - stop all writes into the buffer
3963 * @buffer: The ring buffer to stop writes to.
3965 * This prevents all writes to the buffer. Any attempt to write
3966 * to the buffer after this will fail and return NULL.
3972 void ring_buffer_record_off(struct trace_buffer *buffer) in ring_buffer_record_off() argument
3977 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3980 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
3985 * ring_buffer_record_on - restart writes into the buffer
3986 * @buffer: The ring buffer to start writes to.
3988 * This enables all writes to the buffer that was disabled by
3995 void ring_buffer_record_on(struct trace_buffer *buffer) in ring_buffer_record_on() argument
4000 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4003 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4008 * ring_buffer_record_is_on - return true if the ring buffer can write
4009 * @buffer: The ring buffer to see if write is enabled
4011 * Returns true if the ring buffer is in a state that it accepts writes.
4013 bool ring_buffer_record_is_on(struct trace_buffer *buffer) in ring_buffer_record_is_on() argument
4015 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4019 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4020 * @buffer: The ring buffer to see if write is set enabled
4022 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4025 * It may return true when the ring buffer has been disabled by
4027 * the ring buffer.
4029 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) in ring_buffer_record_is_set_on() argument
4031 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
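
The off/on pair latches recording with a dedicated bit (RB_BUFFER_OFF), so it is independent of the nested disable/enable counter; a sketch of quiescing the buffer around an inspection:

#include <linux/ring_buffer.h>

static void my_inspect(struct trace_buffer *buffer)
{
        ring_buffer_record_off(buffer);

        if (!ring_buffer_record_is_on(buffer))
                pr_info("buffer quiesced, %lu entries\n",
                        ring_buffer_entries(buffer));

        ring_buffer_record_on(buffer);  /* clears only the OFF bit */
}
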
4036 * @buffer: The ring buffer to stop writes to.
4037 * @cpu: The CPU buffer to stop
4039 * This prevents all writes to the buffer. Any attempt to write
4040 * to the buffer after this will fail and return NULL.
4044 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4048 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4051 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4057 * ring_buffer_record_enable_cpu - enable writes to the buffer
4058 * @buffer: The ring buffer to enable writes
4064 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4068 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4071 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4077 * The total entries in the ring buffer is the running counter
4078 * of entries entered into the ring buffer, minus the sum of
4079 * the entries read from the ring buffer and the number of
4090 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4091 * @buffer: The ring buffer
4092 * @cpu: The per CPU buffer to read from.
4094 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4101 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4104 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4123 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4124 * @buffer: The ring buffer
4125 * @cpu: The per CPU buffer to read from.
4127 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4132 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4143 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4144 * @buffer: The ring buffer
4145 * @cpu: The per CPU buffer to get the entries from.
4147 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4151 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4154 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4162 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4163 * @buffer: The ring buffer
4164 * @cpu: The per CPU buffer to get the number of overruns from
4166 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4171 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4174 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4183 * commits failing due to the buffer wrapping around while there are uncommitted
4185 * @buffer: The ring buffer
4186 * @cpu: The per CPU buffer to get the number of overruns from
4189 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4194 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4197 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4206 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4207 * @buffer: The ring buffer
4208 * @cpu: The per CPU buffer to get the number of overruns from
4211 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4216 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4219 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4228 * @buffer: The ring buffer
4229 * @cpu: The per CPU buffer to get the number of events read
4232 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4239 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4245 * ring_buffer_entries - get the number of entries in a buffer
4246 * @buffer: The ring buffer
4248 * Returns the total number of entries in the ring buffer
4251 unsigned long ring_buffer_entries(struct trace_buffer *buffer) in ring_buffer_entries() argument
4257 /* if you care about this being correct, lock the buffer */ in ring_buffer_entries()
4258 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4259 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4268 * ring_buffer_overruns - get the number of overruns in buffer
4269 * @buffer: The ring buffer
4271 * Returns the total number of overruns in the ring buffer
4274 unsigned long ring_buffer_overruns(struct trace_buffer *buffer) in ring_buffer_overruns() argument
4280 /* if you care about this being correct, lock the buffer */ in ring_buffer_overruns()
4281 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4282 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
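
The per-CPU counters above can be dumped together; a sketch (for_each_online_cpu() is used for brevity, and the accessors simply return 0 for CPUs without a buffer):

#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

static void my_dump_stats(struct trace_buffer *buffer)
{
        int cpu;

        for_each_online_cpu(cpu) {
                pr_info("cpu%d: %lu entries, %lu overruns, %lu bytes, oldest ts %llu\n",
                        cpu,
                        ring_buffer_entries_cpu(buffer, cpu),
                        ring_buffer_overrun_cpu(buffer, cpu),
                        ring_buffer_bytes_cpu(buffer, cpu),
                        (unsigned long long)ring_buffer_oldest_event_ts(buffer, cpu));
        }
}
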
4447 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
4484 /* Don't bother swapping if the ring buffer is empty */ in rb_get_reader_page()
4507 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4508 * has no specific buffer page to point to. Let's move it out in rb_get_reader_page()
4619 /* This function should not be called when buffer is empty */ in rb_advance_reader()
4651 * Check if we are at the end of the buffer. in rb_advance_iter()
4718 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4728 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4746 struct trace_buffer *buffer; in rb_iter_peek() local
4755 buffer = cpu_buffer->buffer; in rb_iter_peek()
4758 * Check if someone performed a consuming read to the buffer in rb_iter_peek()
4759 * or removed some pages from the buffer. In these cases, in rb_iter_peek()
4775 * the ring buffer with an active write as the consumer is. in rb_iter_peek()
4811 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4821 ring_buffer_normalize_time_stamp(buffer, in rb_iter_peek()
4842 * If an NMI die dumps out the content of the ring buffer in rb_reader_lock()
4844 * preempted a task that holds the ring buffer locks. If in rb_reader_lock()
4846 * to do the read, but this can corrupt the ring buffer, in rb_reader_lock()
4853 /* Continue without locking, but disable the ring buffer */ in rb_reader_lock()
4867 * @buffer: The ring buffer to read
4876 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4879 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4884 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4903 * @iter: The ring buffer iterator
4918 * @iter: The ring buffer iterator
4944 * @buffer: The ring buffer to get the next event from
4945 * @cpu: the cpu to read the buffer from
4949 * Returns the next event in the ring buffer, and that event is consumed.
4951 * and eventually empty the ring buffer if the producer is slower.
4954 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
4966 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4969 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
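
A consuming reader loops on ring_buffer_consume() until the CPU buffer is drained; a sketch that only prints the metadata of each event, with payload decoding left as a placeholder:

#include <linux/ring_buffer.h>

static void my_drain_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost;
        u64 ts;

        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                void *data = ring_buffer_event_data(event);

                pr_info("ts=%llu len=%u lost=%lu\n",
                        (unsigned long long)ts,
                        ring_buffer_event_length(event), lost);
                (void)data;     /* decode the payload here */
        }
}
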
4993 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4994 * @buffer: The ring buffer to read from
4995 * @cpu: The cpu buffer to iterate over
4999 * through the buffer. Memory is allocated, buffer recording
5002 * Disabling buffer recording prevents the reading from being
5014 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5019 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5027 iter->event_size = buffer->subbuf_size; in ring_buffer_read_prepare()
5034 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5059 * ring_buffer_read_start - start a non consuming read of the buffer
5062 * This finalizes the startup of an iteration through the buffer.
5089 * ring_buffer_read_finish - finish reading the iterator of the buffer
5092 * This re-enables the recording to the buffer, and frees the
5102 * Ring buffer is disabled from recording, here's a good place in ring_buffer_read_finish()
5103 * to check the integrity of the ring buffer. in ring_buffer_read_finish()
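
The prepare/start/finish calls bracket a non-consuming walk of a CPU buffer; the sketch below additionally assumes the current iterator helpers ring_buffer_iter_peek() and ring_buffer_iter_advance():

#include <linux/ring_buffer.h>

static void my_walk_cpu(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_iter *iter;
        struct ring_buffer_event *event;
        u64 ts;

        iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
        if (!iter)
                return;
        ring_buffer_read_prepare_sync();
        ring_buffer_read_start(iter);

        while ((event = ring_buffer_iter_peek(iter, &ts))) {
                pr_info("event at %llu, len %u\n",
                        (unsigned long long)ts,
                        ring_buffer_event_length(event));
                ring_buffer_iter_advance(iter);
        }

        ring_buffer_read_finish(iter);  /* re-enables recording, frees iter */
}
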
5119 * @iter: The ring buffer iterator
5138 * ring_buffer_size - return the size of the ring buffer (in bytes)
5139 * @buffer: The ring buffer.
5140 * @cpu: The CPU to get ring buffer size from.
5142 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5144 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5147 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5153 * @buffer: The ring buffer.
5157 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer) in ring_buffer_max_event_size() argument
5160 if (ring_buffer_time_stamp_abs(buffer)) in ring_buffer_max_event_size()
5161 return buffer->max_data_size - RB_LEN_TIME_EXTEND; in ring_buffer_max_event_size()
5162 return buffer->max_data_size; in ring_buffer_max_event_size()
5222 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5243 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5244 * @buffer: The ring buffer to reset a per cpu buffer of
5245 * @cpu: The CPU buffer to be reset
5247 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5249 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5251 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5254 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_cpu()
5255 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5268 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5276 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5277 * @buffer: The ring buffer to reset a per cpu buffer of
5279 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) in ring_buffer_reset_online_cpus() argument
5284 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset_online_cpus()
5285 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5287 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5288 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5297 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5298 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5313 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5317 * ring_buffer_reset - reset a ring buffer
5318 * @buffer: The ring buffer to reset all cpu buffers
5320 void ring_buffer_reset(struct trace_buffer *buffer) in ring_buffer_reset() argument
5325 /* prevent another thread from changing buffer sizes */ in ring_buffer_reset()
5326 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5328 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5329 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5338 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5339 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5347 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5352 * ring_buffer_empty - is the ring buffer empty?
5353 * @buffer: The ring buffer to test
5355 bool ring_buffer_empty(struct trace_buffer *buffer) in ring_buffer_empty() argument
5363 /* yes this is racy, but if you don't like the race, lock the buffer */ in ring_buffer_empty()
5364 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5365 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5381 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5382 * @buffer: The ring buffer
5383 * @cpu: The CPU buffer to test
5385 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5392 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5395 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5408 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5409 * @buffer_a: One buffer to swap with
5410 * @buffer_b: The other buffer to swap with
5414 * of a CPU buffer and have another backup buffer lying around.
5415 * It is expected that the tracer handles the cpu buffer not being
5470 * it will mess the state of the cpu buffer. in ring_buffer_swap_cpu()
5480 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5481 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5495 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5496 * @buffer: the buffer to allocate for.
5497 * @cpu: the cpu buffer to allocate.
5500 * When reading a full page from the ring buffer, these functions
5503 * needs to get pages from the ring buffer, it passes the result
5505 * the page that was allocated, with the read page of the buffer.
5511 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5518 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5525 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
5526 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5542 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
5559 * @buffer: the buffer the page was allocate for
5560 * @cpu: the cpu buffer the page came from
5565 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, in ring_buffer_free_read_page() argument
5573 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5576 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5580 * is different from the subbuffer order of the buffer - in ring_buffer_free_read_page()
5583 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) in ring_buffer_free_read_page()
5604 * ring_buffer_read_page - extract a page from the ring buffer
5605 * @buffer: buffer to extract from
5608 * @cpu: the cpu of the buffer to extract
5611 * This function will pull out a page from the ring buffer and consume it.
5614 * to swap with a page in the ring buffer.
5617 * rpage = ring_buffer_alloc_read_page(buffer, cpu);
5620 * ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
5623 * ring_buffer_free_read_page(buffer, cpu, rpage);
5629 * The ring buffer can be used anywhere in the kernel and can not
5630 * blindly call wake_up. The layer that uses the ring buffer must be
5637 int ring_buffer_read_page(struct trace_buffer *buffer, in ring_buffer_read_page() argument
5641 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5652 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5666 if (data_page->order != buffer->subbuf_order) in ring_buffer_read_page()
5691 * we must copy the data from the page to the buffer. in ring_buffer_read_page()
5789 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5801 if (commit < buffer->subbuf_size) in ring_buffer_read_page()
5802 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
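
Expanding the usage outlined in the comment above into a sketch; the handle type struct buffer_data_read_page and the ring_buffer_read_page_data() accessor are assumed from the current API (older kernels handed back a plain page pointer instead), and process_page() is a hypothetical consumer:

#include <linux/err.h>
#include <linux/ring_buffer.h>

static int my_copy_one_subbuf(struct trace_buffer *buffer, int cpu)
{
        struct buffer_data_read_page *rpage;
        int ret;

        rpage = ring_buffer_alloc_read_page(buffer, cpu);
        if (IS_ERR(rpage))
                return PTR_ERR(rpage);

        /* full == 0: accept a partially filled sub-buffer as well */
        ret = ring_buffer_read_page(buffer, rpage, PAGE_SIZE, cpu, 0);
        if (ret >= 0)
                process_page(ring_buffer_read_page_data(rpage), ret);   /* hypothetical */

        ring_buffer_free_read_page(buffer, cpu, rpage);
        return ret;
}
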
5825 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5826 * @buffer: the buffer to get the sub buffer size from
5828 * Returns size of the sub buffer, in bytes.
5830 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer) in ring_buffer_subbuf_size_get() argument
5832 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_size_get()
5837 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5838 * @buffer: The ring_buffer to get the system sub page order from
5840 * By default, one ring buffer sub page equals one system page. This parameter
5841 * is configurable, per ring buffer. The size of the ring buffer sub page can be
5844 * Returns the order of buffer sub page size, in system pages:
5845 * 0 means the sub buffer size is 1 system page and so forth.
5848 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer) in ring_buffer_subbuf_order_get() argument
5850 if (!buffer) in ring_buffer_subbuf_order_get()
5853 return buffer->subbuf_order; in ring_buffer_subbuf_order_get()
5858 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5859 * @buffer: The ring_buffer to set the new page size.
5860 * @order: Order of the system pages in one sub buffer page
5862 * By default, one ring buffer page equals one system page. This API can be
5863 * used to set a new size of the ring buffer page. The size must be an order of
5865 * system pages that are allocated for one ring buffer page:
5873 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order) in ring_buffer_subbuf_order_set() argument
5883 if (!buffer || order < 0) in ring_buffer_subbuf_order_set()
5886 if (buffer->subbuf_order == order) in ring_buffer_subbuf_order_set()
5897 old_order = buffer->subbuf_order; in ring_buffer_subbuf_order_set()
5898 old_size = buffer->subbuf_size; in ring_buffer_subbuf_order_set()
5900 /* prevent another thread from changing buffer sizes */ in ring_buffer_subbuf_order_set()
5901 mutex_lock(&buffer->mutex); in ring_buffer_subbuf_order_set()
5902 atomic_inc(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
5907 buffer->subbuf_order = order; in ring_buffer_subbuf_order_set()
5908 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_order_set()
5911 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
5913 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
5916 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
5919 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
5920 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size); in ring_buffer_subbuf_order_set()
5931 /* Allocate the new size buffer */ in ring_buffer_subbuf_order_set()
5941 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
5943 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
5946 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
5991 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
5992 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
5997 buffer->subbuf_order = old_order; in ring_buffer_subbuf_order_set()
5998 buffer->subbuf_size = old_size; in ring_buffer_subbuf_order_set()
6000 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6001 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
6003 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_subbuf_order_set()
6004 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
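
Switching the sub-buffer order reallocates every per-CPU page list, so expect the existing contents of the buffer to be discarded; a sketch that moves to 8 KB sub-buffers on a 4 KB-page system:

#include <linux/ring_buffer.h>

static int my_use_8k_subbufs(struct trace_buffer *buffer)
{
        int ret;

        ret = ring_buffer_subbuf_order_set(buffer, 1);  /* order 1 == 2 pages */
        if (!ret)
                pr_info("sub-buffer size is now %d bytes\n",
                        ring_buffer_subbuf_size_get(buffer));
        return ret;
}
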
6021 * If we were to free the buffer, then the user would lose any trace that was in
6022 * the buffer.
6026 struct trace_buffer *buffer; in trace_rb_cpu_prepare() local
6031 buffer = container_of(node, struct trace_buffer, node); in trace_rb_cpu_prepare()
6032 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
6038 for_each_buffer_cpu(buffer, cpu_i) { in trace_rb_cpu_prepare()
6041 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
6042 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
6050 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
6051 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
6052 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
6053 WARN(1, "failed to allocate ring buffer on CPU %u\n", in trace_rb_cpu_prepare()
6058 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
6064 * This is a basic integrity check of the ring buffer.
6067 * writing to the per cpu ring buffer various sizes of data.
6071 * IPIs to the other CPUs to also write into the ring buffer.
6072 * this is to test the nesting ability of the buffer.
6075 * ring buffer should happen that's not expected, a big warning
6081 struct trace_buffer *buffer; member
6134 /* read rb_test_started before checking buffer enabled */ in rb_write_something()
6137 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
6151 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6177 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
6223 struct trace_buffer *buffer; in test_ringbuffer() local
6228 pr_warn("Lockdown is enabled, skipping ring buffer tests\n"); in test_ringbuffer()
6232 pr_info("Running ring buffer tests...\n"); in test_ringbuffer()
6234 buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); in test_ringbuffer()
6235 if (WARN_ON(!buffer)) in test_ringbuffer()
6238 /* Disable buffer so that threads can't write to it yet */ in test_ringbuffer()
6239 ring_buffer_record_off(buffer); in test_ringbuffer()
6242 rb_data[cpu].buffer = buffer; in test_ringbuffer()
6262 ring_buffer_record_on(buffer); in test_ringbuffer()
6264 * Show buffer is enabled before setting rb_test_started. in test_ringbuffer()
6267 * buffer gets enabled, there will always be some kind of in test_ringbuffer()
6270 * the threads see that the buffer is active. in test_ringbuffer()
6288 ring_buffer_free(buffer); in test_ringbuffer()
6328 if (RB_WARN_ON(buffer, total_dropped)) in test_ringbuffer()
6333 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
6340 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6342 RB_WARN_ON(buffer, 1); in test_ringbuffer()
6362 if (RB_WARN_ON(buffer, total_len != total_alloc || in test_ringbuffer()
6366 if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) in test_ringbuffer()
6372 pr_info("Ring buffer PASSED!\n"); in test_ringbuffer()
6374 ring_buffer_free(buffer); in test_ringbuffer()