Lines Matching refs:ring_buffer_per_cpu
409 #define alloc_cpu_buffer(cpu) (struct ring_buffer_per_cpu *) \
410 kzalloc_node(ALIGN(sizeof(struct ring_buffer_per_cpu), \
513 struct ring_buffer_per_cpu { struct
582 struct ring_buffer_per_cpu **buffers; argument
601 struct ring_buffer_per_cpu *cpu_buffer;
662 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
689 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
736 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
797 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
831 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters()
832 container_of(rbwork, struct ring_buffer_per_cpu, irq_work); in rb_wake_up_waiters()
857 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters()
889 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit()
988 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait()
1047 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait()
1112 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1113 struct ring_buffer_per_cpu *__b = \
1299 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate()
1329 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate()
1340 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set()
1363 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update()
1372 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head()
1381 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal()
1405 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page()
1457 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update()
1518 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage()
1526 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_links()
1547 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages()
1690 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer()
1913 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events()
2155 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start()
2180 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show()
2227 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update()
2241 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages()
2327 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages()
2352 static struct ring_buffer_per_cpu *
2355 struct ring_buffer_per_cpu *cpu_buffer __free(kfree) = in rb_allocate_cpu_buffer()
2449 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer()
2721 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages()
2831 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages()
2912 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages()
2928 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler()
2929 struct ring_buffer_per_cpu, update_pages_work); in update_pages_handler()
2947 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize()
3174 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event()
3246 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index()
3252 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index()
3263 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
3291 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head()
3308 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader()
3335 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page()
3492 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail()
3569 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3575 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail()
3679 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp()
3708 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp()
3726 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp()
3779 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event()
3839 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard()
3904 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit()
3911 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write()
3971 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit()
4016 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit()
4040 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups()
4144 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock()
4171 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock()
4195 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start()
4215 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end()
4236 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit()
4402 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4455 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4463 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next()
4607 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event()
4704 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve()
4751 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry()
4806 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit()
4850 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write()
4904 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries()
4910 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty()
5026 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_is_on_cpu()
5046 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu()
5066 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu()
5084 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts()
5116 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu()
5136 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu()
5155 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu()
5178 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu()
5200 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu()
5221 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu()
5240 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries()
5263 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns()
5279 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
5308 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset()
5328 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty()
5371 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp()
5431 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page()
5601 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader()
5627 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter()
5654 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events()
5660 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek()
5737 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek()
5824 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock()
5849 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock()
5869 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5917 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5947 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume()
5998 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start()
6040 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
6060 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
6126 static int rb_page_id(struct ring_buffer_per_cpu *cpu_buffer, in rb_page_id()
6141 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page()
6163 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu()
6219 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer()
6240 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6272 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus()
6313 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset()
6348 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty()
6378 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu()
6412 struct ring_buffer_per_cpu *cpu_buffer_a; in ring_buffer_swap_cpu()
6413 struct ring_buffer_per_cpu *cpu_buffer_b; in ring_buffer_swap_cpu()
6505 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page()
6554 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page()
6627 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6856 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set()
7012 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page()
7028 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page()
7036 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page()
7076 static struct ring_buffer_per_cpu *
7079 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer()
7096 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer()
7105 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped()
7151 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7243 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7253 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map()
7319 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_dup()
7336 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap()
7373 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader()
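
A pattern visible throughout the listing is that a struct trace_buffer owns an array of struct ring_buffer_per_cpu pointers (line 582, "struct ring_buffer_per_cpu **buffers"), and most entry points first resolve the per-CPU buffer with buffer->buffers[cpu] or buffer->buffers[smp_processor_id()] (see the entries for ring_buffer_event_time_stamp(), full_hit(), ring_buffer_peek(), ring_buffer_reset_cpu(), ring_buffer_read_page()). The sketch below is only a minimal userspace illustration of that ownership and lookup idiom, not the kernel code: apart from the names ring_buffer_per_cpu, trace_buffer, buffers, and cpu_buffer taken from the listing, every field and the helper get_cpu_buffer() are hypothetical and do not reflect the real definitions in kernel/trace/ring_buffer.c.

    /*
     * Minimal userspace sketch of the ownership pattern seen above:
     * one top-level buffer object holds an array of per-CPU buffer
     * structures, and callers index it by CPU number.
     *
     * Illustrative only: the real struct ring_buffer_per_cpu carries
     * locks, page lists, timestamps, irq_work and more; the fields
     * below (cpu, entries, nr_cpus) and get_cpu_buffer() are made up.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct ring_buffer_per_cpu {
            int cpu;                /* which CPU this buffer belongs to */
            unsigned long entries;  /* hypothetical: events written */
    };

    struct trace_buffer {
            int nr_cpus;                          /* hypothetical field name */
            struct ring_buffer_per_cpu **buffers; /* per-CPU buffer array */
    };

    /* Hypothetical helper mirroring the "buffer->buffers[cpu]" idiom. */
    static struct ring_buffer_per_cpu *
    get_cpu_buffer(struct trace_buffer *buffer, int cpu)
    {
            if (cpu < 0 || cpu >= buffer->nr_cpus)
                    return NULL;
            return buffer->buffers[cpu];
    }

    int main(void)
    {
            struct trace_buffer buffer = { .nr_cpus = 4 };
            int cpu;

            buffer.buffers = calloc(buffer.nr_cpus, sizeof(*buffer.buffers));
            if (!buffer.buffers)
                    return 1;

            for (cpu = 0; cpu < buffer.nr_cpus; cpu++) {
                    buffer.buffers[cpu] = calloc(1, sizeof(*buffer.buffers[cpu]));
                    if (!buffer.buffers[cpu])
                            return 1;
                    buffer.buffers[cpu]->cpu = cpu;
            }

            /* The idiom seen throughout the listing: resolve, then operate. */
            struct ring_buffer_per_cpu *cpu_buffer = get_cpu_buffer(&buffer, 2);
            if (cpu_buffer)
                    printf("cpu_buffer for CPU %d\n", cpu_buffer->cpu);

            for (cpu = 0; cpu < buffer.nr_cpus; cpu++)
                    free(buffer.buffers[cpu]);
            free(buffer.buffers);
            return 0;
    }

Keeping the per-CPU state behind a single indexed array is what lets the functions listed above (resize, reset, peek, consume, map) take either a cpu argument or the current CPU and then operate on one ring_buffer_per_cpu without touching the others.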