Lines Matching +full:array +full:- +full:nest

1 // SPDX-License-Identifier: GPL-2.0
81-122  * (Fragments of the ASCII-art diagrams from the file-header comment: a sequence of panels showing how the separate reader page is linked into the ring of sub-buffer pages and swapped with the head page, which then becomes the new reader page. Only the lines matched by the search are shown, so the diagrams are incomplete here.)
138 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
165 (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
169 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; in rb_null_event()
175 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_set_padding()
176 event->time_delta = 0; in rb_event_set_padding()
184 if (event->type_len) in rb_event_data_length()
185 length = event->type_len * RB_ALIGNMENT; in rb_event_data_length()
187 length = event->array[0]; in rb_event_data_length()
199 switch (event->type_len) { in rb_event_length()
203 return -1; in rb_event_length()
204 return event->array[0] + RB_EVNT_HDR_SIZE; in rb_event_length()
239 * ring_buffer_event_length - return the length of the event
256 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in ring_buffer_event_length()
258 length -= RB_EVNT_HDR_SIZE; in ring_buffer_event_length()
259 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) in ring_buffer_event_length()
260 length -= sizeof(event->array[0]); in ring_buffer_event_length()
271 WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); in rb_event_data()
272 /* If length is in len field, then array[0] has the data */ in rb_event_data()
273 if (event->type_len) in rb_event_data()
274 return (void *)&event->array[0]; in rb_event_data()
275 /* Otherwise length is in array[0] and array[1] has the data */ in rb_event_data()
276 return (void *)&event->array[1]; in rb_event_data()
280 * ring_buffer_event_data - return the data of the event
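The two helpers above encode the event layout: a small event stores its payload size in type_len (in RB_ALIGNMENT units) with data starting at array[0], while a larger event has type_len == 0, its byte length in array[0], and data at array[1]. A minimal reader-side sketch using the exported wrappers; the event pointer is assumed to come from ring_buffer_consume() or ring_buffer_peek(), and the hex dump is only for illustration:

#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void inspect_event(struct ring_buffer_event *event)
{
	/* Points at array[0] or array[1], depending on where the length lives. */
	void *payload = ring_buffer_event_data(event);
	/* Payload size in bytes, header already subtracted. */
	unsigned int len = ring_buffer_event_length(event);

	print_hex_dump(KERN_DEBUG, "rb event: ", DUMP_PREFIX_OFFSET,
		       16, 1, payload, len, false);
}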
290 for_each_cpu(cpu, buffer->cpumask)
293 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
296 #define TS_MASK ((1ULL << TS_SHIFT) - 1)
303 ts = event->array[0]; in rb_event_time_stamp()
305 ts += event->time_delta; in rb_event_time_stamp()
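rb_event_time_stamp() above reassembles an extended timestamp from two fields: the low bits live in the 27-bit time_delta member and the rest in array[0]. A worked sketch of that split, assuming TS_SHIFT is 27 as implied by the TS_MASK definition; the macro and helper names are illustrative:

#include <linux/types.h>
#include <linux/bug.h>

#define DEMO_TS_SHIFT	27
#define DEMO_TS_MASK	((1ULL << DEMO_TS_SHIFT) - 1)

static void demo_split_delta(u64 delta)
{
	u32 time_delta = delta & DEMO_TS_MASK;		/* -> event->time_delta (27 bits) */
	u32 high       = delta >> DEMO_TS_SHIFT;	/* -> event->array[0]   (32 bits) */
	u64 rejoined   = ((u64)high << DEMO_TS_SHIFT) + time_delta;

	/* Round-trips for any delta below 2^59 (32 + 27 bits). */
	WARN_ON(rejoined != delta);
}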
361 local_set(&bpage->commit, 0); in rb_init_page()
366 return local_read(&bpage->page->commit); in rb_page_commit()
371 free_pages((unsigned long)bpage->page, bpage->order); in free_buffer_page()
408 * EXTEND - wants a time extend
409 * ABSOLUTE - the buffer requests all events to have absolute time stamps
410 * FORCE - force a full time stamp.
465 unsigned long nest; member
558 (unsigned int)buffer->subbuf_size, in ring_buffer_print_page_header()
566 *ret = local64_read(&t->time); in rb_time_read()
570 local64_set(&t->time, val); in rb_time_set()
584 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
585 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
596 commit = local_read(&page->page->commit); in verify_event()
597 write = local_read(&page->write); in verify_event()
598 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
599 addr < (unsigned long)&page->page->data[write]) in verify_event()
602 next = rb_list_head(page->list.next); in verify_event()
636 * ring_buffer_event_time_stamp - return the event's current time stamp
655 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
656 unsigned int nest; in ring_buffer_event_time_stamp() local
660 if (event->type_len == RINGBUF_TYPE_TIME_STAMP) { in ring_buffer_event_time_stamp()
662 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
665 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
667 if (WARN_ON_ONCE(!nest)) in ring_buffer_event_time_stamp()
671 if (likely(--nest < MAX_NEST)) in ring_buffer_event_time_stamp()
672 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
675 WARN_ONCE(1, "nest (%d) greater than max", nest); in ring_buffer_event_time_stamp()
678 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
684 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
692 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
696 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
708 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
709 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
710 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
715 cnt -= lost; in ring_buffer_nr_dirty_pages()
723 return cnt - read; in ring_buffer_nr_dirty_pages()
728 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
732 nr_pages = cpu_buffer->nr_pages; in full_hit()
737 * Add one as dirty will never equal nr_pages, as the sub-buffer in full_hit()
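full_hit() compares the number of dirty (unread) sub-buffers against a caller-supplied percentage; one is added because the sub-buffer the writer currently sits on is never counted as dirty, which matters when the threshold is 100%. A hedged sketch of just that comparison (the helper name is made up):

static bool demo_watermark_reached(size_t dirty, size_t nr_pages, int full)
{
	/* "full" is a percentage of nr_pages; 0 means any data counts. */
	if (!nr_pages || !full)
		return true;

	return (dirty + 1) * 100 >= (size_t)full * nr_pages;
}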
747 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
756 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
757 if (rbwork->full_waiters_pending || rbwork->wakeup_full) { in rb_wake_up_waiters()
763 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
764 rbwork->wakeup_full = false; in rb_wake_up_waiters()
765 rbwork->full_waiters_pending = false; in rb_wake_up_waiters()
768 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
769 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
771 wake_up_all(&rbwork->full_waiters); in rb_wake_up_waiters()
776 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
797 rbwork = &buffer->irq_work; in ring_buffer_wake_waiters()
799 if (WARN_ON_ONCE(!buffer->buffers)) in ring_buffer_wake_waiters()
804 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
808 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
812 irq_work_queue(&rbwork->work); in ring_buffer_wake_waiters()
824 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
833 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
834 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
837 if (!cpu_buffer->shortest_full || in rb_watermark_hit()
838 cpu_buffer->shortest_full > full) in rb_watermark_hit()
839 cpu_buffer->shortest_full = full; in rb_watermark_hit()
840 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
846 * ring_buffer_wait - wait for input to the ring buffer
868 work = &buffer->irq_work; in ring_buffer_wait()
872 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
873 return -ENODEV; in ring_buffer_wait()
874 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
875 work = &cpu_buffer->irq_work; in ring_buffer_wait()
879 prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
881 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
904 work->full_waiters_pending = true; in ring_buffer_wait()
906 work->waiters_pending = true; in ring_buffer_wait()
912 ret = -EINTR; in ring_buffer_wait()
919 finish_wait(&work->full_waiters, &wait); in ring_buffer_wait()
921 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
924 ret = -EINTR; in ring_buffer_wait()
930 * ring_buffer_poll_wait - poll on buffer input
951 rbwork = &buffer->irq_work; in ring_buffer_poll_wait()
954 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
957 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
958 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
964 poll_wait(filp, &rbwork->full_waiters, poll_table); in ring_buffer_poll_wait()
966 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_poll_wait()
967 rbwork->full_waiters_pending = true; in ring_buffer_poll_wait()
968 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
969 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
970 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
971 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_poll_wait()
973 poll_wait(filp, &rbwork->waiters, poll_table); in ring_buffer_poll_wait()
974 rbwork->waiters_pending = true; in ring_buffer_poll_wait()
1009 atomic_inc(&__b->buffer->record_disabled); \
1011 atomic_inc(&b->record_disabled); \
1024 /* Skip retpolines :-( */ in rb_time_stamp()
1025 if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) in rb_time_stamp()
1028 ts = buffer->clock(); in rb_time_stamp()
1086-1104  * (Fragments of a comment table and ASCII-art diagram: the table lists the meanings of the two flag bits carried in head->list->prev->next (normal page, points to head page, new head page), and the diagram shows the tail (T), reader (R) and next (N) pages, with the key "---X-->" marking a pointer that has the HEAD flag set. Only the matched lines are shown, so the table and diagram are incomplete here.)
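The scheme the (fragmented) diagram describes keeps the HEAD and UPDATE markers in the two low bits of a page's ->next pointer, so the head page can be claimed or moved with a single cmpxchg. A sketch of the masking, assuming the values used in this file (RB_PAGE_HEAD == 1, RB_PAGE_UPDATE == 2, RB_FLAG_MASK == 3); rb_list_head() in the fragments below does the same stripping:

#include <linux/list.h>

#define DEMO_RB_PAGE_HEAD	1UL
#define DEMO_RB_FLAG_MASK	3UL

static struct list_head *demo_strip_flags(struct list_head *ptr)
{
	/* Drop the flag bits to recover the real pointer. */
	return (struct list_head *)((unsigned long)ptr & ~DEMO_RB_FLAG_MASK);
}

static bool demo_points_to_head(struct list_head *ptr)
{
	return ((unsigned long)ptr & DEMO_RB_FLAG_MASK) == DEMO_RB_PAGE_HEAD;
}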
1134 * rb_list_head - remove any bit
1144 * rb_is_head_page - test if the given page is the head page
1156 val = (unsigned long)list->next; in rb_is_head_page()
1158 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1173 struct list_head *list = page->list.prev; in rb_is_reader_page()
1175 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1179 * rb_set_list_to_head - set a list_head to be pointing to head.
1185 ptr = (unsigned long *)&list->next; in rb_set_list_to_head()
1191 * rb_head_page_activate - sets up head page
1197 head = cpu_buffer->head_page; in rb_head_page_activate()
1204 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
1209 unsigned long *ptr = (unsigned long *)&list->next; in rb_list_head_clear()
1215 * rb_head_page_deactivate - clears head page ptr (for free list)
1223 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1225 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1235 unsigned long val = (unsigned long)&head->list; in rb_head_page_set()
1238 list = &prev->list; in rb_head_page_set()
1242 ret = cmpxchg((unsigned long *)&list->next, in rb_head_page_set()
1281 struct list_head *p = rb_list_head((*bpage)->list.next); in rb_inc_page()
1294 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1298 list = cpu_buffer->pages; in rb_set_head_page()
1299 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1302 page = head = cpu_buffer->head_page; in rb_set_head_page()
1311 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
1312 cpu_buffer->head_page = page; in rb_set_head_page()
1327 unsigned long *ptr = (unsigned long *)&old->list.prev->next; in rb_head_page_replace()
1333 return try_cmpxchg(ptr, &val, (unsigned long)&new->list); in rb_head_page_replace()
1337 * rb_tail_page_update - move the tail page forward
1355 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); in rb_tail_page_update()
1356 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); in rb_tail_page_update()
1358 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1370 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1385 (void)local_cmpxchg(&next_page->write, old_write, val); in rb_tail_page_update()
1386 (void)local_cmpxchg(&next_page->entries, old_entries, eval); in rb_tail_page_update()
1393 local_set(&next_page->page->commit, 0); in rb_tail_page_update()
1396 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1409 * rb_check_pages - integrity check of buffer pages
1417 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1421 rb_list_head(rb_list_head(head->next)->prev) != head)) in rb_check_pages()
1425 rb_list_head(rb_list_head(head->prev)->next) != head)) in rb_check_pages()
1428 for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { in rb_check_pages()
1430 rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) in rb_check_pages()
1434 rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) in rb_check_pages()
1443 bool user_thread = current->mm != NULL; in __rb_allocate_pages()
1456 return -ENOMEM; in __rb_allocate_pages()
1460 * gracefully without invoking oom-killer and the system is not in __rb_allocate_pages()
1480 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1486 list_add(&bpage->list, pages); in __rb_allocate_pages()
1488 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, in __rb_allocate_pages()
1489 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
1492 bpage->page = page_address(page); in __rb_allocate_pages()
1493 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
1494 rb_init_page(bpage->page); in __rb_allocate_pages()
1506 list_del_init(&bpage->list); in __rb_allocate_pages()
1512 return -ENOMEM; in __rb_allocate_pages()
1523 return -ENOMEM; in rb_allocate_pages()
1530 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1533 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1553 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1554 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1555 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1556 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1557 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1558 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1559 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1560 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1561 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1562 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1571 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1573 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
1576 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
1577 rb_init_page(bpage->page); in rb_allocate_cpu_buffer()
1579 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1580 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1586 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1587 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1588 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1595 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1604 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1607 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1609 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1615 list_del_init(&bpage->list); in rb_free_cpu_buffer()
1622 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
1628 * __ring_buffer_alloc - allocate a new ring_buffer
1653 if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) in __ring_buffer_alloc()
1656 /* Default buffer page size - one system page */ in __ring_buffer_alloc()
1657 buffer->subbuf_order = 0; in __ring_buffer_alloc()
1658 buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE; in __ring_buffer_alloc()
1660 /* Max payload is buffer page size - header (8bytes) */ in __ring_buffer_alloc()
1661 buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2); in __ring_buffer_alloc()
1663 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in __ring_buffer_alloc()
1664 buffer->flags = flags; in __ring_buffer_alloc()
1665 buffer->clock = trace_clock_local; in __ring_buffer_alloc()
1666 buffer->reader_lock_key = key; in __ring_buffer_alloc()
1668 init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); in __ring_buffer_alloc()
1669 init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
1675 buffer->cpus = nr_cpu_ids; in __ring_buffer_alloc()
1678 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), in __ring_buffer_alloc()
1680 if (!buffer->buffers) in __ring_buffer_alloc()
1684 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1685 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1686 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1689 ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in __ring_buffer_alloc()
1693 mutex_init(&buffer->mutex); in __ring_buffer_alloc()
1699 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1700 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1702 kfree(buffer->buffers); in __ring_buffer_alloc()
1705 free_cpumask_var(buffer->cpumask); in __ring_buffer_alloc()
1714 * ring_buffer_free - free a ring buffer.
1722 cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); in ring_buffer_free()
1724 irq_work_sync(&buffer->irq_work.work); in ring_buffer_free()
1727 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
1729 kfree(buffer->buffers); in ring_buffer_free()
1730 free_cpumask_var(buffer->cpumask); in ring_buffer_free()
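The allocation and free fragments above are the entry points for a client of the generic ring buffer. A minimal, hedged lifecycle sketch; ring_buffer_alloc() is the usual wrapper macro around __ring_buffer_alloc() that supplies the lock class key:

#include <linux/ring_buffer.h>
#include <linux/errno.h>

static int demo_lifecycle(void)
{
	struct trace_buffer *buffer;

	/* At least 1 MB per CPU, oldest data overwritten when full. */
	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	/* ... produce and consume events here ... */

	ring_buffer_free(buffer);
	return 0;
}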
1739 buffer->clock = clock; in ring_buffer_set_clock()
1744 buffer->time_stamp_abs = abs; in ring_buffer_set_time_stamp_abs()
1749 return buffer->time_stamp_abs; in ring_buffer_time_stamp_abs()
1756 return local_read(&bpage->entries) & RB_WRITE_MASK; in rb_page_entries()
1761 return local_read(&bpage->write) & RB_WRITE_MASK; in rb_page_write()
1776 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1777 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1787 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1793 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1794 tail_page = rb_list_head(tail_page->next); in rb_remove_pages()
1798 first_page = list_entry(rb_list_head(to_remove->next), in rb_remove_pages()
1802 to_remove = rb_list_head(to_remove)->next; in rb_remove_pages()
1806 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
1808 next_page = rb_list_head(to_remove)->next; in rb_remove_pages()
1815 tail_page->next = (struct list_head *)((unsigned long)next_page | in rb_remove_pages()
1818 next_page->prev = tail_page; in rb_remove_pages()
1821 cpu_buffer->pages = next_page; in rb_remove_pages()
1825 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1829 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1830 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1832 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1854 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1855 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
1856 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
1864 nr_removed--; in rb_remove_pages()
1876 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1882 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
1890 * 2. We cmpxchg the prev_page->next to point from head page to the in rb_insert_pages()
1892 * 3. Finally, we update the head->prev to the end of new list. in rb_insert_pages()
1899 while (retries--) { in rb_insert_pages()
1907 head_page = &hpage->list; in rb_insert_pages()
1908 prev_page = head_page->prev; in rb_insert_pages()
1910 first_page = pages->next; in rb_insert_pages()
1911 last_page = pages->prev; in rb_insert_pages()
1916 last_page->next = head_page_with_bit; in rb_insert_pages()
1917 first_page->prev = prev_page; in rb_insert_pages()
1920 if (try_cmpxchg(&prev_page->next, in rb_insert_pages()
1927 head_page->prev = last_page; in rb_insert_pages()
1940 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
1945 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1947 list_del_init(&bpage->list); in rb_insert_pages()
1958 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
1962 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
1965 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1973 complete(&cpu_buffer->update_done); in update_pages_handler()
1977 * ring_buffer_resize - resize the ring buffer
1982 * Minimum size is 2 * buffer->subbuf_size.
1994 * Always succeed at resizing a non-existent buffer: in ring_buffer_resize()
2001 !cpumask_test_cpu(cpu_id, buffer->cpumask)) in ring_buffer_resize()
2004 nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size); in ring_buffer_resize()
2011 mutex_lock(&buffer->mutex); in ring_buffer_resize()
2012 atomic_inc(&buffer->resizing); in ring_buffer_resize()
2021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2022 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2023 err = -EBUSY; in ring_buffer_resize()
2030 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2032 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2033 cpu_buffer->nr_pages; in ring_buffer_resize()
2037 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2043 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2044 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2045 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2047 err = -ENOMEM; in ring_buffer_resize()
2061 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2062 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2068 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2075 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2077 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2085 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2086 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2090 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2091 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2096 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2098 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2106 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2107 err = -EBUSY; in ring_buffer_resize()
2111 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2112 cpu_buffer->nr_pages; in ring_buffer_resize()
2114 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2115 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2116 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2117 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2118 err = -ENOMEM; in ring_buffer_resize()
2136 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2137 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2141 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2153 if (atomic_read(&buffer->record_disabled)) { in ring_buffer_resize()
2154 atomic_inc(&buffer->record_disabled); in ring_buffer_resize()
2163 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2166 atomic_dec(&buffer->record_disabled); in ring_buffer_resize()
2169 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2170 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
2177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2178 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2180 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2183 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
2185 list_del_init(&bpage->list); in ring_buffer_resize()
2190 atomic_dec(&buffer->resizing); in ring_buffer_resize()
2191 mutex_unlock(&buffer->mutex); in ring_buffer_resize()
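ring_buffer_resize() takes the new size in bytes and either one CPU or RING_BUFFER_ALL_CPUS; as the fragments show, it rounds up to whole sub-buffers, allocates the extra pages first and rolls back on failure. A hedged usage sketch:

#include <linux/ring_buffer.h>

static int demo_grow(struct trace_buffer *buffer)
{
	/* Grow every per-CPU buffer to 4 MB; returns 0 or a -errno. */
	return ring_buffer_resize(buffer, 4 << 20, RING_BUFFER_ALL_CPUS);
}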
2198 mutex_lock(&buffer->mutex); in ring_buffer_change_overwrite()
2200 buffer->flags |= RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2202 buffer->flags &= ~RB_FL_OVERWRITE; in ring_buffer_change_overwrite()
2203 mutex_unlock(&buffer->mutex); in ring_buffer_change_overwrite()
2209 return bpage->page->data + index; in __rb_page_index()
2215 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2216 cpu_buffer->reader_page->read); in rb_reader_event()
2223 struct buffer_page *iter_head_page = iter->head_page; in rb_iter_head_event()
2227 if (iter->head != iter->next_event) in rb_iter_head_event()
2228 return iter->event; in rb_iter_head_event()
2239 if (iter->head > commit - 8) in rb_iter_head_event()
2242 event = __rb_page_index(iter_head_page, iter->head); in rb_iter_head_event()
2251 if ((iter->head + length) > commit || length > iter->event_size) in rb_iter_head_event()
2255 memcpy(iter->event, event, length); in rb_iter_head_event()
2263 if (iter->page_stamp != iter_head_page->page->time_stamp || in rb_iter_head_event()
2267 iter->next_event = iter->head + length; in rb_iter_head_event()
2268 return iter->event; in rb_iter_head_event()
2271 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_head_event()
2272 iter->head = 0; in rb_iter_head_event()
2273 iter->next_event = 0; in rb_iter_head_event()
2274 iter->missed_events = 1; in rb_iter_head_event()
2287 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2295 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
2297 return addr - BUF_PAGE_HDR_SIZE; in rb_event_index()
2302 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
2310 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2311 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2313 rb_inc_page(&iter->head_page); in rb_inc_iter()
2315 iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter()
2316 iter->head = 0; in rb_inc_iter()
2317 iter->next_event = 0; in rb_inc_iter()
2321 * rb_handle_head_page - writer hit the head page
2325 * -1 on error
2349 * NORMAL - an interrupt already moved it for us in rb_handle_head_page()
2350 * HEAD - we are the first to get here. in rb_handle_head_page()
2351 * UPDATE - we are the interrupt interrupting in rb_handle_head_page()
2353 * MOVED - a reader on another CPU moved the next in rb_handle_head_page()
2365 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2366 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2367 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2399 return -1; in rb_handle_head_page()
2424 * HEAD - an interrupt came in and already set it. in rb_handle_head_page()
2425 * NORMAL - One of two things: in rb_handle_head_page()
2437 return -1; in rb_handle_head_page()
2453 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2476 return -1; in rb_handle_head_page()
2486 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
2487 struct buffer_page *tail_page = info->tail_page; in rb_reset_tail()
2489 unsigned long length = info->length; in rb_reset_tail()
2502 tail_page->real_end = 0; in rb_reset_tail()
2504 local_sub(length, &tail_page->write); in rb_reset_tail()
2515 tail_page->real_end = tail; in rb_reset_tail()
2529 if (tail > (bsize - RB_EVNT_MIN_SIZE)) { in rb_reset_tail()
2539 local_sub(length, &tail_page->write); in rb_reset_tail()
2544 event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE; in rb_reset_tail()
2545 event->type_len = RINGBUF_TYPE_PADDING; in rb_reset_tail()
2547 event->time_delta = 1; in rb_reset_tail()
2550 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2552 /* Make sure the padding is visible before the tail_page->write update */ in rb_reset_tail()
2556 length = (tail + length) - bsize; in rb_reset_tail()
2557 local_sub(length, &tail_page->write); in rb_reset_tail()
2569 struct buffer_page *tail_page = info->tail_page; in rb_move_tail()
2570 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2571 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2585 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2603 if (rb_is_head_page(next_page, &tail_page->list)) { in rb_move_tail()
2609 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2614 if (!(buffer->flags & RB_FL_OVERWRITE)) { in rb_move_tail()
2615 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2637 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2638 cpu_buffer->tail_page) && in rb_move_tail()
2639 (cpu_buffer->commit_page == in rb_move_tail()
2640 cpu_buffer->reader_page))) { in rb_move_tail()
2641 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2656 local_inc(&cpu_buffer->committing); in rb_move_tail()
2659 return ERR_PTR(-EAGAIN); in rb_move_tail()
2674 event->type_len = RINGBUF_TYPE_TIME_STAMP; in rb_add_time_stamp()
2676 event->type_len = RINGBUF_TYPE_TIME_EXTEND; in rb_add_time_stamp()
2680 event->time_delta = delta & TS_MASK; in rb_add_time_stamp()
2681 event->array[0] = delta >> TS_SHIFT; in rb_add_time_stamp()
2684 event->time_delta = 0; in rb_add_time_stamp()
2685 event->array[0] = 0; in rb_add_time_stamp()
2705 (unsigned long long)info->delta, in rb_check_timestamp()
2706 (unsigned long long)info->ts, in rb_check_timestamp()
2707 (unsigned long long)info->before, in rb_check_timestamp()
2708 (unsigned long long)info->after, in rb_check_timestamp()
2709 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
2723 bool abs = info->add_timestamp & in rb_add_timestamp()
2726 if (unlikely(info->delta > (1ULL << 59))) { in rb_add_timestamp()
2731 if (abs && (info->ts & TS_MSB)) { in rb_add_timestamp()
2732 info->delta &= ABS_TS_MASK; in rb_add_timestamp()
2735 } else if (info->before == info->after && info->before > info->ts) { in rb_add_timestamp()
2745 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", in rb_add_timestamp()
2746 info->before, info->ts); in rb_add_timestamp()
2751 info->delta = 0; in rb_add_timestamp()
2753 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
2754 *length -= RB_LEN_TIME_EXTEND; in rb_add_timestamp()
2759 * rb_update_event - update event type and data
2774 unsigned length = info->length; in rb_update_event()
2775 u64 delta = info->delta; in rb_update_event()
2776 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event() local
2778 if (!WARN_ON_ONCE(nest >= MAX_NEST)) in rb_update_event()
2779 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2785 if (unlikely(info->add_timestamp)) in rb_update_event()
2788 event->time_delta = delta; in rb_update_event()
2789 length -= RB_EVNT_HDR_SIZE; in rb_update_event()
2791 event->type_len = 0; in rb_update_event()
2792 event->array[0] = length; in rb_update_event()
2794 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); in rb_update_event()
2799 struct ring_buffer_event event; /* Used only for sizeof array */ in rb_calculate_event_length()
2806 length += sizeof(event.array[0]); in rb_calculate_event_length()
2840 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
2842 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2848 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { in rb_try_to_discard()
2850 local_read(&bpage->write) & ~RB_WRITE_MASK; in rb_try_to_discard()
2864 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
2884 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { in rb_try_to_discard()
2886 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2897 local_inc(&cpu_buffer->committing); in rb_start_commit()
2898 local_inc(&cpu_buffer->commits); in rb_start_commit()
2915 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2917 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2918 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2921 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2927 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2928 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2929 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
2934 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
2938 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2939 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2941 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
2954 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2963 !local_read(&cpu_buffer->committing))) in rb_end_commit()
2967 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
2970 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
2973 local_dec(&cpu_buffer->committing); in rb_end_commit()
2983 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
2984 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
2985 local_inc(&cpu_buffer->committing); in rb_end_commit()
2995 /* array[0] holds the actual length for the discarded event */ in rb_event_discard()
2996 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; in rb_event_discard()
2997 event->type_len = RINGBUF_TYPE_PADDING; in rb_event_discard()
2999 if (!event->time_delta) in rb_event_discard()
3000 event->time_delta = 1; in rb_event_discard()
3005 local_inc(&cpu_buffer->entries); in rb_commit()
3012 if (buffer->irq_work.waiters_pending) { in rb_wakeups()
3013 buffer->irq_work.waiters_pending = false; in rb_wakeups()
3015 irq_work_queue(&buffer->irq_work.work); in rb_wakeups()
3018 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3019 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3021 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3024 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3027 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3030 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3033 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3035 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3038 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3039 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3041 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3078 * 101 - 1 = 100
3081 * 1010 - 1 = 1001
3116 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3119 bit = RB_CTX_NORMAL - bit; in trace_recursive_lock()
3121 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3128 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3134 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3135 cpu_buffer->current_context = val; in trace_recursive_lock()
3143 cpu_buffer->current_context &= in trace_recursive_unlock()
3144 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
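The "101 - 1 = 100" comment fragments and the trace_recursive_unlock() lines rely on the identity that x & (x - 1) clears the lowest set bit, which is the context bit trace_recursive_lock() set (the real code additionally offsets everything by cpu_buffer->nest). A short worked sketch:

/*
 * 0b101  & (0b101  - 1) == 0b100   (bit 0 cleared)
 * 0b1010 & (0b1010 - 1) == 0b1000  (bit 1 cleared)
 */
static unsigned int demo_clear_lowest_bit(unsigned int val)
{
	return val & (val - 1);
}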
3151 * ring_buffer_nest_start - Allow to trace while nested
3157 * will allow this function to nest within a currently active
3171 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3173 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3177 * ring_buffer_nest_end - Allow to trace while nested
3190 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3192 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
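ring_buffer_nest_start()/ring_buffer_nest_end() shift the recursion-check bits by NESTED_BITS so that a write issued from inside another ring buffer write (for instance, tracing into a second buffer from a trace callback) is not rejected as recursion. A hedged sketch; "aux_buffer" and the payload are illustrative, and the two calls must always be paired:

#include <linux/ring_buffer.h>

static void demo_nested_write(struct trace_buffer *aux_buffer,
			      void *data, unsigned long len)
{
	ring_buffer_nest_start(aux_buffer);
	ring_buffer_write(aux_buffer, len, data);
	ring_buffer_nest_end(aux_buffer);
}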
3197 * ring_buffer_unlock_commit - commit a reserved
3209 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3250 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_flags()
3255 if (entry->flags & TRACE_FLAG_SOFTIRQ) in show_flags()
3258 if (entry->flags & TRACE_FLAG_HARDIRQ) in show_flags()
3261 if (entry->flags & TRACE_FLAG_NMI) in show_flags()
3271 if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry)) in show_irq()
3275 if (entry->flags & TRACE_FLAG_IRQS_OFF) in show_irq()
3305 ts = bpage->time_stamp; in dump_buffer_page()
3310 event = (struct ring_buffer_event *)(bpage->data + e); in dump_buffer_page()
3312 switch (event->type_len) { in dump_buffer_page()
3329 ts += event->time_delta; in dump_buffer_page()
3331 e, ts, event->time_delta); in dump_buffer_page()
3335 ts += event->time_delta; in dump_buffer_page()
3337 e, ts, event->time_delta, in dump_buffer_page()
3358 atomic_inc(&cpu_buffer->record_disabled); \
3364 /* Do not re-enable checking */ \
3382 bpage = info->tail_page->page; in check_buffer()
3386 tail = local_read(&bpage->commit); in check_buffer()
3387 } else if (info->add_timestamp & in check_buffer()
3397 if (tail <= 8 || tail > local_read(&bpage->commit)) in check_buffer()
3406 ts = bpage->time_stamp; in check_buffer()
3410 event = (struct ring_buffer_event *)(bpage->data + e); in check_buffer()
3412 switch (event->type_len) { in check_buffer()
3424 cpu_buffer->cpu, ts, delta); in check_buffer()
3430 if (event->time_delta == 1) in check_buffer()
3434 ts += event->time_delta; in check_buffer()
3441 if ((full && ts > info->ts) || in check_buffer()
3442 (!full && ts + info->delta != info->ts)) { in check_buffer()
3444 cpu_buffer->cpu, in check_buffer()
3445 ts + info->delta, info->ts, info->delta, in check_buffer()
3446 info->before, info->after, in check_buffer()
3468 /* Don't let the compiler play games with cpu_buffer->tail_page */ in __rb_reserve_next()
3469 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3471 /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; in __rb_reserve_next()
3473 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3474 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3476 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3478 if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { in __rb_reserve_next()
3479 info->delta = info->ts; in __rb_reserve_next()
3487 /* Use the sub-buffer timestamp */ in __rb_reserve_next()
3488 info->delta = 0; in __rb_reserve_next()
3489 } else if (unlikely(info->before != info->after)) { in __rb_reserve_next()
3490 info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3491 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3493 info->delta = info->ts - info->after; in __rb_reserve_next()
3494 if (unlikely(test_time_stamp(info->delta))) { in __rb_reserve_next()
3495 info->add_timestamp |= RB_ADD_STAMP_EXTEND; in __rb_reserve_next()
3496 info->length += RB_LEN_TIME_EXTEND; in __rb_reserve_next()
3501 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3503 /*C*/ write = local_add_return(info->length, &tail_page->write); in __rb_reserve_next()
3508 tail = write - info->length; in __rb_reserve_next()
3511 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
3518 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3525 if (likely(!(info->add_timestamp & in __rb_reserve_next()
3528 info->delta = info->ts - info->after; in __rb_reserve_next()
3531 info->delta = info->ts; in __rb_reserve_next()
3535 /* SLOW PATH - Interrupted between A and C */ in __rb_reserve_next()
3538 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3546 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3547 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
3550 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3552 /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && in __rb_reserve_next()
3553 info->after == info->before && info->after < ts) { in __rb_reserve_next()
3556 * safe to use info->after for the delta as it in __rb_reserve_next()
3557 * matched info->before and is still valid. in __rb_reserve_next()
3559 info->delta = ts - info->after; in __rb_reserve_next()
3569 info->delta = 0; in __rb_reserve_next()
3571 info->ts = ts; in __rb_reserve_next()
3572 info->add_timestamp &= ~RB_ADD_STAMP_FORCE; in __rb_reserve_next()
3579 if (unlikely(!tail && !(info->add_timestamp & in __rb_reserve_next()
3581 info->delta = 0; in __rb_reserve_next()
3588 local_inc(&tail_page->entries); in __rb_reserve_next()
3595 tail_page->page->time_stamp = info->ts; in __rb_reserve_next()
3598 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3630 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3631 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3632 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3639 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3642 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
3666 if (unlikely(PTR_ERR(event) == -EAGAIN)) { in rb_reserve_next_event()
3668 info.length -= RB_LEN_TIME_EXTEND; in rb_reserve_next_event()
3680 * ring_buffer_lock_reserve - reserve a part of the buffer
3704 if (unlikely(atomic_read(&buffer->record_disabled))) in ring_buffer_lock_reserve()
3709 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3712 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3714 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3717 if (unlikely(length > buffer->max_data_size)) in ring_buffer_lock_reserve()
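ring_buffer_lock_reserve()/ring_buffer_unlock_commit() form the two-step write path: reserve space, fill the payload returned by ring_buffer_event_data(), then commit. A hedged sketch; the payload struct is made up, and in this tree ring_buffer_unlock_commit() appears to take only the buffer:

#include <linux/ring_buffer.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_entry {
	u32	id;
	u64	value;
};

static int demo_write_entry(struct trace_buffer *buffer, u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct demo_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->id = id;
	entry->value = value;

	return ring_buffer_unlock_commit(buffer);
}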
3748 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3751 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
3754 if (likely(bpage->page == (void *)addr)) { in rb_decrement_entry()
3755 local_dec(&bpage->entries); in rb_decrement_entry()
3766 if (bpage->page == (void *)addr) { in rb_decrement_entry()
3767 local_dec(&bpage->entries); in rb_decrement_entry()
3778 * ring_buffer_discard_commit - discard an event that has not been committed
3806 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3813 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
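ring_buffer_discard_commit() lets a writer drop an event it has reserved but not yet committed, which is what event filtering builds on. A hedged sketch; the "keep" test is only there to give the discard a reason:

#include <linux/ring_buffer.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_write_filtered(struct trace_buffer *buffer, u64 value, bool keep)
{
	struct ring_buffer_event *event;
	u64 *p;

	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
	if (!event)
		return -EBUSY;

	p = ring_buffer_event_data(event);
	*p = value;

	if (!keep) {
		/* Must run before the matching unlock_commit, same context. */
		ring_buffer_discard_commit(buffer, event);
		return 0;
	}

	return ring_buffer_unlock_commit(buffer);
}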
3830 * ring_buffer_write - write data to the buffer without reserving
3849 int ret = -EBUSY; in ring_buffer_write()
3854 if (atomic_read(&buffer->record_disabled)) in ring_buffer_write()
3859 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3862 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3864 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3867 if (length > buffer->max_data_size) in ring_buffer_write()
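ring_buffer_write() is the one-shot variant: it reserves, copies the payload and commits in a single call. A hedged sketch:

#include <linux/ring_buffer.h>
#include <linux/types.h>

static int demo_log_value(struct trace_buffer *buffer, u64 value)
{
	/* Returns 0 on success, -EBUSY if the event could not be written. */
	return ring_buffer_write(buffer, sizeof(value), &value);
}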
3899 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3901 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3908 if (reader->read != rb_page_commit(reader)) in rb_per_cpu_empty()
3934 * ring_buffer_record_disable - stop all writes into the buffer
3944 atomic_inc(&buffer->record_disabled); in ring_buffer_record_disable()
3949 * ring_buffer_record_enable - enable writes to the buffer
3957 atomic_dec(&buffer->record_disabled); in ring_buffer_record_enable()
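ring_buffer_record_disable()/enable() gate all writers with an atomic counter. A hedged sketch of pausing recording around a consumer-side operation; the synchronize_rcu() follows the advice in these functions' kernel-doc, which is not fully shown in the matches above:

#include <linux/ring_buffer.h>
#include <linux/rcupdate.h>

static void demo_with_recording_paused(struct trace_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_rcu();	/* wait for in-flight writers to finish */

	/* ... read or reset the buffer here ... */

	ring_buffer_record_enable(buffer);
}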
3962 * ring_buffer_record_off - stop all writes into the buffer
3977 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_off()
3980 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_off()
3985 * ring_buffer_record_on - restart writes into the buffer
4000 rd = atomic_read(&buffer->record_disabled); in ring_buffer_record_on()
4003 } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); in ring_buffer_record_on()
4008 * ring_buffer_record_is_on - return true if the ring buffer can write
4015 return !atomic_read(&buffer->record_disabled); in ring_buffer_record_is_on()
4019 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4031 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); in ring_buffer_record_is_set_on()
4035 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4048 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4051 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4052 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4057 * ring_buffer_record_enable_cpu - enable writes to the buffer
4068 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4071 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4072 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4085 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4086 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4090 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4101 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4104 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4105 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4110 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4111 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4115 ret = bpage->page->time_stamp; in ring_buffer_oldest_event_ts()
4116 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4123 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4132 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4136 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4143 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4151 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4154 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4161 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4171 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4174 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4175 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4182 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4194 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4197 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4198 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4205 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4216 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4219 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4220 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4227 * ring_buffer_read_events_cpu - get the number of events successfully read
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4239 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4240 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4245 * ring_buffer_entries - get the number of entries in a buffer
4259 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4268 * ring_buffer_overruns - get the number of overruns in buffer
4282 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4283 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
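The accessor fragments above expose per-CPU and whole-buffer statistics. A hedged sketch that dumps the commonly used counters:

#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void demo_dump_stats(struct trace_buffer *buffer, int cpu)
{
	pr_info("total entries=%lu overruns=%lu\n",
		ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
	pr_info("cpu%d entries=%lu overruns=%lu\n", cpu,
		ring_buffer_entries_cpu(buffer, cpu),
		ring_buffer_overrun_cpu(buffer, cpu));
}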
4292 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
4295 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4296 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4297 iter->next_event = iter->head; in rb_iter_reset()
4299 iter->cache_reader_page = iter->head_page; in rb_iter_reset()
4300 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4301 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
4303 if (iter->head) { in rb_iter_reset()
4304 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4305 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4307 iter->read_stamp = iter->head_page->page->time_stamp; in rb_iter_reset()
4308 iter->page_stamp = iter->read_stamp; in rb_iter_reset()
4313 * ring_buffer_iter_reset - reset an iterator
4327 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4329 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4331 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4336 * ring_buffer_iter_empty - check if an iterator has no more to read
4350 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4351 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4352 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4353 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4354 commit_ts = commit_page->page->time_stamp; in ring_buffer_iter_empty()
4367 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4368 curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); in ring_buffer_iter_empty()
4376 return ((iter->head_page == commit_page && iter->head >= commit) || in ring_buffer_iter_empty()
4377 (iter->head_page == reader && commit_page == head_page && in ring_buffer_iter_empty()
4378 head_page->read == commit && in ring_buffer_iter_empty()
4379 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4389 switch (event->type_len) { in rb_update_read_stamp()
4395 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4400 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4401 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4405 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4419 switch (event->type_len) { in rb_update_iter_read_stamp()
4425 iter->read_stamp += delta; in rb_update_iter_read_stamp()
4430 delta = rb_fix_abs_ts(delta, iter->read_stamp); in rb_update_iter_read_stamp()
4431 iter->read_stamp = delta; in rb_update_iter_read_stamp()
4435 iter->read_stamp += event->time_delta; in rb_update_iter_read_stamp()
4439 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4447 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
4454 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4468 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4471 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4476 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4481 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4491 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4492 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4493 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4494 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4503 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4504 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4507 * cpu_buffer->pages just needs to point to the buffer, it in rb_get_reader_page()
4511 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4514 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
4526 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4539 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4552 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4553 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
4555 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4558 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4559 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4561 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4562 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4563 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4570 if (reader && reader->read == 0) in rb_get_reader_page()
4571 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4573 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4625 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) in rb_advance_reader()
4626 cpu_buffer->read++; in rb_advance_reader()
4631 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4632 cpu_buffer->read_bytes += length; in rb_advance_reader()
4639 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4642 if (iter->head == iter->next_event) { in rb_advance_iter()
4648 iter->head = iter->next_event; in rb_advance_iter()
4653 if (iter->next_event >= rb_page_size(iter->head_page)) { in rb_advance_iter()
4655 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4661 rb_update_iter_read_stamp(iter, iter->event); in rb_advance_iter()
4666 return cpu_buffer->lost_events; in rb_lost_events()
4695 switch (event->type_len) { in rb_buffer_peek()
4717 *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); in rb_buffer_peek()
4718 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4719 cpu_buffer->cpu, ts); in rb_buffer_peek()
4727 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4728 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4729 cpu_buffer->cpu, ts); in rb_buffer_peek()
4754 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4755 buffer = cpu_buffer->buffer; in rb_iter_peek()
4762 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4763 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
4764 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
4784 if (iter->head >= rb_page_size(iter->head_page)) { in rb_iter_peek()
4793 switch (event->type_len) { in rb_iter_peek()
4810 *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); in rb_iter_peek()
4811 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4812 cpu_buffer->cpu, ts); in rb_iter_peek()
4820 *ts = iter->read_stamp + event->time_delta; in rb_iter_peek()
4822 cpu_buffer->cpu, ts); in rb_iter_peek()
4837 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4850 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4854 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4862 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
4866 * ring_buffer_peek - peek at the next event to be read
4879 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4884 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4891 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4896 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_peek()
4902 /** ring_buffer_iter_dropped - report if there are dropped events
4909 bool ret = iter->missed_events != 0; in ring_buffer_iter_dropped()
4911 iter->missed_events = 0; in ring_buffer_iter_dropped()
4917 * ring_buffer_iter_peek - peek at the next event to be read
4927 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
4932 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4934 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4936 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_iter_peek()
4943 * ring_buffer_consume - return an event and consume it
4966 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4969 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4975 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4985 if (event && event->type_len == RINGBUF_TYPE_PADDING) in ring_buffer_consume()
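ring_buffer_consume() returns the next readable event on a CPU and advances the reader past it; lost_events reports how many events were overwritten since the previous read. A hedged sketch that drains one CPU:

#include <linux/ring_buffer.h>
#include <linux/printk.h>

static void demo_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		pr_debug("cpu%d ts=%llu lost=%lu len=%u\n", cpu, ts, lost,
			 ring_buffer_event_length(event));
	}
}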
4993 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5019 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5027 iter->event_size = buffer->subbuf_size; in ring_buffer_read_prepare()
5028 iter->event = kmalloc(iter->event_size, flags); in ring_buffer_read_prepare()
5029 if (!iter->event) { in ring_buffer_read_prepare()
5034 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5036 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5038 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5045 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5059 * ring_buffer_read_start - start a non consuming read of the buffer
5078 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5080 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5081 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5083 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5084 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5089 * ring_buffer_read_finish - finish reading the iterator of the buffer
5092 * This re-enables the recording to the buffer, and frees the
5098 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5107 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5109 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5111 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5112 kfree(iter->event); in ring_buffer_read_finish()
5118 * ring_buffer_iter_advance - advance the iterator to the next location
5126 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
5129 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5133 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
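/*
 * Sketch of the non-consuming (iterator) read sequence documented above:
 * prepare -> prepare_sync -> start -> iter_peek/iter_advance -> finish.
 * A single CPU is assumed for brevity; the event consumer is illustrative.
 */
static void iter_dump_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;

	/* One sync call covers all iterators prepared above. */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		pr_info("cpu%d ts=%llu len=%u\n",
			cpu, ts, ring_buffer_event_length(event));
		ring_buffer_iter_advance(iter);
	}

	if (ring_buffer_iter_dropped(iter))
		pr_info("cpu%d: events were dropped while iterating\n", cpu);

	/* Drops the resize protection taken in read_prepare() and frees iter. */
	ring_buffer_read_finish(iter);
}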
5138 * ring_buffer_size - return the size of the ring buffer (in bytes)
5144 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5147 return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5152 * ring_buffer_max_event_size - return the max data size of an event
5161 return buffer->max_data_size - RB_LEN_TIME_EXTEND; in ring_buffer_max_event_size()
5162 return buffer->max_data_size; in ring_buffer_max_event_size()
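/*
 * Sketch of the two size queries above.  A (hypothetical) writer checks
 * that its payload can ever fit in a single event before reserving, and
 * reports how many bytes the CPU's sub-buffers occupy.
 */
static int check_fit(struct trace_buffer *buffer, int cpu, unsigned long payload)
{
	if (payload > ring_buffer_max_event_size(buffer))
		return -E2BIG;	/* can never fit in one event */

	pr_info("cpu%d buffer spans %lu bytes of sub-buffers\n",
		cpu, ring_buffer_size(buffer, cpu));
	return 0;
}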
5168 local_set(&page->write, 0); in rb_clear_buffer_page()
5169 local_set(&page->entries, 0); in rb_clear_buffer_page()
5170 rb_init_page(page->page); in rb_clear_buffer_page()
5171 page->read = 0; in rb_clear_buffer_page()
5181 cpu_buffer->head_page in rb_reset_cpu()
5182 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5183 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5184 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5188 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5189 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5191 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5192 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5193 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5195 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5196 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5197 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5198 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5199 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5200 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5201 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5202 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5203 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5204 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5205 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5206 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5207 cpu_buffer->read = 0; in rb_reset_cpu()
5208 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5210 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5211 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5213 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5215 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5216 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5219 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5227 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5229 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5232 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5236 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5239 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5243 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5249 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5251 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5255 mutex_lock(&buffer->mutex); in ring_buffer_reset_cpu()
5257 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5258 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5265 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5266 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5268 mutex_unlock(&buffer->mutex); in ring_buffer_reset_cpu()
5276 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5285 mutex_lock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5288 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5290 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5291 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5298 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5304 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5309 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5310 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5313 mutex_unlock(&buffer->mutex); in ring_buffer_reset_online_cpus()
5317 * ring_buffer_reset - reset a ring buffer
5326 mutex_lock(&buffer->mutex); in ring_buffer_reset()
5329 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5331 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5332 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5339 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5343 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5344 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5347 mutex_unlock(&buffer->mutex); in ring_buffer_reset()
5352 * ring_buffer_empty - is the ring buffer empty?
5365 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5381 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5392 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5395 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
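/*
 * Sketch contrasting the reset entry points above.  Which variant to use
 * (one CPU, all online CPUs, or every CPU) is a policy decision of the
 * assumed caller; the empty checks are only a sanity test in this example.
 */
static void clear_buffers(struct trace_buffer *buffer, int cpu)
{
	if (cpu >= 0) {
		ring_buffer_reset_cpu(buffer, cpu);	/* one per-CPU buffer */
		WARN_ON(!ring_buffer_empty_cpu(buffer, cpu));
	} else {
		ring_buffer_reset(buffer);		/* every per-CPU buffer */
		WARN_ON(!ring_buffer_empty(buffer));
	}
	/* ring_buffer_reset_online_cpus() would cover only the online CPUs. */
}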
5408 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5423 int ret = -EINVAL; in ring_buffer_swap_cpu()
5425 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5426 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5429 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5430 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5433 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
5436 if (buffer_a->subbuf_order != buffer_b->subbuf_order) in ring_buffer_swap_cpu()
5439 ret = -EAGAIN; in ring_buffer_swap_cpu()
5441 if (atomic_read(&buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5444 if (atomic_read(&buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5447 if (atomic_read(&cpu_buffer_a->record_disabled)) in ring_buffer_swap_cpu()
5450 if (atomic_read(&cpu_buffer_b->record_disabled)) in ring_buffer_swap_cpu()
5459 atomic_inc(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5460 atomic_inc(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
5462 ret = -EBUSY; in ring_buffer_swap_cpu()
5463 if (local_read(&cpu_buffer_a->committing)) in ring_buffer_swap_cpu()
5465 if (local_read(&cpu_buffer_b->committing)) in ring_buffer_swap_cpu()
5472 if (atomic_read(&buffer_a->resizing)) in ring_buffer_swap_cpu()
5474 if (atomic_read(&buffer_b->resizing)) in ring_buffer_swap_cpu()
5477 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5478 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5480 cpu_buffer_b->buffer = buffer_a; in ring_buffer_swap_cpu()
5481 cpu_buffer_a->buffer = buffer_b; in ring_buffer_swap_cpu()
5486 atomic_dec(&cpu_buffer_a->record_disabled); in ring_buffer_swap_cpu()
5487 atomic_dec(&cpu_buffer_b->record_disabled); in ring_buffer_swap_cpu()
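/*
 * Sketch of a per-CPU swap between a "live" and a "snapshot" buffer.  The
 * error codes mirror the checks above: -EINVAL for mismatched geometry
 * (nr_pages / subbuf_order), -EAGAIN while recording is disabled, -EBUSY
 * while a commit is in flight.  Whether and how to retry is left to the
 * assumed caller.
 */
static int snapshot_cpu(struct trace_buffer *live, struct trace_buffer *snap, int cpu)
{
	int ret = ring_buffer_swap_cpu(live, snap, cpu);

	if (ret)
		pr_debug("cpu%d swap failed: %d\n", cpu, ret);
	return ret;
}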
5495 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5518 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5519 return ERR_PTR(-ENODEV); in ring_buffer_alloc_read_page()
5523 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5525 bpage->order = buffer->subbuf_order; in ring_buffer_alloc_read_page()
5526 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5528 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5530 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5531 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5532 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5535 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5538 if (bpage->data) in ring_buffer_alloc_read_page()
5542 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
5545 return ERR_PTR(-ENOMEM); in ring_buffer_alloc_read_page()
5548 bpage->data = page_address(page); in ring_buffer_alloc_read_page()
5551 rb_init_page(bpage->data); in ring_buffer_alloc_read_page()
5558 * ring_buffer_free_read_page - free an allocated read page
5569 struct buffer_data_page *bpage = data_page->data; in ring_buffer_free_read_page()
5573 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5576 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5580 * is different from the subbuffer order of the buffer - in ring_buffer_free_read_page()
5583 if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order) in ring_buffer_free_read_page()
5587 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5589 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5590 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5594 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5598 free_pages((unsigned long)bpage, data_page->order); in ring_buffer_free_read_page()
5604 * ring_buffer_read_page - extract a page from the ring buffer
5641 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5650 int ret = -1; in ring_buffer_read_page()
5652 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
5662 len -= BUF_PAGE_HDR_SIZE; in ring_buffer_read_page()
5664 if (!data_page || !data_page->data) in ring_buffer_read_page()
5666 if (data_page->order != buffer->subbuf_order) in ring_buffer_read_page()
5669 bpage = data_page->data; in ring_buffer_read_page()
5673 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5681 read = reader->read; in ring_buffer_read_page()
5685 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5694 if (read || (len < (commit - read)) || in ring_buffer_read_page()
5695 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5696 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5708 (!read || (len < (commit - read)) || in ring_buffer_read_page()
5709 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5712 if (len > (commit - read)) in ring_buffer_read_page()
5713 len = (commit - read); in ring_buffer_read_page()
5722 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5733 memcpy(bpage->data + pos, rpage->data + rpos, size); in ring_buffer_read_page()
5735 len -= size; in ring_buffer_read_page()
5738 rpos = reader->read; in ring_buffer_read_page()
5750 local_set(&bpage->commit, pos); in ring_buffer_read_page()
5751 bpage->time_stamp = save_timestamp; in ring_buffer_read_page()
5757 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5758 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5762 bpage = reader->page; in ring_buffer_read_page()
5763 reader->page = data_page->data; in ring_buffer_read_page()
5764 local_set(&reader->write, 0); in ring_buffer_read_page()
5765 local_set(&reader->entries, 0); in ring_buffer_read_page()
5766 reader->read = 0; in ring_buffer_read_page()
5767 data_page->data = bpage; in ring_buffer_read_page()
5774 if (reader->real_end) in ring_buffer_read_page()
5775 local_set(&bpage->commit, reader->real_end); in ring_buffer_read_page()
5779 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5781 commit = local_read(&bpage->commit); in ring_buffer_read_page()
5789 if (buffer->subbuf_size - commit >= sizeof(missed_events)) { in ring_buffer_read_page()
5790 memcpy(&bpage->data[commit], &missed_events, in ring_buffer_read_page()
5792 local_add(RB_MISSED_STORED, &bpage->commit); in ring_buffer_read_page()
5795 local_add(RB_MISSED_EVENTS, &bpage->commit); in ring_buffer_read_page()
5801 if (commit < buffer->subbuf_size) in ring_buffer_read_page()
5802 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit); in ring_buffer_read_page()
5805 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5813 * ring_buffer_read_page_data - get pointer to the data in the page.
5820 return page->data; in ring_buffer_read_page_data()
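/*
 * Sketch of the page-extraction path above: allocate a spare read page,
 * pull a sub-buffer into it with ring_buffer_read_page(), look at the data
 * through ring_buffer_read_page_data(), then hand the page back.  What is
 * done with the extracted data is an assumption of this example.
 */
static int extract_one_subbuf(struct trace_buffer *buffer, int cpu)
{
	struct buffer_data_read_page *dpage;
	int ret;

	dpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(dpage))
		return PTR_ERR(dpage);

	/* full == 1: only succeed when a whole sub-buffer can be taken. */
	ret = ring_buffer_read_page(buffer, dpage,
				    ring_buffer_subbuf_size_get(buffer), cpu, 1);
	if (ret >= 0)
		pr_info("cpu%d: extracted sub-buffer at %p\n",
			cpu, ring_buffer_read_page_data(dpage));

	ring_buffer_free_read_page(buffer, cpu, dpage);
	return ret < 0 ? ret : 0;
}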
5825 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5832 return buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_size_get()
5837 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5851 return -EINVAL; in ring_buffer_subbuf_order_get()
5853 return buffer->subbuf_order; in ring_buffer_subbuf_order_get()
5858 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5866 * 0 - 1 system page
5867 * 1 - 2 system pages
5868 * 3 - 8 system pages
5884 return -EINVAL; in ring_buffer_subbuf_order_set()
5886 if (buffer->subbuf_order == order) in ring_buffer_subbuf_order_set()
5891 return -EINVAL; in ring_buffer_subbuf_order_set()
5895 return -EINVAL; in ring_buffer_subbuf_order_set()
5897 old_order = buffer->subbuf_order; in ring_buffer_subbuf_order_set()
5898 old_size = buffer->subbuf_size; in ring_buffer_subbuf_order_set()
5901 mutex_lock(&buffer->mutex); in ring_buffer_subbuf_order_set()
5902 atomic_inc(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
5907 buffer->subbuf_order = order; in ring_buffer_subbuf_order_set()
5908 buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE; in ring_buffer_subbuf_order_set()
5913 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
5916 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
5919 nr_pages = old_size * buffer->buffers[cpu]->nr_pages; in ring_buffer_subbuf_order_set()
5920 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size); in ring_buffer_subbuf_order_set()
5926 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
5932 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
5934 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
5936 err = -ENOMEM; in ring_buffer_subbuf_order_set()
5943 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_subbuf_order_set()
5946 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
5952 list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) { in ring_buffer_subbuf_order_set()
5953 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
5957 bpage = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
5961 free_buffer_page(cpu_buffer->reader_page); in ring_buffer_subbuf_order_set()
5964 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
5966 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
5969 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
5970 cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev; in ring_buffer_subbuf_order_set()
5971 cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
5974 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
5976 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
5977 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
5978 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
5980 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
5981 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
5983 free_pages((unsigned long)cpu_buffer->free_page, old_order); in ring_buffer_subbuf_order_set()
5984 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
5991 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
5992 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
5997 buffer->subbuf_order = old_order; in ring_buffer_subbuf_order_set()
5998 buffer->subbuf_size = old_size; in ring_buffer_subbuf_order_set()
6000 atomic_dec(&buffer->record_disabled); in ring_buffer_subbuf_order_set()
6001 mutex_unlock(&buffer->mutex); in ring_buffer_subbuf_order_set()
6004 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6006 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6009 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
6010 list_del_init(&bpage->list); in ring_buffer_subbuf_order_set()
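/*
 * Sketch of resizing the sub-buffers with the order get/set calls above:
 * 2^order system pages make up one sub-buffer.  The choice of order 2
 * (four system pages) is only an example; -EINVAL and -ENOMEM follow from
 * the checks in ring_buffer_subbuf_order_set().
 */
static int grow_subbufs(struct trace_buffer *buffer)
{
	int old_order = ring_buffer_subbuf_order_get(buffer);
	int ret = ring_buffer_subbuf_order_set(buffer, 2);

	if (ret)
		return ret;

	pr_info("sub-buffer order %d -> %d, %d bytes per sub-buffer\n",
		old_order, ring_buffer_subbuf_order_get(buffer),
		ring_buffer_subbuf_size_get(buffer));
	return 0;
}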
6032 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
6041 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
6042 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
6050 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
6052 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
6055 return -ENOMEM; in trace_rb_cpu_prepare()
6058 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
6126 cnt = data->cnt + (nested ? 27 : 0); in rb_write_something()
6129 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); in rb_write_something()
6137 event = ring_buffer_lock_reserve(data->buffer, len); in rb_write_something()
6142 data->bytes_dropped += len; in rb_write_something()
6144 data->bytes_dropped_nested += len; in rb_write_something()
6151 if (RB_WARN_ON(data->buffer, event_len < len)) in rb_write_something()
6155 item->size = size; in rb_write_something()
6156 memcpy(item->str, rb_string, size); in rb_write_something()
6159 data->bytes_alloc_nested += event_len; in rb_write_something()
6160 data->bytes_written_nested += len; in rb_write_something()
6161 data->events_nested++; in rb_write_something()
6162 if (!data->min_size_nested || len < data->min_size_nested) in rb_write_something()
6163 data->min_size_nested = len; in rb_write_something()
6164 if (len > data->max_size_nested) in rb_write_something()
6165 data->max_size_nested = len; in rb_write_something()
6167 data->bytes_alloc += event_len; in rb_write_something()
6168 data->bytes_written += len; in rb_write_something()
6169 data->events++; in rb_write_something()
6170 if (!data->min_size || len < data->min_size) in rb_write_something()
6171 data->min_size = len; in rb_write_something()
6172 if (len > data->max_size) in rb_write_something()
6173 data->max_size = len; in rb_write_something()
6177 ring_buffer_unlock_commit(data->buffer); in rb_write_something()
6188 data->cnt++; in rb_test()
6191 /* Now sleep between a min of 100-300us and a max of 1ms */ in rb_test()
6192 usleep_range(((data->cnt % 3) + 1) * 100, 1000); in rb_test()
6310 ret = -1; in test_ringbuffer()
6312 total_events = data->events + data->events_nested; in test_ringbuffer()
6313 total_written = data->bytes_written + data->bytes_written_nested; in test_ringbuffer()
6314 total_alloc = data->bytes_alloc + data->bytes_alloc_nested; in test_ringbuffer()
6315 total_dropped = data->bytes_dropped + data->bytes_dropped_nested; in test_ringbuffer()
6317 big_event_size = data->max_size + data->max_size_nested; in test_ringbuffer()
6318 small_event_size = data->min_size + data->min_size_nested; in test_ringbuffer()
6337 total_size += item->size + sizeof(struct rb_item); in test_ringbuffer()
6338 if (memcmp(&item->str[0], rb_string, item->size) != 0) { in test_ringbuffer()
6340 pr_info("buffer had: %.*s\n", item->size, item->str); in test_ringbuffer()
6341 pr_info("expected: %.*s\n", item->size, rb_string); in test_ringbuffer()
6343 ret = -1; in test_ringbuffer()
6351 ret = -1; in test_ringbuffer()