Lines Matching defs:bpage (kernel/trace/ring_buffer.c, the Linux trace ring buffer)

386 static void rb_init_page(struct buffer_data_page *bpage)
388 local_set(&bpage->commit, 0);
391 static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
393 return local_read(&bpage->page->commit);
396 static void free_buffer_page(struct buffer_page *bpage)
399 if (!bpage->range)
400 free_pages((unsigned long)bpage->page, bpage->order);
401 kfree(bpage);
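
The three helpers above are short enough to piece back together from their fragments; restored with braces, they read:

	static void rb_init_page(struct buffer_data_page *bpage)
	{
		/* an empty sub-buffer: nothing committed yet */
		local_set(&bpage->commit, 0);
	}

	static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
	{
		return local_read(&bpage->page->commit);
	}

	static void free_buffer_page(struct buffer_page *bpage)
	{
		/* pages from a boot-mapped range are not owned by the page allocator */
		if (!bpage->range)
			free_pages((unsigned long)bpage->page, bpage->order);
		kfree(bpage);
	}

Note the split between struct buffer_page (the bookkeeping struct) and struct buffer_data_page (the actual data page it points to through ->page).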
1354 static inline void rb_inc_page(struct buffer_page **bpage)
1356 struct list_head *p = rb_list_head((*bpage)->list.next);
1358 *bpage = list_entry(p, struct buffer_page, list);
1361 static inline void rb_dec_page(struct buffer_page **bpage)
1363 struct list_head *p = rb_list_head((*bpage)->list.prev);
1365 *bpage = list_entry(p, struct buffer_page, list);
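
rb_inc_page() and rb_dec_page() walk the circular list of sub-buffers; rb_list_head() masks off the flag bits that the lockless head-tracking algorithm keeps in the low bits of the list pointers, so the result is always a clean pointer:

	static inline void rb_inc_page(struct buffer_page **bpage)
	{
		struct list_head *p = rb_list_head((*bpage)->list.next);

		*bpage = list_entry(p, struct buffer_page, list);
	}

	static inline void rb_dec_page(struct buffer_page **bpage)
	{
		struct list_head *p = rb_list_head((*bpage)->list.prev);

		*bpage = list_entry(p, struct buffer_page, list);
	}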
1483 struct buffer_page *bpage)
1485 unsigned long val = (unsigned long)bpage;
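
The fragment at 1483-1485 is the head of rb_check_bpage(). Filled out as a sketch, it verifies that a buffer_page pointer carries none of the list-flag bits:

	static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *bpage)
	{
		unsigned long val = (unsigned long)bpage;

		RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
	}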
1946 struct buffer_page *bpage = orig_head;
1948 rb_dec_page(&bpage);
1957 bpage->list.next = &cpu_buffer->reader_page->list;
1961 bpage = head_page;
1963 head_page->list.prev = bpage->list.prev;
1964 rb_dec_page(&bpage);
1965 bpage->list.next = &head_page->list;
1966 rb_set_list_to_head(&bpage->list);
1973 bpage = cpu_buffer->reader_page;
1974 meta->buffers[0] = rb_meta_subbuf_idx(meta, bpage->page);
1975 bpage->id = 0;
1977 for (i = 1, bpage = head_page; i < meta->nr_subbufs;
1978 i++, rb_inc_page(&bpage)) {
1979 meta->buffers[i] = rb_meta_subbuf_idx(meta, bpage->page);
1980 bpage->id = i;
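
This block (1946-1980) is from the validation of a boot-mapped (persistent) buffer: the reader page is spliced back into the ring, then every sub-buffer gets its meta index and id renumbered, with the reader page always taking slot 0. The renumbering loop, assembled verbatim from the matches:

	bpage = cpu_buffer->reader_page;
	meta->buffers[0] = rb_meta_subbuf_idx(meta, bpage->page);
	bpage->id = 0;

	for (i = 1, bpage = head_page; i < meta->nr_subbufs;
	     i++, rb_inc_page(&bpage)) {
		meta->buffers[i] = rb_meta_subbuf_idx(meta, bpage->page);
		bpage->id = i;
	}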
2187 struct buffer_page *bpage)
2191 if (meta->head_buffer == (unsigned long)bpage->page)
2192 cpu_buffer->head_page = bpage;
2194 if (meta->commit_buffer == (unsigned long)bpage->page) {
2195 cpu_buffer->commit_page = bpage;
2196 cpu_buffer->tail_page = bpage;
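
rb_meta_buffer_update() (2187-2196) repoints the CPU buffer's head/commit/tail bookkeeping at whichever freshly allocated buffer_page wraps the data page recorded in the persistent meta. A reconstruction; the struct name of the meta pointer is an assumption, the field accesses are from the fragments:

	static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
					  struct buffer_page *bpage)
	{
		struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; /* type name assumed */

		if (meta->head_buffer == (unsigned long)bpage->page)
			cpu_buffer->head_page = bpage;

		if (meta->commit_buffer == (unsigned long)bpage->page) {
			cpu_buffer->commit_page = bpage;
			cpu_buffer->tail_page = bpage;
		}
	}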
2205 struct buffer_page *bpage, *tmp;
2246 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2248 if (!bpage)
2251 rb_check_bpage(cpu_buffer, bpage);
2257 list_add_tail(&bpage->list, pages);
2261 bpage->page = rb_range_buffer(cpu_buffer, i + 1);
2262 if (!bpage->page)
2266 rb_meta_buffer_update(cpu_buffer, bpage);
2267 bpage->range = 1;
2268 bpage->id = i + 1;
2275 bpage->page = page_address(page);
2276 rb_init_page(bpage->page);
2278 bpage->order = cpu_buffer->buffer->subbuf_order;
2289 list_for_each_entry_safe(bpage, tmp, pages, list) {
2290 list_del_init(&bpage->list);
2291 free_buffer_page(bpage);
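
The allocation loop (2246-2291) follows a two-source pattern: the buffer_page structs are always kzalloc'd cache-line aligned, but the data page comes either out of the boot-mapped range (slot i + 1, since slot 0 is the reader page) or from the page allocator. A condensed sketch; the branch condition, GFP flags, and error labels are assumptions, the per-line statements come from the fragments:

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto free_pages;

	rb_check_bpage(cpu_buffer, bpage);
	list_add_tail(&bpage->list, pages);

	if (cpu_buffer->ring_meta) {			/* condition assumed */
		/* boot-mapped: data pages already exist in the reserved range */
		bpage->page = rb_range_buffer(cpu_buffer, i + 1);
		if (!bpage->page)
			goto free_pages;
		rb_meta_buffer_update(cpu_buffer, bpage);
		bpage->range = 1;
		bpage->id = i + 1;
	} else {
		page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL,
					cpu_buffer->buffer->subbuf_order);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}
	bpage->order = cpu_buffer->buffer->subbuf_order;

The reader-page setup at 2350-2379 repeats the same pattern, but with range slot 0 and the result stored in cpu_buffer->reader_page. On failure, the cleanup at 2289-2291 walks the partially built list and hands each entry to free_buffer_page().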
2329 struct buffer_page *bpage;
2350 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2352 if (!bpage)
2355 rb_check_bpage(cpu_buffer, bpage);
2357 cpu_buffer->reader_page = bpage;
2366 bpage->page = rb_range_buffer(cpu_buffer, 0);
2367 if (!bpage->page)
2370 rb_meta_buffer_update(cpu_buffer, bpage);
2371 bpage->range = 1;
2378 bpage->page = page_address(page);
2379 rb_init_page(bpage->page);
2430 struct buffer_page *bpage, *tmp;
2439 list_for_each_entry_safe(bpage, tmp, head, list) {
2440 list_del_init(&bpage->list);
2441 free_buffer_page(bpage);
2443 bpage = list_entry(head, struct buffer_page, list);
2444 free_buffer_page(bpage);
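
The teardown at 2439-2444 has a subtlety: cpu_buffer->pages points into the list member of one of the buffer_pages rather than at a standalone anchor, so after emptying the list the page containing the anchor must be freed as well:

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	/* the list "head" is itself embedded in a buffer_page */
	bpage = list_entry(head, struct buffer_page, list);
	free_buffer_page(bpage);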
2688 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
2690 return local_read(&bpage->entries) & RB_WRITE_MASK;
2693 static inline unsigned long rb_page_write(struct buffer_page *bpage)
2695 return local_read(&bpage->write) & RB_WRITE_MASK;
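
Both counters share their word with state bits above RB_WRITE_MASK (writer-nesting bookkeeping for the lockless commit path), hence the masking; rb_try_to_discard at 3835 reads the complementary bits with ~RB_WRITE_MASK:

	static inline unsigned long rb_page_entries(struct buffer_page *bpage)
	{
		return local_read(&bpage->entries) & RB_WRITE_MASK;
	}

	static inline unsigned long rb_page_write(struct buffer_page *bpage)
	{
		return local_read(&bpage->write) & RB_WRITE_MASK;
	}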
2880 struct buffer_page *bpage, *tmp;
2881 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2883 list_del_init(&bpage->list);
2884 free_buffer_page(bpage);
3112 struct buffer_page *bpage, *tmp;
3120 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
3122 list_del_init(&bpage->list);
3123 free_buffer_page(bpage);
3144 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
3146 return bpage->page->data + index;
3216 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
3218 return rb_page_commit(bpage) & ~RB_MISSED_MASK;
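
rb_page_size() strips RB_MISSED_MASK for the same reason: the commit word doubles as a flag carrier, with RB_MISSED_STORED and RB_MISSED_EVENTS ORed into its top bits when a page is handed to a reader (see 6743-6746):

	static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
	{
		return bpage->page->data + index;
	}

	static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
	{
		/* commit carries RB_MISSED_* flags; strip them to get the data size */
		return rb_page_commit(bpage) & ~RB_MISSED_MASK;
	}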
3819 struct buffer_page *bpage;
3827 bpage = READ_ONCE(cpu_buffer->tail_page);
3833 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3835 local_read(&bpage->write) & ~RB_WRITE_MASK;
3869 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
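
The discard fast path (3819-3869) can only "un-write" an event that is still the last thing on the tail page. A sketch of the core, assuming old_index/new_index were already computed as the event's end and start offsets and addr as its page address:

	bpage = READ_ONCE(cpu_buffer->tail_page);

	/* only the last event on the tail page can be discarded in place */
	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;

		/* carry the bookkeeping bits along while moving the index back */
		old_index += write_mask;
		new_index += write_mask;

		if (local_try_cmpxchg(&bpage->write, &old_index, new_index))
			return true;	/* discarded */
	}

	return false;	/* a later write slipped in; leave the event as padding */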
4286 static void dump_buffer_page(struct buffer_data_page *bpage,
4294 ts = bpage->time_stamp;
4299 event = (struct ring_buffer_event *)(bpage->data + e);
4349 dump_buffer_page(bpage, info, tail); \
4365 struct buffer_data_page *bpage;
4370 bpage = info->tail_page->page;
4374 tail = local_read(&bpage->commit);
4385 if (tail <= 8 || tail > local_read(&bpage->commit))
4394 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
4714 struct buffer_page *bpage = cpu_buffer->commit_page;
4720 if (likely(bpage->page == (void *)addr)) {
4721 local_dec(&bpage->entries);
4729 rb_inc_page(&bpage);
4730 start = bpage;
4732 if (bpage->page == (void *)addr) {
4733 local_dec(&bpage->entries);
4736 rb_inc_page(&bpage);
4737 } while (bpage != start);
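
rb_decrement_entry() (4714-4737) undoes an entry count after a discard. It checks the likely page first (the commit page), otherwise it walks the whole ring; a reconstruction, with the computation of addr from the event pointer elided:

	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	/* the event is most likely on the commit page */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/* otherwise walk the full ring looking for the owning page */
	rb_inc_page(&bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(&bpage);
	} while (bpage != start);

	/* the event was not in the ring at all */
	RB_WARN_ON(cpu_buffer, 1);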
5044 struct buffer_page *bpage;
5057 bpage = cpu_buffer->reader_page;
5059 bpage = rb_set_head_page(cpu_buffer);
5060 if (bpage)
5061 ret = bpage->page->time_stamp;
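
This is the body of ring_buffer_oldest_event_ts(): the oldest event lives on the reader page if the tail has wrapped onto it, otherwise on the head page, and its sub-buffer's time_stamp is the answer. A sketch of the locked section, with the condition reconstructed from context:

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)	/* condition assumed */
		bpage = cpu_buffer->reader_page;
	else
		bpage = rb_set_head_page(cpu_buffer);
	if (bpage)
		ret = bpage->page->time_stamp;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);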
6086 struct buffer_page *bpage, int id)
6093 id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page);
6095 bpage->id = id;
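
The match at 6086-6095 is rb_page_id(), used when mapping sub-buffers to user space. Reconstructed; the guard on ring_meta is an assumption inferred from the dereference at 6093:

	static int rb_page_id(struct ring_buffer_per_cpu *cpu_buffer,
			      struct buffer_page *bpage, int id)
	{
		/* boot-mapped buffers: the id must match the persistent meta index */
		if (cpu_buffer->ring_meta)	/* guard assumed */
			id = rb_meta_subbuf_idx(cpu_buffer->ring_meta, bpage->page);

		bpage->id = id;

		return id;
	}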
6465 struct buffer_data_read_page *bpage = NULL;
6472 bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
6473 if (!bpage)
6476 bpage->order = buffer->subbuf_order;
6482 bpage->data = cpu_buffer->free_page;
6489 if (bpage->data)
6496 kfree(bpage);
6500 bpage->data = page_address(page);
6503 rb_init_page(bpage->data);
6505 return bpage;
6521 struct buffer_data_page *bpage = data_page->data;
6522 struct page *page = virt_to_page(bpage);
6542 cpu_buffer->free_page = bpage;
6543 bpage = NULL;
6550 free_pages((unsigned long)bpage, data_page->order);
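
ring_buffer_alloc_read_page() and ring_buffer_free_read_page() (6465-6550) keep a one-page cache per CPU buffer: the allocator first tries cpu_buffer->free_page, and the free path tries to refill that slot before falling back to the page allocator. A sketch of the free path, with the ref-count and subbuf-order checks plus locking elided:

	struct buffer_data_page *bpage = data_page->data;
	struct page *page = virt_to_page(bpage);

	if (!cpu_buffer->free_page) {
		cpu_buffer->free_page = bpage;
		bpage = NULL;		/* ownership moved into the cache */
	}

	/* free_pages() is a no-op when bpage was cached (NULL) */
	free_pages((unsigned long)bpage, data_page->order);
	kfree(data_page);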
6595 struct buffer_data_page *bpage;
6620 bpage = data_page->data;
6621 if (!bpage)
6685 memcpy(bpage->data + pos, rpage->data + rpos, size);
6701 /* update bpage */
6702 local_set(&bpage->commit, pos);
6703 bpage->time_stamp = save_timestamp;
6713 rb_init_page(bpage);
6714 bpage = reader->page;
6719 data_page->data = bpage;
6727 local_set(&bpage->commit, reader->real_end);
6732 commit = local_read(&bpage->commit);
6741 memcpy(&bpage->data[commit], &missed_events,
6743 local_add(RB_MISSED_STORED, &bpage->commit);
6746 local_add(RB_MISSED_EVENTS, &bpage->commit);
6753 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
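
The tail of ring_buffer_read_page() (6727-6753) publishes the result to the caller: commit is set to the real end of data, the missed-events count is appended after the data when it fits and flagged via RB_MISSED_STORED, RB_MISSED_EVENTS is set either way, and the remainder of the sub-buffer is zeroed so stale bytes never reach user space. Condensed from the fragments, with the size guard an assumption:

	if (missed_events) {
		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	if (commit < buffer->subbuf_size)
		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);

The same missed-events bookkeeping shows up again at 7356-7375 for the mmap reader path.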
6823 struct buffer_page *bpage, *tmp;
6943 list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
6944 list_del_init(&bpage->list);
6945 free_buffer_page(bpage);
6968 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6969 list_del_init(&bpage->list);
6970 free_buffer_page(bpage);
7356 struct buffer_data_page *bpage = reader->page;
7364 local_set(&bpage->commit, reader->real_end);
7371 memcpy(&bpage->data[commit], &missed_events,
7373 local_add(RB_MISSED_STORED, &bpage->commit);
7375 local_add(RB_MISSED_EVENTS, &bpage->commit);