// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025 - Google LLC
 * Author: Vincent Donnefort <vdonnefort@google.com>
 */

#include <linux/atomic.h>
#include <linux/simple_ring_buffer.h>

#include <asm/barrier.h>
#include <asm/local.h>
1234e5b958SVincent Donnefort
/*
 * State encoded in the low bits of a simple_buffer_page next link. Links are
 * pointers to list_head, which are at least 4-byte aligned, so the two low
 * bits are free to carry the tag.
 */
enum simple_rb_link_type {
	SIMPLE_RB_LINK_NORMAL = 0,	/* plain link */
	SIMPLE_RB_LINK_HEAD = 1,	/* the linked page is the ring head */
	SIMPLE_RB_LINK_HEAD_MOVING	/* the writer is moving the head */
};

/* Mask clearing the tag bits, recovering the actual pointer value */
#define SIMPLE_RB_LINK_MASK ~(SIMPLE_RB_LINK_HEAD | SIMPLE_RB_LINK_HEAD_MOVING)
2034e5b958SVincent Donnefort
/*
 * Tag @bpage's next link with the HEAD marker: the page it points to is the
 * current head of the ring.
 */
static void simple_bpage_set_head_link(struct simple_buffer_page *bpage)
{
	unsigned long link = (unsigned long)bpage->link.next;

	link &= SIMPLE_RB_LINK_MASK;
	link |= SIMPLE_RB_LINK_HEAD;

	/*
	 * Paired with simple_rb_find_head() to order access between the head
	 * link and overrun. It ensures we always report an up-to-date value
	 * after swapping the reader page.
	 */
	smp_store_release(&bpage->link.next, (struct list_head *)link);
}
3534e5b958SVincent Donnefort
/*
 * Atomically replace @bpage's HEAD-tagged next link with a link to @dst
 * carrying @new_type.
 *
 * Returns true on success, false when the link was not (or no longer) tagged
 * HEAD, i.e. we raced with a concurrent head move.
 */
static bool simple_bpage_unset_head_link(struct simple_buffer_page *bpage,
					 struct simple_buffer_page *dst,
					 enum simple_rb_link_type new_type)
{
	unsigned long *link = (unsigned long *)(&bpage->link.next);
	unsigned long old = (*link & SIMPLE_RB_LINK_MASK) | SIMPLE_RB_LINK_HEAD;
	unsigned long new = (unsigned long)(&dst->link) | new_type;

	return try_cmpxchg(link, &old, new);
}
4634e5b958SVincent Donnefort
/* Strip the tag bits from @bpage's next link, turning it into a plain link */
static void simple_bpage_set_normal_link(struct simple_buffer_page *bpage)
{
	unsigned long link = (unsigned long)bpage->link.next;

	WRITE_ONCE(bpage->link.next, (struct list_head *)(link & SIMPLE_RB_LINK_MASK));
}
5334e5b958SVincent Donnefort
simple_bpage_from_link(struct list_head * link)5434e5b958SVincent Donnefort static struct simple_buffer_page *simple_bpage_from_link(struct list_head *link)
5534e5b958SVincent Donnefort {
5634e5b958SVincent Donnefort unsigned long ptr = (unsigned long)link & SIMPLE_RB_LINK_MASK;
5734e5b958SVincent Donnefort
5834e5b958SVincent Donnefort return container_of((struct list_head *)ptr, struct simple_buffer_page, link);
5934e5b958SVincent Donnefort }
6034e5b958SVincent Donnefort
simple_bpage_next_page(struct simple_buffer_page * bpage)6134e5b958SVincent Donnefort static struct simple_buffer_page *simple_bpage_next_page(struct simple_buffer_page *bpage)
6234e5b958SVincent Donnefort {
6334e5b958SVincent Donnefort return simple_bpage_from_link(bpage->link.next);
6434e5b958SVincent Donnefort }
6534e5b958SVincent Donnefort
/* Rewind @bpage: no entries, write cursor and reader-visible commit at 0 */
static void simple_bpage_reset(struct simple_buffer_page *bpage)
{
	bpage->write = 0;
	bpage->entries = 0;

	local_set(&bpage->page->commit, 0);
}
7334e5b958SVincent Donnefort
simple_bpage_init(struct simple_buffer_page * bpage,void * page)74*63592308SVincent Donnefort static void simple_bpage_init(struct simple_buffer_page *bpage, void *page)
7534e5b958SVincent Donnefort {
7634e5b958SVincent Donnefort INIT_LIST_HEAD(&bpage->link);
7734e5b958SVincent Donnefort bpage->page = (struct buffer_data_page *)page;
7834e5b958SVincent Donnefort
7934e5b958SVincent Donnefort simple_bpage_reset(bpage);
8034e5b958SVincent Donnefort }
8134e5b958SVincent Donnefort
/*
 * Bump a meta-page counter read concurrently by the (remote) reader.
 * Arguments are parenthesized in the expansion so expression arguments
 * expand safely; note __meta is still evaluated twice.
 */
#define simple_rb_meta_inc(__meta, __inc) \
	WRITE_ONCE((__meta), (__meta) + (__inc))
8434e5b958SVincent Donnefort
simple_rb_loaded(struct simple_rb_per_cpu * cpu_buffer)8534e5b958SVincent Donnefort static bool simple_rb_loaded(struct simple_rb_per_cpu *cpu_buffer)
8634e5b958SVincent Donnefort {
8734e5b958SVincent Donnefort return !!cpu_buffer->bpages;
8834e5b958SVincent Donnefort }
8934e5b958SVincent Donnefort
/*
 * Walk the ring looking for the page whose predecessor link carries the HEAD
 * tag and cache it in cpu_buffer->head_page.
 *
 * The writer may move the head concurrently: allow up to two full laps of the
 * ring before giving up, and spin briefly while a move is in flight.
 *
 * Returns 0 on success or -EBUSY if the head could not be found.
 */
static int simple_rb_find_head(struct simple_rb_per_cpu *cpu_buffer)
{
	int retry = cpu_buffer->nr_pages * 2;
	struct simple_buffer_page *head;

	head = cpu_buffer->head_page;

	while (retry--) {
		unsigned long link;

spin:
		/* See smp_store_release in simple_bpage_set_head_link() */
		link = (unsigned long)smp_load_acquire(&head->link.prev->next);

		switch (link & ~SIMPLE_RB_LINK_MASK) {
		/* Found the head */
		case SIMPLE_RB_LINK_HEAD:
			cpu_buffer->head_page = head;
			return 0;
		/* The writer caught the head, we can spin, that won't be long */
		case SIMPLE_RB_LINK_HEAD_MOVING:
			goto spin;
		}

		head = simple_bpage_next_page(head);
	}

	return -EBUSY;
}
11934e5b958SVincent Donnefort
12034e5b958SVincent Donnefort /**
12134e5b958SVincent Donnefort * simple_ring_buffer_swap_reader_page - Swap ring-buffer head with the reader
12234e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu
12334e5b958SVincent Donnefort *
12434e5b958SVincent Donnefort * This function enables consuming reading. It ensures the current head page will not be overwritten
12534e5b958SVincent Donnefort * and can be safely read.
12634e5b958SVincent Donnefort *
12734e5b958SVincent Donnefort * Returns 0 on success, -ENODEV if @cpu_buffer was unloaded or -EBUSY if we failed to catch the
12834e5b958SVincent Donnefort * head page.
12934e5b958SVincent Donnefort */
simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu * cpu_buffer)13034e5b958SVincent Donnefort int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer)
13134e5b958SVincent Donnefort {
13234e5b958SVincent Donnefort struct simple_buffer_page *last, *head, *reader;
13334e5b958SVincent Donnefort unsigned long overrun;
13434e5b958SVincent Donnefort int retry = 8;
13534e5b958SVincent Donnefort int ret;
13634e5b958SVincent Donnefort
13734e5b958SVincent Donnefort if (!simple_rb_loaded(cpu_buffer))
13834e5b958SVincent Donnefort return -ENODEV;
13934e5b958SVincent Donnefort
14034e5b958SVincent Donnefort reader = cpu_buffer->reader_page;
14134e5b958SVincent Donnefort
14234e5b958SVincent Donnefort do {
14334e5b958SVincent Donnefort /* Run after the writer to find the head */
14434e5b958SVincent Donnefort ret = simple_rb_find_head(cpu_buffer);
14534e5b958SVincent Donnefort if (ret)
14634e5b958SVincent Donnefort return ret;
14734e5b958SVincent Donnefort
14834e5b958SVincent Donnefort head = cpu_buffer->head_page;
14934e5b958SVincent Donnefort
15034e5b958SVincent Donnefort /* Connect the reader page around the header page */
15134e5b958SVincent Donnefort reader->link.next = head->link.next;
15234e5b958SVincent Donnefort reader->link.prev = head->link.prev;
15334e5b958SVincent Donnefort
15434e5b958SVincent Donnefort /* The last page before the head */
15534e5b958SVincent Donnefort last = simple_bpage_from_link(head->link.prev);
15634e5b958SVincent Donnefort
15734e5b958SVincent Donnefort /* The reader page points to the new header page */
15834e5b958SVincent Donnefort simple_bpage_set_head_link(reader);
15934e5b958SVincent Donnefort
16034e5b958SVincent Donnefort overrun = cpu_buffer->meta->overrun;
16134e5b958SVincent Donnefort } while (!simple_bpage_unset_head_link(last, reader, SIMPLE_RB_LINK_NORMAL) && retry--);
16234e5b958SVincent Donnefort
16334e5b958SVincent Donnefort if (!retry)
16434e5b958SVincent Donnefort return -EINVAL;
16534e5b958SVincent Donnefort
16634e5b958SVincent Donnefort cpu_buffer->head_page = simple_bpage_from_link(reader->link.next);
16734e5b958SVincent Donnefort cpu_buffer->head_page->link.prev = &reader->link;
16834e5b958SVincent Donnefort cpu_buffer->reader_page = head;
16934e5b958SVincent Donnefort cpu_buffer->meta->reader.lost_events = overrun - cpu_buffer->last_overrun;
17034e5b958SVincent Donnefort cpu_buffer->meta->reader.id = cpu_buffer->reader_page->id;
17134e5b958SVincent Donnefort cpu_buffer->last_overrun = overrun;
17234e5b958SVincent Donnefort
17334e5b958SVincent Donnefort return 0;
17434e5b958SVincent Donnefort }
17534e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_swap_reader_page);
17634e5b958SVincent Donnefort
/*
 * Advance the writer to the next page of the ring.
 *
 * If the next page is the current head (not yet consumed by the reader), the
 * writer steals it: its entries are accounted as overrun and the head moves
 * one page forward.
 *
 * Returns the new (reset) tail page.
 */
static struct simple_buffer_page *simple_rb_move_tail(struct simple_rb_per_cpu *cpu_buffer)
{
	struct simple_buffer_page *tail, *new_tail;

	tail = cpu_buffer->tail_page;
	new_tail = simple_bpage_next_page(tail);

	if (simple_bpage_unset_head_link(tail, new_tail, SIMPLE_RB_LINK_HEAD_MOVING)) {
		/*
		 * Oh no! we've caught the head. There is none anymore and
		 * swap_reader will spin until we set the new one. Overrun must
		 * be written first, to make sure we report the correct number
		 * of lost events.
		 */
		simple_rb_meta_inc(cpu_buffer->meta->overrun, new_tail->entries);
		simple_rb_meta_inc(cpu_buffer->meta->pages_lost, 1);

		simple_bpage_set_head_link(new_tail);
		simple_bpage_set_normal_link(tail);
	}

	simple_bpage_reset(new_tail);
	cpu_buffer->tail_page = new_tail;

	simple_rb_meta_inc(cpu_buffer->meta->pages_touched, 1);

	return new_tail;
}
20534e5b958SVincent Donnefort
rb_event_size(unsigned long length)20634e5b958SVincent Donnefort static unsigned long rb_event_size(unsigned long length)
20734e5b958SVincent Donnefort {
20834e5b958SVincent Donnefort struct ring_buffer_event *event;
20934e5b958SVincent Donnefort
21034e5b958SVincent Donnefort return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
21134e5b958SVincent Donnefort }
21234e5b958SVincent Donnefort
21334e5b958SVincent Donnefort static struct ring_buffer_event *
rb_event_add_ts_extend(struct ring_buffer_event * event,u64 delta)21434e5b958SVincent Donnefort rb_event_add_ts_extend(struct ring_buffer_event *event, u64 delta)
21534e5b958SVincent Donnefort {
21634e5b958SVincent Donnefort event->type_len = RINGBUF_TYPE_TIME_EXTEND;
21734e5b958SVincent Donnefort event->time_delta = delta & TS_MASK;
21834e5b958SVincent Donnefort event->array[0] = delta >> TS_SHIFT;
21934e5b958SVincent Donnefort
22034e5b958SVincent Donnefort return (struct ring_buffer_event *)((unsigned long)event + 8);
22134e5b958SVincent Donnefort }
22234e5b958SVincent Donnefort
/*
 * Reserve room on the tail page for an event with @length bytes of payload,
 * emitting a TIME_EXTEND event first when the delta since write_stamp does
 * not fit in the event header.
 *
 * Moves to the next page when the event would overflow the current one. The
 * first event of a page absorbs the absolute timestamp into the page header,
 * so its delta is 0 and no extend is needed.
 */
static struct ring_buffer_event *
simple_rb_reserve_next(struct simple_rb_per_cpu *cpu_buffer, unsigned long length, u64 timestamp)
{
	unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
	struct simple_buffer_page *tail = cpu_buffer->tail_page;
	struct ring_buffer_event *event;
	u32 write, prev_write;
	u64 time_delta;

	time_delta = timestamp - cpu_buffer->write_stamp;

	/* Delta too large for the header: an 8-byte extend event is needed */
	if (test_time_stamp(time_delta))
		ts_ext_size = 8;

	prev_write = tail->write;
	write = prev_write + event_size + ts_ext_size;

	if (unlikely(write > (PAGE_SIZE - BUF_PAGE_HDR_SIZE)))
		tail = simple_rb_move_tail(cpu_buffer);

	if (!tail->entries) {
		/* First event of the page carries the absolute timestamp */
		tail->page->time_stamp = timestamp;
		time_delta = 0;
		ts_ext_size = 0;
		write = event_size;
		prev_write = 0;
	}

	tail->write = write;
	tail->entries++;

	cpu_buffer->write_stamp = timestamp;

	event = (struct ring_buffer_event *)(tail->page->data + prev_write);
	if (ts_ext_size) {
		event = rb_event_add_ts_extend(event, time_delta);
		time_delta = 0;
	}

	/* type_len 0: the payload length lives in array[0] */
	event->type_len = 0;
	event->time_delta = time_delta;
	event->array[0] = event_size - RB_EVNT_HDR_SIZE;

	return event;
}
26834e5b958SVincent Donnefort
26934e5b958SVincent Donnefort /**
27034e5b958SVincent Donnefort * simple_ring_buffer_reserve - Reserve an entry in @cpu_buffer
27134e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu
27234e5b958SVincent Donnefort * @length: Size of the entry in bytes
27334e5b958SVincent Donnefort * @timestamp: Timestamp of the entry
27434e5b958SVincent Donnefort *
27534e5b958SVincent Donnefort * Returns the address of the entry where to write data or NULL
27634e5b958SVincent Donnefort */
simple_ring_buffer_reserve(struct simple_rb_per_cpu * cpu_buffer,unsigned long length,u64 timestamp)27734e5b958SVincent Donnefort void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
27834e5b958SVincent Donnefort u64 timestamp)
27934e5b958SVincent Donnefort {
28034e5b958SVincent Donnefort struct ring_buffer_event *rb_event;
28134e5b958SVincent Donnefort
28234e5b958SVincent Donnefort if (cmpxchg(&cpu_buffer->status, SIMPLE_RB_READY, SIMPLE_RB_WRITING) != SIMPLE_RB_READY)
28334e5b958SVincent Donnefort return NULL;
28434e5b958SVincent Donnefort
28534e5b958SVincent Donnefort rb_event = simple_rb_reserve_next(cpu_buffer, length, timestamp);
28634e5b958SVincent Donnefort
28734e5b958SVincent Donnefort return &rb_event->array[1];
28834e5b958SVincent Donnefort }
28934e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_reserve);
29034e5b958SVincent Donnefort
/**
 * simple_ring_buffer_commit - Commit the entry reserved with simple_ring_buffer_reserve()
 * @cpu_buffer: The simple_rb_per_cpu where the entry has been reserved
 */
void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer)
{
	/* Publish the write cursor so the reader sees the new event */
	local_set(&cpu_buffer->tail_page->page->commit,
		  cpu_buffer->tail_page->write);
	simple_rb_meta_inc(cpu_buffer->meta->entries, 1);

	/*
	 * Paired with simple_rb_enable_tracing() to ensure data is
	 * written to the ring-buffer before teardown.
	 */
	smp_store_release(&cpu_buffer->status, SIMPLE_RB_READY);
}
EXPORT_SYMBOL_GPL(simple_ring_buffer_commit);
30834e5b958SVincent Donnefort
/*
 * Flip the buffer between READY and UNAVAILABLE.
 *
 * When disabling, spin until any in-flight writer (SIMPLE_RB_WRITING) has
 * committed. The acquire pairs with the release in simple_ring_buffer_commit()
 * so the writer's data is visible before teardown proceeds.
 *
 * Returns the previous status.
 */
static u32 simple_rb_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
{
	u32 prev_status;

	if (enable)
		return cmpxchg(&cpu_buffer->status, SIMPLE_RB_UNAVAILABLE, SIMPLE_RB_READY);

	/* Wait for the buffer to be released */
	do {
		prev_status = cmpxchg_acquire(&cpu_buffer->status,
					      SIMPLE_RB_READY,
					      SIMPLE_RB_UNAVAILABLE);
	} while (prev_status == SIMPLE_RB_WRITING);

	return prev_status;
}
32534e5b958SVincent Donnefort
32634e5b958SVincent Donnefort /**
32734e5b958SVincent Donnefort * simple_ring_buffer_reset - Reset @cpu_buffer
32834e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu
32934e5b958SVincent Donnefort *
33034e5b958SVincent Donnefort * This will not clear the content of the data, only reset counters and pointers
33134e5b958SVincent Donnefort *
33234e5b958SVincent Donnefort * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded.
33334e5b958SVincent Donnefort */
simple_ring_buffer_reset(struct simple_rb_per_cpu * cpu_buffer)33434e5b958SVincent Donnefort int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
33534e5b958SVincent Donnefort {
33634e5b958SVincent Donnefort struct simple_buffer_page *bpage;
33734e5b958SVincent Donnefort u32 prev_status;
33834e5b958SVincent Donnefort int ret;
33934e5b958SVincent Donnefort
34034e5b958SVincent Donnefort if (!simple_rb_loaded(cpu_buffer))
34134e5b958SVincent Donnefort return -ENODEV;
34234e5b958SVincent Donnefort
34334e5b958SVincent Donnefort prev_status = simple_rb_enable_tracing(cpu_buffer, false);
34434e5b958SVincent Donnefort
34534e5b958SVincent Donnefort ret = simple_rb_find_head(cpu_buffer);
34634e5b958SVincent Donnefort if (ret)
34734e5b958SVincent Donnefort return ret;
34834e5b958SVincent Donnefort
34934e5b958SVincent Donnefort bpage = cpu_buffer->tail_page = cpu_buffer->head_page;
35034e5b958SVincent Donnefort do {
35134e5b958SVincent Donnefort simple_bpage_reset(bpage);
35234e5b958SVincent Donnefort bpage = simple_bpage_next_page(bpage);
35334e5b958SVincent Donnefort } while (bpage != cpu_buffer->head_page);
35434e5b958SVincent Donnefort
35534e5b958SVincent Donnefort simple_bpage_reset(cpu_buffer->reader_page);
35634e5b958SVincent Donnefort
35734e5b958SVincent Donnefort cpu_buffer->last_overrun = 0;
35834e5b958SVincent Donnefort cpu_buffer->write_stamp = 0;
35934e5b958SVincent Donnefort
36034e5b958SVincent Donnefort cpu_buffer->meta->reader.read = 0;
36134e5b958SVincent Donnefort cpu_buffer->meta->reader.lost_events = 0;
36234e5b958SVincent Donnefort cpu_buffer->meta->entries = 0;
36334e5b958SVincent Donnefort cpu_buffer->meta->overrun = 0;
36434e5b958SVincent Donnefort cpu_buffer->meta->read = 0;
36534e5b958SVincent Donnefort cpu_buffer->meta->pages_lost = 0;
36634e5b958SVincent Donnefort cpu_buffer->meta->pages_touched = 0;
36734e5b958SVincent Donnefort
36834e5b958SVincent Donnefort if (prev_status == SIMPLE_RB_READY)
36934e5b958SVincent Donnefort simple_rb_enable_tracing(cpu_buffer, true);
37034e5b958SVincent Donnefort
37134e5b958SVincent Donnefort return 0;
37234e5b958SVincent Donnefort }
37334e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_reset);
37434e5b958SVincent Donnefort
simple_ring_buffer_init_mm(struct simple_rb_per_cpu * cpu_buffer,struct simple_buffer_page * bpages,const struct ring_buffer_desc * desc,void * (* load_page)(unsigned long va),void (* unload_page)(void * va))375*63592308SVincent Donnefort int simple_ring_buffer_init_mm(struct simple_rb_per_cpu *cpu_buffer,
376*63592308SVincent Donnefort struct simple_buffer_page *bpages,
377*63592308SVincent Donnefort const struct ring_buffer_desc *desc,
378*63592308SVincent Donnefort void *(*load_page)(unsigned long va),
379*63592308SVincent Donnefort void (*unload_page)(void *va))
380*63592308SVincent Donnefort {
381*63592308SVincent Donnefort struct simple_buffer_page *bpage = bpages;
382*63592308SVincent Donnefort int ret = 0;
383*63592308SVincent Donnefort void *page;
384*63592308SVincent Donnefort int i;
385*63592308SVincent Donnefort
386*63592308SVincent Donnefort /* At least 1 reader page and two pages in the ring-buffer */
387*63592308SVincent Donnefort if (desc->nr_page_va < 3)
388*63592308SVincent Donnefort return -EINVAL;
389*63592308SVincent Donnefort
390*63592308SVincent Donnefort memset(cpu_buffer, 0, sizeof(*cpu_buffer));
391*63592308SVincent Donnefort
392*63592308SVincent Donnefort cpu_buffer->meta = load_page(desc->meta_va);
393*63592308SVincent Donnefort if (!cpu_buffer->meta)
394*63592308SVincent Donnefort return -EINVAL;
395*63592308SVincent Donnefort
396*63592308SVincent Donnefort memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
397*63592308SVincent Donnefort cpu_buffer->meta->meta_page_size = PAGE_SIZE;
398*63592308SVincent Donnefort cpu_buffer->meta->nr_subbufs = cpu_buffer->nr_pages;
399*63592308SVincent Donnefort
400*63592308SVincent Donnefort /* The reader page is not part of the ring initially */
401*63592308SVincent Donnefort page = load_page(desc->page_va[0]);
402*63592308SVincent Donnefort if (!page) {
403*63592308SVincent Donnefort unload_page(cpu_buffer->meta);
404*63592308SVincent Donnefort return -EINVAL;
405*63592308SVincent Donnefort }
406*63592308SVincent Donnefort
407*63592308SVincent Donnefort simple_bpage_init(bpage, page);
408*63592308SVincent Donnefort bpage->id = 0;
409*63592308SVincent Donnefort
410*63592308SVincent Donnefort cpu_buffer->nr_pages = 1;
411*63592308SVincent Donnefort
412*63592308SVincent Donnefort cpu_buffer->reader_page = bpage;
413*63592308SVincent Donnefort cpu_buffer->tail_page = bpage + 1;
414*63592308SVincent Donnefort cpu_buffer->head_page = bpage + 1;
415*63592308SVincent Donnefort
416*63592308SVincent Donnefort for (i = 1; i < desc->nr_page_va; i++) {
417*63592308SVincent Donnefort page = load_page(desc->page_va[i]);
418*63592308SVincent Donnefort if (!page) {
419*63592308SVincent Donnefort ret = -EINVAL;
420*63592308SVincent Donnefort break;
421*63592308SVincent Donnefort }
422*63592308SVincent Donnefort
423*63592308SVincent Donnefort simple_bpage_init(++bpage, page);
424*63592308SVincent Donnefort
425*63592308SVincent Donnefort bpage->link.next = &(bpage + 1)->link;
426*63592308SVincent Donnefort bpage->link.prev = &(bpage - 1)->link;
427*63592308SVincent Donnefort bpage->id = i;
428*63592308SVincent Donnefort
429*63592308SVincent Donnefort cpu_buffer->nr_pages = i + 1;
430*63592308SVincent Donnefort }
431*63592308SVincent Donnefort
432*63592308SVincent Donnefort if (ret) {
433*63592308SVincent Donnefort for (i--; i >= 0; i--)
434*63592308SVincent Donnefort unload_page((void *)desc->page_va[i]);
435*63592308SVincent Donnefort unload_page(cpu_buffer->meta);
436*63592308SVincent Donnefort
437*63592308SVincent Donnefort return ret;
438*63592308SVincent Donnefort }
439*63592308SVincent Donnefort
440*63592308SVincent Donnefort /* Close the ring */
441*63592308SVincent Donnefort bpage->link.next = &cpu_buffer->tail_page->link;
442*63592308SVincent Donnefort cpu_buffer->tail_page->link.prev = &bpage->link;
443*63592308SVincent Donnefort
444*63592308SVincent Donnefort /* The last init'ed page points to the head page */
445*63592308SVincent Donnefort simple_bpage_set_head_link(bpage);
446*63592308SVincent Donnefort
447*63592308SVincent Donnefort cpu_buffer->bpages = bpages;
448*63592308SVincent Donnefort
449*63592308SVincent Donnefort return 0;
450*63592308SVincent Donnefort }
451*63592308SVincent Donnefort
/* Identity mapping: the VA is already usable as a pointer */
static void *__load_page(unsigned long page)
{
	return (void *)page;
}
456*63592308SVincent Donnefort
/* Identity mapping needs no teardown */
static void __unload_page(void *page)
{
	(void)page;
}
458*63592308SVincent Donnefort
45934e5b958SVincent Donnefort /**
46034e5b958SVincent Donnefort * simple_ring_buffer_init - Init @cpu_buffer based on @desc
46134e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu buffer to init, allocated by the caller.
46234e5b958SVincent Donnefort * @bpages: Array of simple_buffer_pages, with as many elements as @desc->nr_page_va
46334e5b958SVincent Donnefort * @desc: A ring_buffer_desc
46434e5b958SVincent Donnefort *
46534e5b958SVincent Donnefort * Returns 0 on success or -EINVAL if the content of @desc is invalid
46634e5b958SVincent Donnefort */
simple_ring_buffer_init(struct simple_rb_per_cpu * cpu_buffer,struct simple_buffer_page * bpages,const struct ring_buffer_desc * desc)46734e5b958SVincent Donnefort int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
46834e5b958SVincent Donnefort const struct ring_buffer_desc *desc)
46934e5b958SVincent Donnefort {
470*63592308SVincent Donnefort return simple_ring_buffer_init_mm(cpu_buffer, bpages, desc, __load_page, __unload_page);
47134e5b958SVincent Donnefort }
47234e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_init);
47334e5b958SVincent Donnefort
simple_ring_buffer_unload_mm(struct simple_rb_per_cpu * cpu_buffer,void (* unload_page)(void *))474*63592308SVincent Donnefort void simple_ring_buffer_unload_mm(struct simple_rb_per_cpu *cpu_buffer,
475*63592308SVincent Donnefort void (*unload_page)(void *))
476*63592308SVincent Donnefort {
477*63592308SVincent Donnefort int p;
478*63592308SVincent Donnefort
479*63592308SVincent Donnefort if (!simple_rb_loaded(cpu_buffer))
480*63592308SVincent Donnefort return;
481*63592308SVincent Donnefort
482*63592308SVincent Donnefort simple_rb_enable_tracing(cpu_buffer, false);
483*63592308SVincent Donnefort
484*63592308SVincent Donnefort unload_page(cpu_buffer->meta);
485*63592308SVincent Donnefort for (p = 0; p < cpu_buffer->nr_pages; p++)
486*63592308SVincent Donnefort unload_page(cpu_buffer->bpages[p].page);
487*63592308SVincent Donnefort
488*63592308SVincent Donnefort cpu_buffer->bpages = NULL;
489*63592308SVincent Donnefort }
490*63592308SVincent Donnefort
49134e5b958SVincent Donnefort /**
49234e5b958SVincent Donnefort * simple_ring_buffer_unload - Prepare @cpu_buffer for deletion
49334e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu that will be deleted.
49434e5b958SVincent Donnefort */
simple_ring_buffer_unload(struct simple_rb_per_cpu * cpu_buffer)49534e5b958SVincent Donnefort void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
49634e5b958SVincent Donnefort {
497*63592308SVincent Donnefort return simple_ring_buffer_unload_mm(cpu_buffer, __unload_page);
49834e5b958SVincent Donnefort }
49934e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_unload);
50034e5b958SVincent Donnefort
50134e5b958SVincent Donnefort /**
50234e5b958SVincent Donnefort * simple_ring_buffer_enable_tracing - Enable or disable writing to @cpu_buffer
50334e5b958SVincent Donnefort * @cpu_buffer: A simple_rb_per_cpu
50434e5b958SVincent Donnefort * @enable: True to enable tracing, False to disable it
50534e5b958SVincent Donnefort *
50634e5b958SVincent Donnefort * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded
50734e5b958SVincent Donnefort */
simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu * cpu_buffer,bool enable)50834e5b958SVincent Donnefort int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
50934e5b958SVincent Donnefort {
51034e5b958SVincent Donnefort if (!simple_rb_loaded(cpu_buffer))
51134e5b958SVincent Donnefort return -ENODEV;
51234e5b958SVincent Donnefort
51334e5b958SVincent Donnefort simple_rb_enable_tracing(cpu_buffer, enable);
51434e5b958SVincent Donnefort
51534e5b958SVincent Donnefort return 0;
51634e5b958SVincent Donnefort }
51734e5b958SVincent Donnefort EXPORT_SYMBOL_GPL(simple_ring_buffer_enable_tracing);
518