1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic ring buffer
4  *
5  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6  */
7 #include <linux/trace_recursion.h>
8 #include <linux/trace_events.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/trace_clock.h>
11 #include <linux/sched/clock.h>
12 #include <linux/cacheflush.h>
13 #include <linux/trace_seq.h>
14 #include <linux/spinlock.h>
15 #include <linux/irq_work.h>
16 #include <linux/security.h>
17 #include <linux/uaccess.h>
18 #include <linux/hardirq.h>
19 #include <linux/kthread.h>	/* for self test */
20 #include <linux/module.h>
21 #include <linux/percpu.h>
22 #include <linux/mutex.h>
23 #include <linux/delay.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/hash.h>
27 #include <linux/list.h>
28 #include <linux/cpu.h>
29 #include <linux/oom.h>
30 #include <linux/mm.h>
31 
32 #include <asm/local64.h>
33 #include <asm/local.h>
34 #include <asm/setup.h>
35 
36 #include "trace.h"
37 
38 /*
39  * The "absolute" timestamp in the buffer is only 59 bits.
40  * If a clock has the 5 MSBs set, it needs to be saved and
41  * reinserted.
42  */
43 #define TS_MSB		(0xf8ULL << 56)
44 #define ABS_TS_MASK	(~TS_MSB)
45 
46 static void update_pages_handler(struct work_struct *work);
47 
48 #define RING_BUFFER_META_MAGIC	0xBADFEED
49 
50 struct ring_buffer_meta {
51 	int		magic;
52 	int		struct_sizes;
53 	unsigned long	total_size;
54 	unsigned long	buffers_offset;
55 };
56 
57 struct ring_buffer_cpu_meta {
58 	unsigned long	first_buffer;
59 	unsigned long	head_buffer;
60 	unsigned long	commit_buffer;
61 	__u32		subbuf_size;
62 	__u32		nr_subbufs;
63 	int		buffers[];
64 };
65 
66 /*
67  * The ring buffer header is special. We must manually keep it up to date.
68  */
69 int ring_buffer_print_entry_header(struct trace_seq *s)
70 {
71 	trace_seq_puts(s, "# compressed entry header\n");
72 	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
73 	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
74 	trace_seq_puts(s, "\tarray       :   32 bits\n");
75 	trace_seq_putc(s, '\n');
76 	trace_seq_printf(s, "\tpadding     : type == %d\n",
77 			 RINGBUF_TYPE_PADDING);
78 	trace_seq_printf(s, "\ttime_extend : type == %d\n",
79 			 RINGBUF_TYPE_TIME_EXTEND);
80 	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
81 			 RINGBUF_TYPE_TIME_STAMP);
82 	trace_seq_printf(s, "\tdata max type_len  == %d\n",
83 			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
84 
85 	return !trace_seq_has_overflowed(s);
86 }
87 
88 /*
89  * The ring buffer is made up of a list of pages. A separate list of pages is
90  * allocated for each CPU. A writer may only write to a buffer that is
91  * associated with the CPU it is currently executing on.  A reader may read
92  * from any per cpu buffer.
93  *
94  * The reader is special. For each per cpu buffer, the reader has its own
95  * reader page. When a reader has read the entire reader page, this reader
96  * page is swapped with another page in the ring buffer.
97  *
98  * Now, as long as the writer is off the reader page, the reader can do
99  * whatever it wants with that page. The writer will never write to that page
100  * again (as long as it is out of the ring buffer).
101  *
102  * Here's some silly ASCII art.
103  *
104  *   +------+
105  *   |reader|          RING BUFFER
106  *   |page  |
107  *   +------+        +---+   +---+   +---+
108  *                   |   |-->|   |-->|   |
109  *                   +---+   +---+   +---+
110  *                     ^               |
111  *                     |               |
112  *                     +---------------+
113  *
114  *
115  *   +------+
116  *   |reader|          RING BUFFER
117  *   |page  |------------------v
118  *   +------+        +---+   +---+   +---+
119  *                   |   |-->|   |-->|   |
120  *                   +---+   +---+   +---+
121  *                     ^               |
122  *                     |               |
123  *                     +---------------+
124  *
125  *
126  *   +------+
127  *   |reader|          RING BUFFER
128  *   |page  |------------------v
129  *   +------+        +---+   +---+   +---+
130  *      ^            |   |-->|   |-->|   |
131  *      |            +---+   +---+   +---+
132  *      |                              |
133  *      |                              |
134  *      +------------------------------+
135  *
136  *
137  *   +------+
138  *   |buffer|          RING BUFFER
139  *   |page  |------------------v
140  *   +------+        +---+   +---+   +---+
141  *      ^            |   |   |   |-->|   |
142  *      |   New      +---+   +---+   +---+
143  *      |  Reader------^               |
144  *      |   page                       |
145  *      +------------------------------+
146  *
147  *
148  * After we make this swap, the reader can hand this page off to the splice
149  * code and be done with it. It can even allocate a new page if it needs to
150  * and swap that into the ring buffer.
151  *
152  * We will be using cmpxchg soon to make all this lockless.
153  *
154  */
155 
156 /* Used for individual buffers (after the counter) */
157 #define RB_BUFFER_OFF		(1 << 20)
158 
159 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
160 
161 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
162 #define RB_ALIGNMENT		4U
163 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
164 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
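
/*
 * A concrete consequence of the sizes above (an illustrative note, not a
 * new API): with RB_ALIGNMENT of 4 and a maximum type_len of
 * RINGBUF_TYPE_DATA_TYPE_LEN_MAX (28), payloads up to RB_MAX_SMALL_DATA
 * (112 bytes) can encode their length directly in the 5-bit type_len
 * field, with the data starting at array[0]. Larger payloads store the
 * length in array[0] and the data starts at array[1] (see
 * rb_event_data() below).
 */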
165 
166 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
167 # define RB_FORCE_8BYTE_ALIGNMENT	0
168 # define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
169 #else
170 # define RB_FORCE_8BYTE_ALIGNMENT	1
171 # define RB_ARCH_ALIGNMENT		8U
172 #endif
173 
174 #define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
175 
176 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
177 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
178 
179 enum {
180 	RB_LEN_TIME_EXTEND = 8,
181 	RB_LEN_TIME_STAMP =  8,
182 };
183 
184 #define skip_time_extend(event) \
185 	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
186 
187 #define extended_time(event) \
188 	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
189 
190 static inline bool rb_null_event(struct ring_buffer_event *event)
191 {
192 	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
193 }
194 
195 static void rb_event_set_padding(struct ring_buffer_event *event)
196 {
197 	/* padding has a NULL time_delta */
198 	event->type_len = RINGBUF_TYPE_PADDING;
199 	event->time_delta = 0;
200 }
201 
202 static unsigned
203 rb_event_data_length(struct ring_buffer_event *event)
204 {
205 	unsigned length;
206 
207 	if (event->type_len)
208 		length = event->type_len * RB_ALIGNMENT;
209 	else
210 		length = event->array[0];
211 	return length + RB_EVNT_HDR_SIZE;
212 }
213 
214 /*
215  * Return the length of the given event. Will return
216  * the length of the time extend if the event is a
217  * time extend.
218  */
219 static inline unsigned
220 rb_event_length(struct ring_buffer_event *event)
221 {
222 	switch (event->type_len) {
223 	case RINGBUF_TYPE_PADDING:
224 		if (rb_null_event(event))
225 			/* undefined */
226 			return -1;
227 		return  event->array[0] + RB_EVNT_HDR_SIZE;
228 
229 	case RINGBUF_TYPE_TIME_EXTEND:
230 		return RB_LEN_TIME_EXTEND;
231 
232 	case RINGBUF_TYPE_TIME_STAMP:
233 		return RB_LEN_TIME_STAMP;
234 
235 	case RINGBUF_TYPE_DATA:
236 		return rb_event_data_length(event);
237 	default:
238 		WARN_ON_ONCE(1);
239 	}
240 	/* not hit */
241 	return 0;
242 }
243 
244 /*
245  * Return total length of time extend and data,
246  *   or just the event length for all other events.
247  */
248 static inline unsigned
249 rb_event_ts_length(struct ring_buffer_event *event)
250 {
251 	unsigned len = 0;
252 
253 	if (extended_time(event)) {
254 		/* time extends include the data event after it */
255 		len = RB_LEN_TIME_EXTEND;
256 		event = skip_time_extend(event);
257 	}
258 	return len + rb_event_length(event);
259 }
260 
261 /**
262  * ring_buffer_event_length - return the length of the event
263  * @event: the event to get the length of
264  *
265  * Returns the size of the data load of a data event.
266  * If the event is something other than a data event, it
267  * returns the size of the event itself. With the exception
268  * of a TIME EXTEND, where it still returns the size of the
269  * data load of the data event after it.
270  */
271 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
272 {
273 	unsigned length;
274 
275 	if (extended_time(event))
276 		event = skip_time_extend(event);
277 
278 	length = rb_event_length(event);
279 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280 		return length;
281 	length -= RB_EVNT_HDR_SIZE;
282 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283 		length -= sizeof(event->array[0]);
284 	return length;
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
287 
288 /* inline for ring buffer fast paths */
289 static __always_inline void *
290 rb_event_data(struct ring_buffer_event *event)
291 {
292 	if (extended_time(event))
293 		event = skip_time_extend(event);
294 	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
295 	/* If length is in len field, then array[0] has the data */
296 	if (event->type_len)
297 		return (void *)&event->array[0];
298 	/* Otherwise length is in array[0] and array[1] has the data */
299 	return (void *)&event->array[1];
300 }
301 
302 /**
303  * ring_buffer_event_data - return the data of the event
304  * @event: the event to get the data from
305  */
306 void *ring_buffer_event_data(struct ring_buffer_event *event)
307 {
308 	return rb_event_data(event);
309 }
310 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
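
/*
 * A minimal consumer-side sketch (illustrative only): the two exported
 * accessors above are typically paired with a consuming read such as
 * ring_buffer_consume(), which is defined further down in this file:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	void *payload;
 *	unsigned len;
 *	u64 ts;
 *
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost);
 *	if (event) {
 *		len = ring_buffer_event_length(event);
 *		payload = ring_buffer_event_data(event);
 *		consume_payload(payload, len);
 *	}
 *
 * where consume_payload() stands in for whatever the caller does with
 * the data.
 */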
311 
312 #define for_each_buffer_cpu(buffer, cpu)		\
313 	for_each_cpu(cpu, buffer->cpumask)
314 
315 #define for_each_online_buffer_cpu(buffer, cpu)		\
316 	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
317 
318 #define TS_SHIFT	27
319 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
320 #define TS_DELTA_TEST	(~TS_MASK)
321 
322 static u64 rb_event_time_stamp(struct ring_buffer_event *event)
323 {
324 	u64 ts;
325 
326 	ts = event->array[0];
327 	ts <<= TS_SHIFT;
328 	ts += event->time_delta;
329 
330 	return ts;
331 }
332 
333 /* Flag when events were overwritten */
334 #define RB_MISSED_EVENTS	(1 << 31)
335 /* Missed count stored at end */
336 #define RB_MISSED_STORED	(1 << 30)
337 
338 #define RB_MISSED_MASK		(3 << 30)
339 
340 struct buffer_data_page {
341 	u64		 time_stamp;	/* page time stamp */
342 	local_t		 commit;	/* write committed index */
343 	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
344 };
345 
346 struct buffer_data_read_page {
347 	unsigned		order;	/* order of the page */
348 	struct buffer_data_page	*data;	/* actual data, stored in this page */
349 };
350 
351 /*
352  * Note, the buffer_page list must be first. The buffer pages
353  * are allocated in cache lines, which means that each buffer
354  * page will be at the beginning of a cache line, and thus
355  * the least significant bits will be zero. We use this to
356  * add flags in the list struct pointers, to make the ring buffer
357  * lockless.
358  */
359 struct buffer_page {
360 	struct list_head list;		/* list of buffer pages */
361 	local_t		 write;		/* index for next write */
362 	unsigned	 read;		/* index for next read */
363 	local_t		 entries;	/* entries on this page */
364 	unsigned long	 real_end;	/* real end of data */
365 	unsigned	 order;		/* order of the page */
366 	u32		 id:30;		/* ID for external mapping */
367 	u32		 range:1;	/* Mapped via a range */
368 	struct buffer_data_page *page;	/* Actual data page */
369 };
370 
371 /*
372  * The buffer page counters, write and entries, must be reset
373  * atomically when crossing page boundaries. To synchronize this
374  * update, two counters are inserted into the number. One is
375  * the actual counter for the write position or count on the page.
376  *
377  * The other is a counter of updaters. Before an update happens
378  * the update partition of the counter is incremented. This will
379  * allow the updater to update the counter atomically.
380  *
381  * The counter is 20 bits, and the state data is 12.
382  */
383 #define RB_WRITE_MASK		0xfffff
384 #define RB_WRITE_INTCNT		(1 << 20)
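
/*
 * Illustrative decode of the split described above (a sketch, not used
 * directly by the code; the helpers below always mask with RB_WRITE_MASK):
 *
 *	unsigned long w        = local_read(&bpage->write);
 *	unsigned long index    = w & RB_WRITE_MASK;
 *	unsigned long updaters = w >> 20;
 *
 * index is the 20-bit write position on the page, and updaters is the
 * 12-bit count of in-flight updates added via RB_WRITE_INTCNT.
 */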
385 
386 static void rb_init_page(struct buffer_data_page *bpage)
387 {
388 	local_set(&bpage->commit, 0);
389 }
390 
391 static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
392 {
393 	return local_read(&bpage->page->commit);
394 }
395 
396 static void free_buffer_page(struct buffer_page *bpage)
397 {
398 	/* Range pages are not to be freed */
399 	if (!bpage->range)
400 		free_pages((unsigned long)bpage->page, bpage->order);
401 	kfree(bpage);
402 }
403 
404 /*
405  * We need to fit the time_stamp delta into 27 bits.
406  */
407 static inline bool test_time_stamp(u64 delta)
408 {
409 	return !!(delta & TS_DELTA_TEST);
410 }
411 
412 struct rb_irq_work {
413 	struct irq_work			work;
414 	wait_queue_head_t		waiters;
415 	wait_queue_head_t		full_waiters;
416 	atomic_t			seq;
417 	bool				waiters_pending;
418 	bool				full_waiters_pending;
419 	bool				wakeup_full;
420 };
421 
422 /*
423  * Structure to hold event state and handle nested events.
424  */
425 struct rb_event_info {
426 	u64			ts;
427 	u64			delta;
428 	u64			before;
429 	u64			after;
430 	unsigned long		length;
431 	struct buffer_page	*tail_page;
432 	int			add_timestamp;
433 };
434 
435 /*
436  * Used for the add_timestamp
437  *  NONE
438  *  EXTEND - wants a time extend
439  *  ABSOLUTE - the buffer requests all events to have absolute time stamps
440  *  FORCE - force a full time stamp.
441  */
442 enum {
443 	RB_ADD_STAMP_NONE		= 0,
444 	RB_ADD_STAMP_EXTEND		= BIT(1),
445 	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
446 	RB_ADD_STAMP_FORCE		= BIT(3)
447 };
448 /*
449  * Used for which event context the event is in.
450  *  TRANSITION = 0
451  *  NMI     = 1
452  *  IRQ     = 2
453  *  SOFTIRQ = 3
454  *  NORMAL  = 4
455  *
456  * See trace_recursive_lock() comment below for more details.
457  */
458 enum {
459 	RB_CTX_TRANSITION,
460 	RB_CTX_NMI,
461 	RB_CTX_IRQ,
462 	RB_CTX_SOFTIRQ,
463 	RB_CTX_NORMAL,
464 	RB_CTX_MAX
465 };
466 
467 struct rb_time_struct {
468 	local64_t	time;
469 };
470 typedef struct rb_time_struct rb_time_t;
471 
472 #define MAX_NEST	5
473 
474 /*
475  * head_page == tail_page && head == tail then buffer is empty.
476  */
477 struct ring_buffer_per_cpu {
478 	int				cpu;
479 	atomic_t			record_disabled;
480 	atomic_t			resize_disabled;
481 	struct trace_buffer	*buffer;
482 	raw_spinlock_t			reader_lock;	/* serialize readers */
483 	arch_spinlock_t			lock;
484 	struct lock_class_key		lock_key;
485 	struct buffer_data_page		*free_page;
486 	unsigned long			nr_pages;
487 	unsigned int			current_context;
488 	struct list_head		*pages;
489 	/* pages generation counter, incremented when the list changes */
490 	unsigned long			cnt;
491 	struct buffer_page		*head_page;	/* read from head */
492 	struct buffer_page		*tail_page;	/* write to tail */
493 	struct buffer_page		*commit_page;	/* committed pages */
494 	struct buffer_page		*reader_page;
495 	unsigned long			lost_events;
496 	unsigned long			last_overrun;
497 	unsigned long			nest;
498 	local_t				entries_bytes;
499 	local_t				entries;
500 	local_t				overrun;
501 	local_t				commit_overrun;
502 	local_t				dropped_events;
503 	local_t				committing;
504 	local_t				commits;
505 	local_t				pages_touched;
506 	local_t				pages_lost;
507 	local_t				pages_read;
508 	long				last_pages_touch;
509 	size_t				shortest_full;
510 	unsigned long			read;
511 	unsigned long			read_bytes;
512 	rb_time_t			write_stamp;
513 	rb_time_t			before_stamp;
514 	u64				event_stamp[MAX_NEST];
515 	u64				read_stamp;
516 	/* pages removed since last reset */
517 	unsigned long			pages_removed;
518 
519 	unsigned int			mapped;
520 	unsigned int			user_mapped;	/* user space mapping */
521 	struct mutex			mapping_lock;
522 	unsigned long			*subbuf_ids;	/* ID to subbuf VA */
523 	struct trace_buffer_meta	*meta_page;
524 	struct ring_buffer_cpu_meta	*ring_meta;
525 
526 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
527 	long				nr_pages_to_update;
528 	struct list_head		new_pages; /* new pages to add */
529 	struct work_struct		update_pages_work;
530 	struct completion		update_done;
531 
532 	struct rb_irq_work		irq_work;
533 };
534 
535 struct trace_buffer {
536 	unsigned			flags;
537 	int				cpus;
538 	atomic_t			record_disabled;
539 	atomic_t			resizing;
540 	cpumask_var_t			cpumask;
541 
542 	struct lock_class_key		*reader_lock_key;
543 
544 	struct mutex			mutex;
545 
546 	struct ring_buffer_per_cpu	**buffers;
547 
548 	struct hlist_node		node;
549 	u64				(*clock)(void);
550 
551 	struct rb_irq_work		irq_work;
552 	bool				time_stamp_abs;
553 
554 	unsigned long			range_addr_start;
555 	unsigned long			range_addr_end;
556 
557 	struct ring_buffer_meta		*meta;
558 
559 	unsigned int			subbuf_size;
560 	unsigned int			subbuf_order;
561 	unsigned int			max_data_size;
562 };
563 
564 struct ring_buffer_iter {
565 	struct ring_buffer_per_cpu	*cpu_buffer;
566 	unsigned long			head;
567 	unsigned long			next_event;
568 	struct buffer_page		*head_page;
569 	struct buffer_page		*cache_reader_page;
570 	unsigned long			cache_read;
571 	unsigned long			cache_pages_removed;
572 	u64				read_stamp;
573 	u64				page_stamp;
574 	struct ring_buffer_event	*event;
575 	size_t				event_size;
576 	int				missed_events;
577 };
578 
579 int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
580 {
581 	struct buffer_data_page field;
582 
583 	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
584 			 "offset:0;\tsize:%u;\tsigned:%u;\n",
585 			 (unsigned int)sizeof(field.time_stamp),
586 			 (unsigned int)is_signed_type(u64));
587 
588 	trace_seq_printf(s, "\tfield: local_t commit;\t"
589 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
590 			 (unsigned int)offsetof(typeof(field), commit),
591 			 (unsigned int)sizeof(field.commit),
592 			 (unsigned int)is_signed_type(long));
593 
594 	trace_seq_printf(s, "\tfield: int overwrite;\t"
595 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
596 			 (unsigned int)offsetof(typeof(field), commit),
597 			 1,
598 			 (unsigned int)is_signed_type(long));
599 
600 	trace_seq_printf(s, "\tfield: char data;\t"
601 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
602 			 (unsigned int)offsetof(typeof(field), data),
603 			 (unsigned int)buffer->subbuf_size,
604 			 (unsigned int)is_signed_type(char));
605 
606 	return !trace_seq_has_overflowed(s);
607 }
608 
609 static inline void rb_time_read(rb_time_t *t, u64 *ret)
610 {
611 	*ret = local64_read(&t->time);
612 }
613 static void rb_time_set(rb_time_t *t, u64 val)
614 {
615 	local64_set(&t->time, val);
616 }
617 
618 /*
619  * Enable this to make sure that the event passed to
620  * ring_buffer_event_time_stamp() is not committed and also
621  * is on the buffer that it passed in.
622  */
623 //#define RB_VERIFY_EVENT
624 #ifdef RB_VERIFY_EVENT
625 static struct list_head *rb_list_head(struct list_head *list);
626 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
627 			 void *event)
628 {
629 	struct buffer_page *page = cpu_buffer->commit_page;
630 	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
631 	struct list_head *next;
632 	long commit, write;
633 	unsigned long addr = (unsigned long)event;
634 	bool done = false;
635 	int stop = 0;
636 
637 	/* Make sure the event exists and is not committed yet */
638 	do {
639 		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
640 			done = true;
641 		commit = local_read(&page->page->commit);
642 		write = local_read(&page->write);
643 		if (addr >= (unsigned long)&page->page->data[commit] &&
644 		    addr < (unsigned long)&page->page->data[write])
645 			return;
646 
647 		next = rb_list_head(page->list.next);
648 		page = list_entry(next, struct buffer_page, list);
649 	} while (!done);
650 	WARN_ON_ONCE(1);
651 }
652 #else
653 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
654 			 void *event)
655 {
656 }
657 #endif
658 
659 /*
660  * The absolute time stamp drops the 5 MSBs and some clocks may
661  * require them. The rb_fix_abs_ts() will take a previous full
662  * time stamp, and add the 5 MSB of that time stamp on to the
663  * saved absolute time stamp. Then they are compared in case of
664  * the unlikely event that the latest time stamp incremented
665  * the 5 MSB.
666  */
667 static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
668 {
669 	if (save_ts & TS_MSB) {
670 		abs |= save_ts & TS_MSB;
671 		/* Check for overflow */
672 		if (unlikely(abs < save_ts))
673 			abs += 1ULL << 59;
674 	}
675 	return abs;
676 }
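
/*
 * Worked example for rb_fix_abs_ts() (illustrative values only):
 *
 *	save_ts = (3ULL << 59) | 100;	previous full time stamp
 *	abs     = 50;			59-bit stamp read from the buffer
 *
 * The 5 MSBs of save_ts are ORed back in, giving (3ULL << 59) | 50.
 * That is smaller than save_ts, so the 59-bit part must have wrapped,
 * and another 1ULL << 59 is added, yielding (4ULL << 59) | 50.
 */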
677 
678 static inline u64 rb_time_stamp(struct trace_buffer *buffer);
679 
680 /**
681  * ring_buffer_event_time_stamp - return the event's current time stamp
682  * @buffer: The buffer that the event is on
683  * @event: the event to get the time stamp of
684  *
685  * Note, this must be called after @event is reserved, and before it is
686  * committed to the ring buffer. And must be called from the same
687  * context where the event was reserved (normal, softirq, irq, etc).
688  *
689  * Returns the time stamp associated with the current event.
690  * If the event has an extended time stamp, then that is used as
691  * the time stamp to return.
692  * In the highly unlikely case that the event was nested more than
693  * the max nesting, then the write_stamp of the buffer is returned,
694  * otherwise the current time is returned, but really neither of
695  * the last two cases should ever happen.
696  */
697 u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
698 				 struct ring_buffer_event *event)
699 {
700 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
701 	unsigned int nest;
702 	u64 ts;
703 
704 	/* If the event includes an absolute time, then just use that */
705 	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
706 		ts = rb_event_time_stamp(event);
707 		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
708 	}
709 
710 	nest = local_read(&cpu_buffer->committing);
711 	verify_event(cpu_buffer, event);
712 	if (WARN_ON_ONCE(!nest))
713 		goto fail;
714 
715 	/* Read the current saved nesting level time stamp */
716 	if (likely(--nest < MAX_NEST))
717 		return cpu_buffer->event_stamp[nest];
718 
719 	/* Shouldn't happen, warn if it does */
720 	WARN_ONCE(1, "nest (%d) greater than max", nest);
721 
722  fail:
723 	rb_time_read(&cpu_buffer->write_stamp, &ts);
724 
725 	return ts;
726 }
727 
728 /**
729  * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
730  * @buffer: The ring_buffer to get the number of pages from
731  * @cpu: The cpu of the ring_buffer to get the number of pages from
732  *
733  * Returns the number of pages that have content in the ring buffer.
734  */
735 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
736 {
737 	size_t read;
738 	size_t lost;
739 	size_t cnt;
740 
741 	read = local_read(&buffer->buffers[cpu]->pages_read);
742 	lost = local_read(&buffer->buffers[cpu]->pages_lost);
743 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
744 
745 	if (WARN_ON_ONCE(cnt < lost))
746 		return 0;
747 
748 	cnt -= lost;
749 
750 	/* The reader can read an empty page, but not more than that */
751 	if (cnt < read) {
752 		WARN_ON_ONCE(read > cnt + 1);
753 		return 0;
754 	}
755 
756 	return cnt - read;
757 }
758 
759 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
760 {
761 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
762 	size_t nr_pages;
763 	size_t dirty;
764 
765 	nr_pages = cpu_buffer->nr_pages;
766 	if (!nr_pages || !full)
767 		return true;
768 
769 	/*
770 	 * Add one as dirty will never equal nr_pages, as the sub-buffer
771 	 * that the writer is on is not counted as dirty.
772 	 * This is needed if "buffer_percent" is set to 100.
773 	 */
774 	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
775 
776 	return (dirty * 100) >= (full * nr_pages);
777 }
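
/*
 * Worked example for full_hit() (illustrative numbers): with
 * nr_pages = 10 and full = 50 (percent), the watermark is hit once
 * (dirty * 100) >= (50 * 10), i.e. once 5 of the 10 sub-buffers count as
 * dirty (including the writer's current sub-buffer via the "+ 1" above).
 * With full = 100, all 10 must be accounted for.
 */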
778 
779 /*
780  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
781  *
782  * Schedules a delayed work to wake up any task that is blocked on the
783  * ring buffer waiters queue.
784  */
785 static void rb_wake_up_waiters(struct irq_work *work)
786 {
787 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
788 
789 	/* For waiters waiting for the first wake up */
790 	(void)atomic_fetch_inc_release(&rbwork->seq);
791 
792 	wake_up_all(&rbwork->waiters);
793 	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
794 		/* Only cpu_buffer sets the above flags */
795 		struct ring_buffer_per_cpu *cpu_buffer =
796 			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
797 
798 		/* Called from interrupt context */
799 		raw_spin_lock(&cpu_buffer->reader_lock);
800 		rbwork->wakeup_full = false;
801 		rbwork->full_waiters_pending = false;
802 
803 		/* Waking up all waiters, they will reset the shortest full */
804 		cpu_buffer->shortest_full = 0;
805 		raw_spin_unlock(&cpu_buffer->reader_lock);
806 
807 		wake_up_all(&rbwork->full_waiters);
808 	}
809 }
810 
811 /**
812  * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
813  * @buffer: The ring buffer to wake waiters on
814  * @cpu: The CPU buffer to wake waiters on
815  *
816  * When a file that represents a ring buffer is closing,
817  * it is prudent to wake up any waiters that are on this.
818  */
819 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
820 {
821 	struct ring_buffer_per_cpu *cpu_buffer;
822 	struct rb_irq_work *rbwork;
823 
824 	if (!buffer)
825 		return;
826 
827 	if (cpu == RING_BUFFER_ALL_CPUS) {
828 
829 		/* Wake up individual ones too. One level recursion */
830 		for_each_buffer_cpu(buffer, cpu)
831 			ring_buffer_wake_waiters(buffer, cpu);
832 
833 		rbwork = &buffer->irq_work;
834 	} else {
835 		if (WARN_ON_ONCE(!buffer->buffers))
836 			return;
837 		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
838 			return;
839 
840 		cpu_buffer = buffer->buffers[cpu];
841 		/* The CPU buffer may not have been initialized yet */
842 		if (!cpu_buffer)
843 			return;
844 		rbwork = &cpu_buffer->irq_work;
845 	}
846 
847 	/* This can be called in any context */
848 	irq_work_queue(&rbwork->work);
849 }
850 
851 static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
852 {
853 	struct ring_buffer_per_cpu *cpu_buffer;
854 	bool ret = false;
855 
856 	/* Reads of all CPUs always wait for any data */
857 	if (cpu == RING_BUFFER_ALL_CPUS)
858 		return !ring_buffer_empty(buffer);
859 
860 	cpu_buffer = buffer->buffers[cpu];
861 
862 	if (!ring_buffer_empty_cpu(buffer, cpu)) {
863 		unsigned long flags;
864 		bool pagebusy;
865 
866 		if (!full)
867 			return true;
868 
869 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
870 		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
871 		ret = !pagebusy && full_hit(buffer, cpu, full);
872 
873 		if (!ret && (!cpu_buffer->shortest_full ||
874 			     cpu_buffer->shortest_full > full)) {
875 			cpu_buffer->shortest_full = full;
876 		}
877 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
878 	}
879 	return ret;
880 }
881 
882 static inline bool
883 rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
884 	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
885 {
886 	if (rb_watermark_hit(buffer, cpu, full))
887 		return true;
888 
889 	if (cond(data))
890 		return true;
891 
892 	/*
893 	 * The events can happen in critical sections where
894 	 * checking a work queue can cause deadlocks.
895 	 * After adding a task to the queue, this flag is set
896 	 * only to notify events to try to wake up the queue
897 	 * using irq_work.
898 	 *
899 	 * We don't clear it even if the buffer is no longer
900 	 * empty. The flag only causes the next event to run
901 	 * irq_work to do the work queue wake up. The worst
902 	 * that can happen if we race with !trace_empty() is that
903 	 * an event will cause an irq_work to try to wake up
904 	 * an empty queue.
905 	 *
906 	 * There's no reason to protect this flag either, as
907 	 * the work queue and irq_work logic will do the necessary
908 	 * synchronization for the wake ups. The only thing
909 	 * that is necessary is that the wake up happens after
910 	 * a task has been queued. It's OK for spurious wake ups.
911 	 */
912 	if (full)
913 		rbwork->full_waiters_pending = true;
914 	else
915 		rbwork->waiters_pending = true;
916 
917 	return false;
918 }
919 
920 struct rb_wait_data {
921 	struct rb_irq_work		*irq_work;
922 	int				seq;
923 };
924 
925 /*
926  * The default wait condition for ring_buffer_wait() is just to exit the
927  * wait loop the first time it is woken up.
928  */
929 static bool rb_wait_once(void *data)
930 {
931 	struct rb_wait_data *rdata = data;
932 	struct rb_irq_work *rbwork = rdata->irq_work;
933 
934 	return atomic_read_acquire(&rbwork->seq) != rdata->seq;
935 }
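
/*
 * A sketch of the ordering that makes this work: ring_buffer_wait()
 * snapshots rbwork->seq with atomic_read_acquire() before sleeping, and
 * rb_wake_up_waiters() bumps it with atomic_fetch_inc_release() before
 * calling wake_up_all(). A changed sequence therefore means at least one
 * wake up happened after the snapshot, which is exactly the "run once"
 * condition tested above.
 */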
936 
937 /**
938  * ring_buffer_wait - wait for input to the ring buffer
939  * @buffer: buffer to wait on
940  * @cpu: the cpu buffer to wait on
941  * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
942  * @cond: condition function to break out of wait (NULL to run once)
943  * @data: the data to pass to @cond.
944  *
945  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
946  * as data is added to any of the @buffer's cpu buffers. Otherwise
947  * it will wait for data to be added to a specific cpu buffer.
948  */
949 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
950 		     ring_buffer_cond_fn cond, void *data)
951 {
952 	struct ring_buffer_per_cpu *cpu_buffer;
953 	struct wait_queue_head *waitq;
954 	struct rb_irq_work *rbwork;
955 	struct rb_wait_data rdata;
956 	int ret = 0;
957 
958 	/*
959 	 * Depending on what the caller is waiting for, either any
960 	 * data in any cpu buffer, or a specific buffer, put the
961 	 * caller on the appropriate wait queue.
962 	 */
963 	if (cpu == RING_BUFFER_ALL_CPUS) {
964 		rbwork = &buffer->irq_work;
965 		/* Full only makes sense on per cpu reads */
966 		full = 0;
967 	} else {
968 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
969 			return -ENODEV;
970 		cpu_buffer = buffer->buffers[cpu];
971 		rbwork = &cpu_buffer->irq_work;
972 	}
973 
974 	if (full)
975 		waitq = &rbwork->full_waiters;
976 	else
977 		waitq = &rbwork->waiters;
978 
979 	/* Set up to exit loop as soon as it is woken */
980 	if (!cond) {
981 		cond = rb_wait_once;
982 		rdata.irq_work = rbwork;
983 		rdata.seq = atomic_read_acquire(&rbwork->seq);
984 		data = &rdata;
985 	}
986 
987 	ret = wait_event_interruptible((*waitq),
988 				rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
989 
990 	return ret;
991 }
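
/*
 * Typical use (an illustrative sketch, relying on the default wake-once
 * behaviour selected by passing a NULL @cond):
 *
 *	ret = ring_buffer_wait(buffer, cpu, 0, NULL, NULL);
 *	if (!ret)
 *		event = ring_buffer_consume(buffer, cpu, &ts, &lost);
 *
 * ring_buffer_consume() is defined later in this file; a zero @full
 * waits for any data rather than for a buffer-percent watermark.
 */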
992 
993 /**
994  * ring_buffer_poll_wait - poll on buffer input
995  * @buffer: buffer to wait on
996  * @cpu: the cpu buffer to wait on
997  * @filp: the file descriptor
998  * @poll_table: The poll descriptor
999  * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1000  *
1001  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1002  * as data is added to any of the @buffer's cpu buffers. Otherwise
1003  * it will wait for data to be added to a specific cpu buffer.
1004  *
1005  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1006  * zero otherwise.
1007  */
1008 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1009 			  struct file *filp, poll_table *poll_table, int full)
1010 {
1011 	struct ring_buffer_per_cpu *cpu_buffer;
1012 	struct rb_irq_work *rbwork;
1013 
1014 	if (cpu == RING_BUFFER_ALL_CPUS) {
1015 		rbwork = &buffer->irq_work;
1016 		full = 0;
1017 	} else {
1018 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
1019 			return EPOLLERR;
1020 
1021 		cpu_buffer = buffer->buffers[cpu];
1022 		rbwork = &cpu_buffer->irq_work;
1023 	}
1024 
1025 	if (full) {
1026 		poll_wait(filp, &rbwork->full_waiters, poll_table);
1027 
1028 		if (rb_watermark_hit(buffer, cpu, full))
1029 			return EPOLLIN | EPOLLRDNORM;
1030 		/*
1031 		 * Only allow full_waiters_pending update to be seen after
1032 		 * the shortest_full is set (in rb_watermark_hit). If the
1033 		 * writer sees the full_waiters_pending flag set, it will
1034 		 * compare the amount in the ring buffer to shortest_full.
1035 		 * If the amount in the ring buffer is greater than the
1036 		 * shortest_full percent, it will call the irq_work handler
1037 		 * to wake up this list. The irq_handler will reset shortest_full
1038 		 * back to zero. That's done under the reader_lock, but
1039 		 * the below smp_mb() makes sure that the update to
1040 		 * full_waiters_pending doesn't leak up into the above.
1041 		 */
1042 		smp_mb();
1043 		rbwork->full_waiters_pending = true;
1044 		return 0;
1045 	}
1046 
1047 	poll_wait(filp, &rbwork->waiters, poll_table);
1048 	rbwork->waiters_pending = true;
1049 
1050 	/*
1051 	 * There's a tight race between setting the waiters_pending and
1052 	 * checking if the ring buffer is empty.  Once the waiters_pending bit
1053 	 * is set, the next event will wake the task up, but we can get stuck
1054 	 * if there's only a single event in.
1055 	 *
1056 	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1057 	 * but adding a memory barrier to all events will cause too much of a
1058 	 * performance hit in the fast path.  We only need a memory barrier when
1059 	 * the buffer goes from empty to having content.  But as this race is
1060 	 * extremely small, and it's not a problem if another event comes in, we
1061 	 * will fix it later.
1062 	 */
1063 	smp_mb();
1064 
1065 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1066 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1067 		return EPOLLIN | EPOLLRDNORM;
1068 	return 0;
1069 }
1070 
1071 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1072 #define RB_WARN_ON(b, cond)						\
1073 	({								\
1074 		int _____ret = unlikely(cond);				\
1075 		if (_____ret) {						\
1076 			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1077 				struct ring_buffer_per_cpu *__b =	\
1078 					(void *)b;			\
1079 				atomic_inc(&__b->buffer->record_disabled); \
1080 			} else						\
1081 				atomic_inc(&b->record_disabled);	\
1082 			WARN_ON(1);					\
1083 		}							\
1084 		_____ret;						\
1085 	})
1086 
1087 /* Up this if you want to test the TIME_EXTENTS and normalization */
1088 #define DEBUG_SHIFT 0
1089 
1090 static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1091 {
1092 	u64 ts;
1093 
1094 	/* Skip retpolines :-( */
1095 	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1096 		ts = trace_clock_local();
1097 	else
1098 		ts = buffer->clock();
1099 
1100 	/* shift to debug/test normalization and TIME_EXTENTS */
1101 	return ts << DEBUG_SHIFT;
1102 }
1103 
1104 u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1105 {
1106 	u64 time;
1107 
1108 	preempt_disable_notrace();
1109 	time = rb_time_stamp(buffer);
1110 	preempt_enable_notrace();
1111 
1112 	return time;
1113 }
1114 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1115 
1116 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1117 				      int cpu, u64 *ts)
1118 {
1119 	/* Just stupid testing the normalize function and deltas */
1120 	*ts >>= DEBUG_SHIFT;
1121 }
1122 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1123 
1124 /*
1125  * Making the ring buffer lockless makes things tricky.
1126  * Writes only happen on the CPU that they are on,
1127  * and they only need to worry about interrupts. Reads can
1128  * happen on any CPU.
1129  *
1130  * The reader page is always off the ring buffer, but when the
1131  * reader finishes with a page, it needs to swap its page with
1132  * a new one from the buffer. The reader needs to take from
1133  * the head (writes go to the tail). But if a writer is in overwrite
1134  * mode and wraps, it must push the head page forward.
1135  *
1136  * Here lies the problem.
1137  *
1138  * The reader must be careful to replace only the head page, and
1139  * not another one. As described at the top of the file in the
1140  * ASCII art, the reader sets its old page to point to the next
1141  * page after head. It then sets the page after head to point to
1142  * the old reader page. But if the writer moves the head page
1143  * during this operation, the reader could end up with the tail.
1144  *
1145  * We use cmpxchg to help prevent this race. We also do something
1146  * special with the page before head. We set the LSB to 1.
1147  *
1148  * When the writer must push the page forward, it will clear the
1149  * bit that points to the head page, move the head, and then set
1150  * the bit that points to the new head page.
1151  *
1152  * We also don't want an interrupt coming in and moving the head
1153  * page on another writer. Thus we use the second LSB to catch
1154  * that too. Thus:
1155  *
1156  * head->list->prev->next        bit 1          bit 0
1157  *                              -------        -------
1158  * Normal page                     0              0
1159  * Points to head page             0              1
1160  * New head page                   1              0
1161  *
1162  * Note we can not trust the prev pointer of the head page, because:
1163  *
1164  * +----+       +-----+        +-----+
1165  * |    |------>|  T  |---X--->|  N  |
1166  * |    |<------|     |        |     |
1167  * +----+       +-----+        +-----+
1168  *   ^                           ^ |
1169  *   |          +-----+          | |
1170  *   +----------|  R  |----------+ |
1171  *              |     |<-----------+
1172  *              +-----+
1173  *
1174  * Key:  ---X-->  HEAD flag set in pointer
1175  *         T      Tail page
1176  *         R      Reader page
1177  *         N      Next page
1178  *
1179  * (see __rb_reserve_next() to see where this happens)
1180  *
1181  *  What the above shows is that the reader just swapped out
1182  *  the reader page with a page in the buffer, but before it
1183  *  could make the new header point back to the new page added
1184  *  it was preempted by a writer. The writer moved forward onto
1185  *  the new page added by the reader and is about to move forward
1186  *  again.
1187  *
1188  *  You can see, it is legitimate for the previous pointer of
1189  *  the head (or any page) not to point back to itself. But only
1190  *  temporarily.
1191  */
1192 
1193 #define RB_PAGE_NORMAL		0UL
1194 #define RB_PAGE_HEAD		1UL
1195 #define RB_PAGE_UPDATE		2UL
1196 
1197 
1198 #define RB_FLAG_MASK		3UL
1199 
1200 /* PAGE_MOVED is not part of the mask */
1201 #define RB_PAGE_MOVED		4UL
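
/*
 * Illustrative sketch of the pointer tagging described above (not a
 * helper in this file): because buffer pages are allocated cache-line
 * aligned, the low bits of the list pointers are free to carry flags:
 *
 *	unsigned long val = (unsigned long)head->list.prev->next;
 *	struct list_head *next = (struct list_head *)(val & ~RB_FLAG_MASK);
 *	unsigned long flag = val & RB_FLAG_MASK;
 *
 * flag is then RB_PAGE_NORMAL, RB_PAGE_HEAD or RB_PAGE_UPDATE.
 * rb_list_head() below strips the flag bits, and rb_is_head_page()
 * inspects them.
 */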
1202 
1203 /*
1204  * rb_list_head - remove any bit
1205  */
1206 static struct list_head *rb_list_head(struct list_head *list)
1207 {
1208 	unsigned long val = (unsigned long)list;
1209 
1210 	return (struct list_head *)(val & ~RB_FLAG_MASK);
1211 }
1212 
1213 /*
1214  * rb_is_head_page - test if the given page is the head page
1215  *
1216  * Because the reader may move the head_page pointer, we can
1217  * not trust what the head page is (it may be pointing to
1218  * the reader page). But if the next page is a header page,
1219  * its flags will be non zero.
1220  */
1221 static inline int
1222 rb_is_head_page(struct buffer_page *page, struct list_head *list)
1223 {
1224 	unsigned long val;
1225 
1226 	val = (unsigned long)list->next;
1227 
1228 	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1229 		return RB_PAGE_MOVED;
1230 
1231 	return val & RB_FLAG_MASK;
1232 }
1233 
1234 /*
1235  * rb_is_reader_page
1236  *
1237  * The unique thing about the reader page, is that, if the
1238  * writer is ever on it, the previous pointer never points
1239  * back to the reader page.
1240  */
1241 static bool rb_is_reader_page(struct buffer_page *page)
1242 {
1243 	struct list_head *list = page->list.prev;
1244 
1245 	return rb_list_head(list->next) != &page->list;
1246 }
1247 
1248 /*
1249  * rb_set_list_to_head - set a list_head to be pointing to head.
1250  */
1251 static void rb_set_list_to_head(struct list_head *list)
1252 {
1253 	unsigned long *ptr;
1254 
1255 	ptr = (unsigned long *)&list->next;
1256 	*ptr |= RB_PAGE_HEAD;
1257 	*ptr &= ~RB_PAGE_UPDATE;
1258 }
1259 
1260 /*
1261  * rb_head_page_activate - sets up head page
1262  */
1263 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1264 {
1265 	struct buffer_page *head;
1266 
1267 	head = cpu_buffer->head_page;
1268 	if (!head)
1269 		return;
1270 
1271 	/*
1272 	 * Set the previous list pointer to have the HEAD flag.
1273 	 */
1274 	rb_set_list_to_head(head->list.prev);
1275 
1276 	if (cpu_buffer->ring_meta) {
1277 		struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
1278 		meta->head_buffer = (unsigned long)head->page;
1279 	}
1280 }
1281 
1282 static void rb_list_head_clear(struct list_head *list)
1283 {
1284 	unsigned long *ptr = (unsigned long *)&list->next;
1285 
1286 	*ptr &= ~RB_FLAG_MASK;
1287 }
1288 
1289 /*
1290  * rb_head_page_deactivate - clears head page ptr (for free list)
1291  */
1292 static void
1293 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1294 {
1295 	struct list_head *hd;
1296 
1297 	/* Go through the whole list and clear any pointers found. */
1298 	rb_list_head_clear(cpu_buffer->pages);
1299 
1300 	list_for_each(hd, cpu_buffer->pages)
1301 		rb_list_head_clear(hd);
1302 }
1303 
1304 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1305 			    struct buffer_page *head,
1306 			    struct buffer_page *prev,
1307 			    int old_flag, int new_flag)
1308 {
1309 	struct list_head *list;
1310 	unsigned long val = (unsigned long)&head->list;
1311 	unsigned long ret;
1312 
1313 	list = &prev->list;
1314 
1315 	val &= ~RB_FLAG_MASK;
1316 
1317 	ret = cmpxchg((unsigned long *)&list->next,
1318 		      val | old_flag, val | new_flag);
1319 
1320 	/* check if the reader took the page */
1321 	if ((ret & ~RB_FLAG_MASK) != val)
1322 		return RB_PAGE_MOVED;
1323 
1324 	return ret & RB_FLAG_MASK;
1325 }
1326 
1327 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1328 				   struct buffer_page *head,
1329 				   struct buffer_page *prev,
1330 				   int old_flag)
1331 {
1332 	return rb_head_page_set(cpu_buffer, head, prev,
1333 				old_flag, RB_PAGE_UPDATE);
1334 }
1335 
1336 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1337 				 struct buffer_page *head,
1338 				 struct buffer_page *prev,
1339 				 int old_flag)
1340 {
1341 	return rb_head_page_set(cpu_buffer, head, prev,
1342 				old_flag, RB_PAGE_HEAD);
1343 }
1344 
1345 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1346 				   struct buffer_page *head,
1347 				   struct buffer_page *prev,
1348 				   int old_flag)
1349 {
1350 	return rb_head_page_set(cpu_buffer, head, prev,
1351 				old_flag, RB_PAGE_NORMAL);
1352 }
1353 
1354 static inline void rb_inc_page(struct buffer_page **bpage)
1355 {
1356 	struct list_head *p = rb_list_head((*bpage)->list.next);
1357 
1358 	*bpage = list_entry(p, struct buffer_page, list);
1359 }
1360 
1361 static struct buffer_page *
1362 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1363 {
1364 	struct buffer_page *head;
1365 	struct buffer_page *page;
1366 	struct list_head *list;
1367 	int i;
1368 
1369 	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1370 		return NULL;
1371 
1372 	/* sanity check */
1373 	list = cpu_buffer->pages;
1374 	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1375 		return NULL;
1376 
1377 	page = head = cpu_buffer->head_page;
1378 	/*
1379 	 * It is possible that the writer moves the header behind
1380 	 * where we started, and we miss in one loop.
1381 	 * A second loop should grab the header, but we'll do
1382 	 * three loops just because I'm paranoid.
1383 	 */
1384 	for (i = 0; i < 3; i++) {
1385 		do {
1386 			if (rb_is_head_page(page, page->list.prev)) {
1387 				cpu_buffer->head_page = page;
1388 				return page;
1389 			}
1390 			rb_inc_page(&page);
1391 		} while (page != head);
1392 	}
1393 
1394 	RB_WARN_ON(cpu_buffer, 1);
1395 
1396 	return NULL;
1397 }
1398 
1399 static bool rb_head_page_replace(struct buffer_page *old,
1400 				struct buffer_page *new)
1401 {
1402 	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1403 	unsigned long val;
1404 
1405 	val = *ptr & ~RB_FLAG_MASK;
1406 	val |= RB_PAGE_HEAD;
1407 
1408 	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1409 }
1410 
1411 /*
1412  * rb_tail_page_update - move the tail page forward
1413  */
1414 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1415 			       struct buffer_page *tail_page,
1416 			       struct buffer_page *next_page)
1417 {
1418 	unsigned long old_entries;
1419 	unsigned long old_write;
1420 
1421 	/*
1422 	 * The tail page now needs to be moved forward.
1423 	 *
1424 	 * We need to reset the tail page, but without messing
1425 	 * with possible erasing of data brought in by interrupts
1426 	 * that have moved the tail page and are currently on it.
1427 	 *
1428 	 * We add a counter to the write field to denote this.
1429 	 */
1430 	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1431 	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1432 
1433 	/*
1434 	 * Just make sure we have seen our old_write and synchronize
1435 	 * with any interrupts that come in.
1436 	 */
1437 	barrier();
1438 
1439 	/*
1440 	 * If the tail page is still the same as what we think
1441 	 * it is, then it is up to us to update the tail
1442 	 * pointer.
1443 	 */
1444 	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1445 		/* Zero the write counter */
1446 		unsigned long val = old_write & ~RB_WRITE_MASK;
1447 		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1448 
1449 		/*
1450 		 * This will only succeed if an interrupt did
1451 		 * not come in and change it. In which case, we
1452 		 * do not want to modify it.
1453 		 *
1454 		 * We add (void) to let the compiler know that we do not care
1455 		 * about the return value of these functions. We use the
1456 		 * cmpxchg to only update if an interrupt did not already
1457 		 * do it for us. If the cmpxchg fails, we don't care.
1458 		 */
1459 		(void)local_cmpxchg(&next_page->write, old_write, val);
1460 		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1461 
1462 		/*
1463 		 * No need to worry about races with clearing out the commit.
1464 		 * it only can increment when a commit takes place. But that
1465 		 * only happens in the outer most nested commit.
1466 		 */
1467 		local_set(&next_page->page->commit, 0);
1468 
1469 		/* Either we update tail_page or an interrupt does */
1470 		if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1471 			local_inc(&cpu_buffer->pages_touched);
1472 	}
1473 }
1474 
1475 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1476 			  struct buffer_page *bpage)
1477 {
1478 	unsigned long val = (unsigned long)bpage;
1479 
1480 	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1481 }
1482 
1483 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
1484 			   struct list_head *list)
1485 {
1486 	if (RB_WARN_ON(cpu_buffer,
1487 		       rb_list_head(rb_list_head(list->next)->prev) != list))
1488 		return false;
1489 
1490 	if (RB_WARN_ON(cpu_buffer,
1491 		       rb_list_head(rb_list_head(list->prev)->next) != list))
1492 		return false;
1493 
1494 	return true;
1495 }
1496 
1497 /**
1498  * rb_check_pages - integrity check of buffer pages
1499  * @cpu_buffer: CPU buffer with pages to test
1500  *
1501  * As a safety measure we check to make sure the data pages have not
1502  * been corrupted.
1503  */
1504 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1505 {
1506 	struct list_head *head, *tmp;
1507 	unsigned long buffer_cnt;
1508 	unsigned long flags;
1509 	int nr_loops = 0;
1510 
1511 	/*
1512 	 * Walk the linked list underpinning the ring buffer and validate all
1513 	 * its next and prev links.
1514 	 *
1515 	 * The check acquires the reader_lock to avoid concurrent processing
1516 	 * with code that could be modifying the list. However, the lock cannot
1517 	 * be held for the entire duration of the walk, as this would make the
1518 	 * time when interrupts are disabled non-deterministic, dependent on the
1519 	 * ring buffer size. Therefore, the code releases and re-acquires the
1520 	 * lock after checking each page. The ring_buffer_per_cpu.cnt variable
1521 	 * is then used to detect if the list was modified while the lock was
1522 	 * not held, in which case the check needs to be restarted.
1523 	 *
1524 	 * The code attempts to perform the check at most three times before
1525 	 * giving up. This is acceptable because this is only a self-validation
1526 	 * to detect problems early on. In practice, the list modification
1527 	 * operations are fairly spaced, and so this check typically succeeds at
1528 	 * most on the second try.
1529 	 */
1530 again:
1531 	if (++nr_loops > 3)
1532 		return;
1533 
1534 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1535 	head = rb_list_head(cpu_buffer->pages);
1536 	if (!rb_check_links(cpu_buffer, head))
1537 		goto out_locked;
1538 	buffer_cnt = cpu_buffer->cnt;
1539 	tmp = head;
1540 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1541 
1542 	while (true) {
1543 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1544 
1545 		if (buffer_cnt != cpu_buffer->cnt) {
1546 			/* The list was updated, try again. */
1547 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1548 			goto again;
1549 		}
1550 
1551 		tmp = rb_list_head(tmp->next);
1552 		if (tmp == head)
1553 			/* The iteration circled back, all is done. */
1554 			goto out_locked;
1555 
1556 		if (!rb_check_links(cpu_buffer, tmp))
1557 			goto out_locked;
1558 
1559 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1560 	}
1561 
1562 out_locked:
1563 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1564 }
1565 
1566 /*
1567  * Take an address, add the meta data size as well as the array of
1568  * array subbuffer indexes, then align it to a subbuffer size.
1569  *
1570  * This is used to help find the next per cpu subbuffer within a mapped range.
1571  */
1572 static unsigned long
1573 rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
1574 {
1575 	addr += sizeof(struct ring_buffer_cpu_meta) +
1576 		sizeof(int) * nr_subbufs;
1577 	return ALIGN(addr, subbuf_size);
1578 }
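
/*
 * Illustrative layout of one per-CPU chunk implied by the alignment above
 * (sizes depend on the boot mapping; this is only a sketch):
 *
 *	+------------------------------+  <- addr
 *	| struct ring_buffer_cpu_meta  |
 *	| int buffers[nr_subbufs]      |
 *	+------------------------------+  <- ALIGN(..., subbuf_size)
 *	| sub-buffer 0                 |
 *	| sub-buffer 1                 |
 *	| ...                          |
 *	| sub-buffer nr_subbufs - 1    |
 *	+------------------------------+
 */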
1579 
1580 /*
1581  * Return the ring_buffer_meta for a given @cpu.
1582  */
1583 static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
1584 {
1585 	int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
1586 	struct ring_buffer_cpu_meta *meta;
1587 	struct ring_buffer_meta *bmeta;
1588 	unsigned long ptr;
1589 	int nr_subbufs;
1590 
1591 	bmeta = buffer->meta;
1592 	if (!bmeta)
1593 		return NULL;
1594 
1595 	ptr = (unsigned long)bmeta + bmeta->buffers_offset;
1596 	meta = (struct ring_buffer_cpu_meta *)ptr;
1597 
1598 	/* When nr_pages passed in is zero, the first meta has already been initialized */
1599 	if (!nr_pages) {
1600 		nr_subbufs = meta->nr_subbufs;
1601 	} else {
1602 		/* Include the reader page */
1603 		nr_subbufs = nr_pages + 1;
1604 	}
1605 
1606 	/*
1607 	 * The first chunk may not be subbuffer aligned, whereas
1608 	 * the rest of the chunks are.
1609 	 */
1610 	if (cpu) {
1611 		ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1612 		ptr += subbuf_size * nr_subbufs;
1613 
1614 		/* We can use multiplication to find chunks greater than 1 */
1615 		if (cpu > 1) {
1616 			unsigned long size;
1617 			unsigned long p;
1618 
1619 			/* Save the beginning of this CPU chunk */
1620 			p = ptr;
1621 			ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1622 			ptr += subbuf_size * nr_subbufs;
1623 
1624 			/* Now all chunks after this are the same size */
1625 			size = ptr - p;
1626 			ptr += size * (cpu - 2);
1627 		}
1628 	}
1629 	return (void *)ptr;
1630 }
1631 
1632 /* Return the start of subbufs given the meta pointer */
1633 static void *rb_subbufs_from_meta(struct ring_buffer_cpu_meta *meta)
1634 {
1635 	int subbuf_size = meta->subbuf_size;
1636 	unsigned long ptr;
1637 
1638 	ptr = (unsigned long)meta;
1639 	ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
1640 
1641 	return (void *)ptr;
1642 }
1643 
1644 /*
1645  * Return a specific sub-buffer for a given @cpu defined by @idx.
1646  */
1647 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
1648 {
1649 	struct ring_buffer_cpu_meta *meta;
1650 	unsigned long ptr;
1651 	int subbuf_size;
1652 
1653 	meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
1654 	if (!meta)
1655 		return NULL;
1656 
1657 	if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
1658 		return NULL;
1659 
1660 	subbuf_size = meta->subbuf_size;
1661 
1662 	/* Map this buffer to the order that's in meta->buffers[] */
1663 	idx = meta->buffers[idx];
1664 
1665 	ptr = (unsigned long)rb_subbufs_from_meta(meta);
1666 
1667 	ptr += subbuf_size * idx;
1668 	if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
1669 		return NULL;
1670 
1671 	return (void *)ptr;
1672 }
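
/*
 * Example of the buffers[] indirection (hypothetical state): if
 * meta->buffers[] = { 3, 0, 1, 2 }, then idx 0 resolves to the fourth
 * physical sub-buffer in the range and idx 1 to the first. The physical
 * placement of the sub-buffers never changes; only the logical order
 * stored in buffers[] does (e.g. when the reader page is swapped out).
 */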
1673 
1674 /*
1675  * See if the existing memory contains a valid meta section.
1676  * If so, use it; otherwise initialize it.
1677  */
1678 static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
1679 {
1680 	unsigned long ptr = buffer->range_addr_start;
1681 	struct ring_buffer_meta *bmeta;
1682 	unsigned long total_size;
1683 	int struct_sizes;
1684 
1685 	bmeta = (struct ring_buffer_meta *)ptr;
1686 	buffer->meta = bmeta;
1687 
1688 	total_size = buffer->range_addr_end - buffer->range_addr_start;
1689 
1690 	struct_sizes = sizeof(struct ring_buffer_cpu_meta);
1691 	struct_sizes |= sizeof(*bmeta) << 16;
1692 
1693 	/* The first buffer starts word aligned after the meta data, plus the scratch area */
1694 	ptr += sizeof(*bmeta);
1695 	ptr = ALIGN(ptr, sizeof(long));
1696 	ptr += scratch_size;
1697 
1698 	if (bmeta->magic != RING_BUFFER_META_MAGIC) {
1699 		pr_info("Ring buffer boot meta mismatch of magic\n");
1700 		goto init;
1701 	}
1702 
1703 	if (bmeta->struct_sizes != struct_sizes) {
1704 		pr_info("Ring buffer boot meta mismatch of struct size\n");
1705 		goto init;
1706 	}
1707 
1708 	if (bmeta->total_size != total_size) {
1709 		pr_info("Ring buffer boot meta mismatch of total size\n");
1710 		goto init;
1711 	}
1712 
1713 	if (bmeta->buffers_offset > bmeta->total_size) {
1714 		pr_info("Ring buffer boot meta mismatch of offset outside of total size\n");
1715 		goto init;
1716 	}
1717 
1718 	if (bmeta->buffers_offset != (void *)ptr - (void *)bmeta) {
1719 		pr_info("Ring buffer boot meta mismatch of first buffer offset\n");
1720 		goto init;
1721 	}
1722 
1723 	return true;
1724 
1725  init:
1726 	bmeta->magic = RING_BUFFER_META_MAGIC;
1727 	bmeta->struct_sizes = struct_sizes;
1728 	bmeta->total_size = total_size;
1729 	bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
1730 
1731 	/* Zero out the scratch pad */
1732 	memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
1733 
1734 	return false;
1735 }
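
/*
 * For example (sizes are arch dependent, shown for illustration only):
 * with sizeof(struct ring_buffer_cpu_meta) == 32 and
 * sizeof(struct ring_buffer_meta) == 24, struct_sizes ends up as
 * (24 << 16) | 32 == 0x180020. A kernel whose struct layouts differ
 * computes a different value and falls back to re-initializing.
 */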
1736 
1737 /*
1738  * See if the existing memory contains valid ring buffer data.
1739  * As the previous kernel must be the same as this kernel, all
1740  * the calculations (size of buffers and number of buffers)
1741  * must be the same.
1742  */
1743 static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu,
1744 			      struct trace_buffer *buffer, int nr_pages,
1745 			      unsigned long *subbuf_mask)
1746 {
1747 	int subbuf_size = PAGE_SIZE;
1748 	struct buffer_data_page *subbuf;
1749 	unsigned long buffers_start;
1750 	unsigned long buffers_end;
1751 	int i;
1752 
1753 	if (!subbuf_mask)
1754 		return false;
1755 
1756 	buffers_start = meta->first_buffer;
1757 	buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
1758 
1759 	/* Are the head and commit buffers within the range of buffers? */
1760 	if (meta->head_buffer < buffers_start ||
1761 	    meta->head_buffer >= buffers_end) {
1762 		pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
1763 		return false;
1764 	}
1765 
1766 	if (meta->commit_buffer < buffers_start ||
1767 	    meta->commit_buffer >= buffers_end) {
1768 		pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
1769 		return false;
1770 	}
1771 
1772 	subbuf = rb_subbufs_from_meta(meta);
1773 
1774 	bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
1775 
1776 	/* Do the meta buffers and the subbufs themselves have correct data? */
1777 	for (i = 0; i < meta->nr_subbufs; i++) {
1778 		if (meta->buffers[i] < 0 ||
1779 		    meta->buffers[i] >= meta->nr_subbufs) {
1780 			pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
1781 			return false;
1782 		}
1783 
1784 		if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
1785 			pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
1786 			return false;
1787 		}
1788 
1789 		if (test_bit(meta->buffers[i], subbuf_mask)) {
1790 			pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
1791 			return false;
1792 		}
1793 
1794 		set_bit(meta->buffers[i], subbuf_mask);
1795 		subbuf = (void *)subbuf + subbuf_size;
1796 	}
1797 
1798 	return true;
1799 }
1800 
1801 static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf);
1802 
1803 static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
1804 			       unsigned long long *timestamp, u64 *delta_ptr)
1805 {
1806 	struct ring_buffer_event *event;
1807 	u64 ts, delta;
1808 	int events = 0;
1809 	int e;
1810 
1811 	*delta_ptr = 0;
1812 	*timestamp = 0;
1813 
1814 	ts = dpage->time_stamp;
1815 
1816 	for (e = 0; e < tail; e += rb_event_length(event)) {
1817 
1818 		event = (struct ring_buffer_event *)(dpage->data + e);
1819 
1820 		switch (event->type_len) {
1821 
1822 		case RINGBUF_TYPE_TIME_EXTEND:
1823 			delta = rb_event_time_stamp(event);
1824 			ts += delta;
1825 			break;
1826 
1827 		case RINGBUF_TYPE_TIME_STAMP:
1828 			delta = rb_event_time_stamp(event);
1829 			delta = rb_fix_abs_ts(delta, ts);
1830 			if (delta < ts) {
1831 				*delta_ptr = delta;
1832 				*timestamp = ts;
1833 				return -1;
1834 			}
1835 			ts = delta;
1836 			break;
1837 
1838 		case RINGBUF_TYPE_PADDING:
1839 			if (event->time_delta == 1)
1840 				break;
1841 			fallthrough;
1842 		case RINGBUF_TYPE_DATA:
1843 			events++;
1844 			ts += event->time_delta;
1845 			break;
1846 
1847 		default:
1848 			return -1;
1849 		}
1850 	}
1851 	*timestamp = ts;
1852 	return events;
1853 }
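
/*
 * The walk above rebuilds each event's absolute timestamp from the page
 * time_stamp plus the per-event deltas. With made-up numbers: a page
 * time_stamp of 1000, a data event with time_delta = 5 gives ts = 1005,
 * a following TIME_EXTEND of 100 gives ts = 1105, and a TIME_STAMP
 * event replaces ts outright. A TIME_STAMP that would move time
 * backwards is reported as an error above.
 */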
1854 
1855 static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
1856 {
1857 	unsigned long long ts;
1858 	u64 delta;
1859 	int tail;
1860 
1861 	tail = local_read(&dpage->commit);
1862 	return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
1863 }
1864 
1865 /* If the meta data has been validated, now validate the events */
1866 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
1867 {
1868 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
1869 	struct buffer_page *head_page;
1870 	unsigned long entry_bytes = 0;
1871 	unsigned long entries = 0;
1872 	int ret;
1873 	int i;
1874 
1875 	if (!meta || !meta->head_buffer)
1876 		return;
1877 
1878 	/* Do the reader page first */
1879 	ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
1880 	if (ret < 0) {
1881 		pr_info("Ring buffer reader page is invalid\n");
1882 		goto invalid;
1883 	}
1884 	entries += ret;
1885 	entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
1886 	local_set(&cpu_buffer->reader_page->entries, ret);
1887 
1888 	head_page = cpu_buffer->head_page;
1889 
1890 	/* If the commit_buffer is the reader page, update the commit page */
1891 	if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) {
1892 		cpu_buffer->commit_page = cpu_buffer->reader_page;
1893 		/* Nothing more to do, the only page is the reader page */
1894 		goto done;
1895 	}
1896 
1897 	/* Iterate until finding the commit page */
1898 	for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
1899 
1900 		/* Reader page has already been done */
1901 		if (head_page == cpu_buffer->reader_page)
1902 			continue;
1903 
1904 		ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
1905 		if (ret < 0) {
1906 			pr_info("Ring buffer meta [%d] invalid buffer page\n",
1907 				cpu_buffer->cpu);
1908 			goto invalid;
1909 		}
1910 
1911 		/* If the buffer has content, update pages_touched */
1912 		if (ret)
1913 			local_inc(&cpu_buffer->pages_touched);
1914 
1915 		entries += ret;
1916 		entry_bytes += local_read(&head_page->page->commit);
1917 		local_set(&cpu_buffer->head_page->entries, ret);
1918 
1919 		if (head_page == cpu_buffer->commit_page)
1920 			break;
1921 	}
1922 
1923 	if (head_page != cpu_buffer->commit_page) {
1924 		pr_info("Ring buffer meta [%d] commit page not found\n",
1925 			cpu_buffer->cpu);
1926 		goto invalid;
1927 	}
1928  done:
1929 	local_set(&cpu_buffer->entries, entries);
1930 	local_set(&cpu_buffer->entries_bytes, entry_bytes);
1931 
1932 	pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
1933 	return;
1934 
1935  invalid:
1936 	/* The content of the buffers are invalid, reset the meta data */
1937 	meta->head_buffer = 0;
1938 	meta->commit_buffer = 0;
1939 
1940 	/* Reset the reader page */
1941 	local_set(&cpu_buffer->reader_page->entries, 0);
1942 	local_set(&cpu_buffer->reader_page->page->commit, 0);
1943 
1944 	/* Reset all the subbuffers */
1945 	for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
1946 		local_set(&head_page->entries, 0);
1947 		local_set(&head_page->page->commit, 0);
1948 	}
1949 }
1950 
1951 static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
1952 {
1953 	struct ring_buffer_cpu_meta *meta;
1954 	unsigned long *subbuf_mask;
1955 	unsigned long delta;
1956 	void *subbuf;
1957 	bool valid = false;
1958 	int cpu;
1959 	int i;
1960 
1961 	/* Create a mask to test the subbuf array */
1962 	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
1963 	/* If subbuf_mask fails to allocate, then rb_cpu_meta_valid() will return false */
1964 
1965 	if (rb_meta_init(buffer, scratch_size))
1966 		valid = true;
1967 
1968 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1969 		void *next_meta;
1970 
1971 		meta = rb_range_meta(buffer, nr_pages, cpu);
1972 
1973 		if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
1974 			/* Make the mappings match the current address */
1975 			subbuf = rb_subbufs_from_meta(meta);
1976 			delta = (unsigned long)subbuf - meta->first_buffer;
1977 			meta->first_buffer += delta;
1978 			meta->head_buffer += delta;
1979 			meta->commit_buffer += delta;
1980 			continue;
1981 		}
1982 
1983 		if (cpu < nr_cpu_ids - 1)
1984 			next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
1985 		else
1986 			next_meta = (void *)buffer->range_addr_end;
1987 
1988 		memset(meta, 0, next_meta - (void *)meta);
1989 
1990 		meta->nr_subbufs = nr_pages + 1;
1991 		meta->subbuf_size = PAGE_SIZE;
1992 
1993 		subbuf = rb_subbufs_from_meta(meta);
1994 
1995 		meta->first_buffer = (unsigned long)subbuf;
1996 
1997 		/*
1998 		 * The buffers[] array holds the order of the sub-buffers
1999 		 * that are after the meta data. The sub-buffers may
2000 		 * be swapped out when read and inserted into a different
2001 		 * location of the ring buffer. Although their addresses
2002 		 * remain the same, the buffers[] array contains the
2003 		 * index into the sub-buffers holding their actual order.
2004 		 */
2005 		for (i = 0; i < meta->nr_subbufs; i++) {
2006 			meta->buffers[i] = i;
2007 			rb_init_page(subbuf);
2008 			subbuf += meta->subbuf_size;
2009 		}
2010 	}
2011 	bitmap_free(subbuf_mask);
2012 }
2013 
2014 static void *rbm_start(struct seq_file *m, loff_t *pos)
2015 {
2016 	struct ring_buffer_per_cpu *cpu_buffer = m->private;
2017 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2018 	unsigned long val;
2019 
2020 	if (!meta)
2021 		return NULL;
2022 
2023 	if (*pos > meta->nr_subbufs)
2024 		return NULL;
2025 
2026 	val = *pos;
2027 	val++;
2028 
2029 	return (void *)val;
2030 }
2031 
2032 static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
2033 {
2034 	(*pos)++;
2035 
2036 	return rbm_start(m, pos);
2037 }
2038 
2039 static int rbm_show(struct seq_file *m, void *v)
2040 {
2041 	struct ring_buffer_per_cpu *cpu_buffer = m->private;
2042 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2043 	unsigned long val = (unsigned long)v;
2044 
2045 	if (val == 1) {
2046 		seq_printf(m, "head_buffer:   %d\n",
2047 			   rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
2048 		seq_printf(m, "commit_buffer: %d\n",
2049 			   rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
2050 		seq_printf(m, "subbuf_size:   %d\n", meta->subbuf_size);
2051 		seq_printf(m, "nr_subbufs:    %d\n", meta->nr_subbufs);
2052 		return 0;
2053 	}
2054 
2055 	val -= 2;
2056 	seq_printf(m, "buffer[%ld]:    %d\n", val, meta->buffers[val]);
2057 
2058 	return 0;
2059 }
2060 
2061 static void rbm_stop(struct seq_file *m, void *p)
2062 {
2063 }
2064 
2065 static const struct seq_operations rb_meta_seq_ops = {
2066 	.start		= rbm_start,
2067 	.next		= rbm_next,
2068 	.show		= rbm_show,
2069 	.stop		= rbm_stop,
2070 };
2071 
2072 int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
2073 {
2074 	struct seq_file *m;
2075 	int ret;
2076 
2077 	ret = seq_open(file, &rb_meta_seq_ops);
2078 	if (ret)
2079 		return ret;
2080 
2081 	m = file->private_data;
2082 	m->private = buffer->buffers[cpu];
2083 
2084 	return 0;
2085 }
2086 
2087 /* Map the buffer_pages to the previous head and commit pages */
2088 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
2089 				  struct buffer_page *bpage)
2090 {
2091 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2092 
2093 	if (meta->head_buffer == (unsigned long)bpage->page)
2094 		cpu_buffer->head_page = bpage;
2095 
2096 	if (meta->commit_buffer == (unsigned long)bpage->page) {
2097 		cpu_buffer->commit_page = bpage;
2098 		cpu_buffer->tail_page = bpage;
2099 	}
2100 }
2101 
2102 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2103 		long nr_pages, struct list_head *pages)
2104 {
2105 	struct trace_buffer *buffer = cpu_buffer->buffer;
2106 	struct ring_buffer_cpu_meta *meta = NULL;
2107 	struct buffer_page *bpage, *tmp;
2108 	bool user_thread = current->mm != NULL;
2109 	gfp_t mflags;
2110 	long i;
2111 
2112 	/*
2113 	 * Check if the available memory is there first.
2114 	 * Note, si_mem_available() only gives us a rough estimate of available
2115 	 * memory. It may not be accurate. But we don't care, we just want
2116 	 * to prevent doing any allocation when it is obvious that it is
2117 	 * not going to succeed.
2118 	 */
2119 	i = si_mem_available();
2120 	if (i < nr_pages)
2121 		return -ENOMEM;
2122 
2123 	/*
2124 	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
2125 	 * gracefully without invoking oom-killer and the system is not
2126 	 * destabilized.
2127 	 */
2128 	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
2129 
2130 	/*
2131 	 * A user thread may allocate too much while si_mem_available()
2132 	 * reports there's enough memory, even though there is not.
2133 	 * Make sure the OOM killer kills this thread. This can happen
2134 	 * even with RETRY_MAYFAIL because another task may be doing
2135 	 * an allocation after this task has taken all memory.
2136 	 * This is the task the OOM killer needs to take out during this
2137 	 * loop, even if it was triggered by an allocation somewhere else.
2138 	 */
2139 	if (user_thread)
2140 		set_current_oom_origin();
2141 
2142 	if (buffer->range_addr_start)
2143 		meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
2144 
2145 	for (i = 0; i < nr_pages; i++) {
2146 		struct page *page;
2147 
2148 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2149 				    mflags, cpu_to_node(cpu_buffer->cpu));
2150 		if (!bpage)
2151 			goto free_pages;
2152 
2153 		rb_check_bpage(cpu_buffer, bpage);
2154 
2155 		/*
2156 		 * Append the pages, as for mapped buffers we want to keep
2157 		 * their order.
2158 		 */
2159 		list_add_tail(&bpage->list, pages);
2160 
2161 		if (meta) {
2162 			/* A range was given. Use that for the buffer page */
2163 			bpage->page = rb_range_buffer(cpu_buffer, i + 1);
2164 			if (!bpage->page)
2165 				goto free_pages;
2166 			/* If this is valid from a previous boot */
2167 			if (meta->head_buffer)
2168 				rb_meta_buffer_update(cpu_buffer, bpage);
2169 			bpage->range = 1;
2170 			bpage->id = i + 1;
2171 		} else {
2172 			page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
2173 						mflags | __GFP_COMP | __GFP_ZERO,
2174 						cpu_buffer->buffer->subbuf_order);
2175 			if (!page)
2176 				goto free_pages;
2177 			bpage->page = page_address(page);
2178 			rb_init_page(bpage->page);
2179 		}
2180 		bpage->order = cpu_buffer->buffer->subbuf_order;
2181 
2182 		if (user_thread && fatal_signal_pending(current))
2183 			goto free_pages;
2184 	}
2185 	if (user_thread)
2186 		clear_current_oom_origin();
2187 
2188 	return 0;
2189 
2190 free_pages:
2191 	list_for_each_entry_safe(bpage, tmp, pages, list) {
2192 		list_del_init(&bpage->list);
2193 		free_buffer_page(bpage);
2194 	}
2195 	if (user_thread)
2196 		clear_current_oom_origin();
2197 
2198 	return -ENOMEM;
2199 }
2200 
2201 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
2202 			     unsigned long nr_pages)
2203 {
2204 	LIST_HEAD(pages);
2205 
2206 	WARN_ON(!nr_pages);
2207 
2208 	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
2209 		return -ENOMEM;
2210 
2211 	/*
2212 	 * The ring buffer page list is a circular list that does not
2213 	 * start and end with a list head. All page list items point to
2214 	 * other pages.
2215 	 */
2216 	cpu_buffer->pages = pages.next;
2217 	list_del(&pages);
2218 
2219 	cpu_buffer->nr_pages = nr_pages;
2220 
2221 	rb_check_pages(cpu_buffer);
2222 
2223 	return 0;
2224 }
2225 
2226 static struct ring_buffer_per_cpu *
2227 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
2228 {
2229 	struct ring_buffer_per_cpu *cpu_buffer;
2230 	struct ring_buffer_cpu_meta *meta;
2231 	struct buffer_page *bpage;
2232 	struct page *page;
2233 	int ret;
2234 
2235 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
2236 				  GFP_KERNEL, cpu_to_node(cpu));
2237 	if (!cpu_buffer)
2238 		return NULL;
2239 
2240 	cpu_buffer->cpu = cpu;
2241 	cpu_buffer->buffer = buffer;
2242 	raw_spin_lock_init(&cpu_buffer->reader_lock);
2243 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
2244 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
2245 	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
2246 	init_completion(&cpu_buffer->update_done);
2247 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
2248 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
2249 	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
2250 	mutex_init(&cpu_buffer->mapping_lock);
2251 
2252 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2253 			    GFP_KERNEL, cpu_to_node(cpu));
2254 	if (!bpage)
2255 		goto fail_free_buffer;
2256 
2257 	rb_check_bpage(cpu_buffer, bpage);
2258 
2259 	cpu_buffer->reader_page = bpage;
2260 
2261 	if (buffer->range_addr_start) {
2262 		/*
2263 		 * Range mapped buffers have the same restrictions as memory
2264 		 * mapped ones do.
2265 		 */
2266 		cpu_buffer->mapped = 1;
2267 		cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
2268 		bpage->page = rb_range_buffer(cpu_buffer, 0);
2269 		if (!bpage->page)
2270 			goto fail_free_reader;
2271 		if (cpu_buffer->ring_meta->head_buffer)
2272 			rb_meta_buffer_update(cpu_buffer, bpage);
2273 		bpage->range = 1;
2274 	} else {
2275 		page = alloc_pages_node(cpu_to_node(cpu),
2276 					GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
2277 					cpu_buffer->buffer->subbuf_order);
2278 		if (!page)
2279 			goto fail_free_reader;
2280 		bpage->page = page_address(page);
2281 		rb_init_page(bpage->page);
2282 	}
2283 
2284 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2285 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
2286 
2287 	ret = rb_allocate_pages(cpu_buffer, nr_pages);
2288 	if (ret < 0)
2289 		goto fail_free_reader;
2290 
2291 	rb_meta_validate_events(cpu_buffer);
2292 
2293 	/* If the boot meta was valid then this has already been updated */
2294 	meta = cpu_buffer->ring_meta;
2295 	if (!meta || !meta->head_buffer ||
2296 	    !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
2297 		if (meta && meta->head_buffer &&
2298 		    (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
2299 			pr_warn("Ring buffer meta buffers not all mapped\n");
2300 			if (!cpu_buffer->head_page)
2301 				pr_warn("   Missing head_page\n");
2302 			if (!cpu_buffer->commit_page)
2303 				pr_warn("   Missing commit_page\n");
2304 			if (!cpu_buffer->tail_page)
2305 				pr_warn("   Missing tail_page\n");
2306 		}
2307 
2308 		cpu_buffer->head_page
2309 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
2310 		cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
2311 
2312 		rb_head_page_activate(cpu_buffer);
2313 
2314 		if (cpu_buffer->ring_meta)
2315 			meta->commit_buffer = meta->head_buffer;
2316 	} else {
2317 		/* The valid meta buffer still needs to activate the head page */
2318 		rb_head_page_activate(cpu_buffer);
2319 	}
2320 
2321 	return cpu_buffer;
2322 
2323  fail_free_reader:
2324 	free_buffer_page(cpu_buffer->reader_page);
2325 
2326  fail_free_buffer:
2327 	kfree(cpu_buffer);
2328 	return NULL;
2329 }
2330 
2331 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
2332 {
2333 	struct list_head *head = cpu_buffer->pages;
2334 	struct buffer_page *bpage, *tmp;
2335 
2336 	irq_work_sync(&cpu_buffer->irq_work.work);
2337 
2338 	free_buffer_page(cpu_buffer->reader_page);
2339 
2340 	if (head) {
2341 		rb_head_page_deactivate(cpu_buffer);
2342 
2343 		list_for_each_entry_safe(bpage, tmp, head, list) {
2344 			list_del_init(&bpage->list);
2345 			free_buffer_page(bpage);
2346 		}
2347 		bpage = list_entry(head, struct buffer_page, list);
2348 		free_buffer_page(bpage);
2349 	}
2350 
2351 	free_page((unsigned long)cpu_buffer->free_page);
2352 
2353 	kfree(cpu_buffer);
2354 }
2355 
2356 static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
2357 					 int order, unsigned long start,
2358 					 unsigned long end,
2359 					 unsigned long scratch_size,
2360 					 struct lock_class_key *key)
2361 {
2362 	struct trace_buffer *buffer;
2363 	long nr_pages;
2364 	int subbuf_size;
2365 	int bsize;
2366 	int cpu;
2367 	int ret;
2368 
2369 	/* keep it in its own cache line */
2370 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
2371 			 GFP_KERNEL);
2372 	if (!buffer)
2373 		return NULL;
2374 
2375 	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
2376 		goto fail_free_buffer;
2377 
2378 	buffer->subbuf_order = order;
2379 	subbuf_size = (PAGE_SIZE << order);
2380 	buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
2381 
2382 	/* Max payload is buffer page size - header (8 bytes) */
2383 	buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
2384 
2385 	buffer->flags = flags;
2386 	buffer->clock = trace_clock_local;
2387 	buffer->reader_lock_key = key;
2388 
2389 	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
2390 	init_waitqueue_head(&buffer->irq_work.waiters);
2391 
2392 	buffer->cpus = nr_cpu_ids;
2393 
2394 	bsize = sizeof(void *) * nr_cpu_ids;
2395 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
2396 				  GFP_KERNEL);
2397 	if (!buffer->buffers)
2398 		goto fail_free_cpumask;
2399 
2400 	/* If start/end are specified, then that overrides size */
2401 	if (start && end) {
2402 		unsigned long buffers_start;
2403 		unsigned long ptr;
2404 		int n;
2405 
2406 		/* Make sure that start is word aligned */
2407 		start = ALIGN(start, sizeof(long));
2408 
2409 		/* scratch_size needs to be aligned too */
2410 		scratch_size = ALIGN(scratch_size, sizeof(long));
2411 
2412 		/* Account for the per CPU buffer meta data, word aligned, plus the scratch area */
2413 		buffers_start = start + sizeof(struct ring_buffer_cpu_meta);
2414 		buffers_start = ALIGN(buffers_start, sizeof(long));
2415 		buffers_start += scratch_size;
2416 
2417 		/* Calculate the size for the per CPU data */
2418 		size = end - buffers_start;
2419 		size = size / nr_cpu_ids;
2420 
2421 		/*
2422 		 * The number of sub-buffers (nr_pages) is determined by the
2423 		 * total size allocated minus the meta data size.
2424 		 * Then that is divided by the number of per CPU buffers
2425 		 * needed, plus account for the integer array index that
2426 		 * will be appended to the meta data.
2427 		 */
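		/*
		 * Worked example (numbers purely illustrative): with a 1M
		 * per CPU chunk, 4K sub-buffers and a 40 byte cpu_meta,
		 * nr_pages = (1048576 - 40) / (4096 + 4) = 255, which is
		 * then shrunk below, if needed, until all chunks (with
		 * their alignment padding) fit before @end.
		 */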
2428 		nr_pages = (size - sizeof(struct ring_buffer_cpu_meta)) /
2429 			(subbuf_size + sizeof(int));
2430 		/* Need at least two pages plus the reader page */
2431 		if (nr_pages < 3)
2432 			goto fail_free_buffers;
2433 
2434  again:
2435 		/* Make sure that the size fits when aligned */
2436 		for (n = 0, ptr = buffers_start; n < nr_cpu_ids; n++) {
2437 			ptr += sizeof(struct ring_buffer_cpu_meta) +
2438 				sizeof(int) * nr_pages;
2439 			ptr = ALIGN(ptr, subbuf_size);
2440 			ptr += subbuf_size * nr_pages;
2441 		}
2442 		if (ptr > end) {
2443 			if (nr_pages <= 3)
2444 				goto fail_free_buffers;
2445 			nr_pages--;
2446 			goto again;
2447 		}
2448 
2449 		/* nr_pages should not count the reader page */
2450 		nr_pages--;
2451 		buffer->range_addr_start = start;
2452 		buffer->range_addr_end = end;
2453 
2454 		rb_range_meta_init(buffer, nr_pages, scratch_size);
2455 	} else {
2456 
2457 		/* need at least two pages */
2458 		nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2459 		if (nr_pages < 2)
2460 			nr_pages = 2;
2461 	}
2462 
2463 	cpu = raw_smp_processor_id();
2464 	cpumask_set_cpu(cpu, buffer->cpumask);
2465 	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
2466 	if (!buffer->buffers[cpu])
2467 		goto fail_free_buffers;
2468 
2469 	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2470 	if (ret < 0)
2471 		goto fail_free_buffers;
2472 
2473 	mutex_init(&buffer->mutex);
2474 
2475 	return buffer;
2476 
2477  fail_free_buffers:
2478 	for_each_buffer_cpu(buffer, cpu) {
2479 		if (buffer->buffers[cpu])
2480 			rb_free_cpu_buffer(buffer->buffers[cpu]);
2481 	}
2482 	kfree(buffer->buffers);
2483 
2484  fail_free_cpumask:
2485 	free_cpumask_var(buffer->cpumask);
2486 
2487  fail_free_buffer:
2488 	kfree(buffer);
2489 	return NULL;
2490 }
2491 
2492 /**
2493  * __ring_buffer_alloc - allocate a new ring_buffer
2494  * @size: the size in bytes per cpu that is needed.
2495  * @flags: attributes to set for the ring buffer.
2496  * @key: ring buffer reader_lock_key.
2497  *
2498  * Currently the only flag that is available is the RB_FL_OVERWRITE
2499  * flag. This flag means that the buffer will overwrite old data
2500  * when the buffer wraps. If this flag is not set, the buffer will
2501  * drop data when the tail hits the head.
2502  */
2503 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
2504 					struct lock_class_key *key)
2505 {
2506 	/* Default buffer page size - one system page */
2507 	return alloc_buffer(size, flags, 0, 0, 0, 0, key);
2508 
2509 }
2510 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
2511 
2512 /**
2513  * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2514  * @size: the size in bytes per cpu that is needed.
2515  * @flags: attributes to set for the ring buffer.
2516  * @order: sub-buffer order
2517  * @start: start of allocated range
2518  * @range_size: size of allocated range
2519  * @scratch_size: size of scratch area (for preallocated memory buffers)
2520  * @key: ring buffer reader_lock_key.
2521  *
2522  * Currently the only flag that is available is the RB_FL_OVERWRITE
2523  * flag. This flag means that the buffer will overwrite old data
2524  * when the buffer wraps. If this flag is not set, the buffer will
2525  * drop data when the tail hits the head.
2526  */
2527 struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
2528 					       int order, unsigned long start,
2529 					       unsigned long range_size,
2530 					       unsigned long scratch_size,
2531 					       struct lock_class_key *key)
2532 {
2533 	return alloc_buffer(size, flags, order, start, start + range_size,
2534 			    scratch_size, key);
2535 }
2536 
2537 void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
2538 {
2539 	struct ring_buffer_meta *meta;
2540 	void *ptr;
2541 
2542 	if (!buffer || !buffer->meta)
2543 		return NULL;
2544 
2545 	meta = buffer->meta;
2546 
2547 	ptr = (void *)ALIGN((unsigned long)meta + sizeof(*meta), sizeof(long));
2548 
2549 	if (size)
2550 		*size = (void *)meta + meta->buffers_offset - ptr;
2551 
2552 	return ptr;
2553 }
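
/*
 * The scratch area thus lives between the word-aligned end of
 * struct ring_buffer_meta and buffers_offset. For example, a caller
 * that passed scratch_size = 4096 to __ring_buffer_alloc_range() gets
 * back a pointer just past the meta header with *size of roughly 4096
 * (plus any alignment slack).
 */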
2554 
2555 /**
2556  * ring_buffer_free - free a ring buffer.
2557  * @buffer: the buffer to free.
2558  */
2559 void
2560 ring_buffer_free(struct trace_buffer *buffer)
2561 {
2562 	int cpu;
2563 
2564 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2565 
2566 	irq_work_sync(&buffer->irq_work.work);
2567 
2568 	for_each_buffer_cpu(buffer, cpu)
2569 		rb_free_cpu_buffer(buffer->buffers[cpu]);
2570 
2571 	kfree(buffer->buffers);
2572 	free_cpumask_var(buffer->cpumask);
2573 
2574 	kfree(buffer);
2575 }
2576 EXPORT_SYMBOL_GPL(ring_buffer_free);
2577 
2578 void ring_buffer_set_clock(struct trace_buffer *buffer,
2579 			   u64 (*clock)(void))
2580 {
2581 	buffer->clock = clock;
2582 }
2583 
2584 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
2585 {
2586 	buffer->time_stamp_abs = abs;
2587 }
2588 
2589 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
2590 {
2591 	return buffer->time_stamp_abs;
2592 }
2593 
2594 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
2595 {
2596 	return local_read(&bpage->entries) & RB_WRITE_MASK;
2597 }
2598 
2599 static inline unsigned long rb_page_write(struct buffer_page *bpage)
2600 {
2601 	return local_read(&bpage->write) & RB_WRITE_MASK;
2602 }
2603 
2604 static bool
2605 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
2606 {
2607 	struct list_head *tail_page, *to_remove, *next_page;
2608 	struct buffer_page *to_remove_page, *tmp_iter_page;
2609 	struct buffer_page *last_page, *first_page;
2610 	unsigned long nr_removed;
2611 	unsigned long head_bit;
2612 	int page_entries;
2613 
2614 	head_bit = 0;
2615 
2616 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
2617 	atomic_inc(&cpu_buffer->record_disabled);
2618 	/*
2619 	 * We don't race with the readers since we have acquired the reader
2620 	 * lock. We also don't race with writers after disabling recording.
2621 	 * This makes it easy to figure out the first and the last page to be
2622 	 * removed from the list. We unlink all the pages in between including
2623 	 * the first and last pages. This is done in a busy loop so that we
2624 	 * lose the least number of traces.
2625 	 * The pages are freed after we restart recording and unlock readers.
2626 	 */
2627 	tail_page = &cpu_buffer->tail_page->list;
2628 
2629 	/*
2630 	 * The tail page might be on the reader page; in that case we
2631 	 * remove the next page from the ring buffer instead.
2632 	 */
2633 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2634 		tail_page = rb_list_head(tail_page->next);
2635 	to_remove = tail_page;
2636 
2637 	/* start of pages to remove */
2638 	first_page = list_entry(rb_list_head(to_remove->next),
2639 				struct buffer_page, list);
2640 
2641 	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
2642 		to_remove = rb_list_head(to_remove)->next;
2643 		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
2644 	}
2645 	/* Read iterators need to reset themselves when some pages removed */
2646 	cpu_buffer->pages_removed += nr_removed;
2647 
2648 	next_page = rb_list_head(to_remove)->next;
2649 
2650 	/*
2651 	 * Now we remove all pages between tail_page and next_page.
2652 	 * Make sure that we have head_bit value preserved for the
2653 	 * next page
2654 	 */
2655 	tail_page->next = (struct list_head *)((unsigned long)next_page |
2656 						head_bit);
2657 	next_page = rb_list_head(next_page);
2658 	next_page->prev = tail_page;
2659 
2660 	/* make sure pages points to a valid page in the ring buffer */
2661 	cpu_buffer->pages = next_page;
2662 	cpu_buffer->cnt++;
2663 
2664 	/* update head page */
2665 	if (head_bit)
2666 		cpu_buffer->head_page = list_entry(next_page,
2667 						struct buffer_page, list);
2668 
2669 	/* pages are removed, resume tracing and then free the pages */
2670 	atomic_dec(&cpu_buffer->record_disabled);
2671 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2672 
2673 	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
2674 
2675 	/* last buffer page to remove */
2676 	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
2677 				list);
2678 	tmp_iter_page = first_page;
2679 
2680 	do {
2681 		cond_resched();
2682 
2683 		to_remove_page = tmp_iter_page;
2684 		rb_inc_page(&tmp_iter_page);
2685 
2686 		/* update the counters */
2687 		page_entries = rb_page_entries(to_remove_page);
2688 		if (page_entries) {
2689 			/*
2690 			 * If something was added to this page, it was full
2691 			 * since it is not the tail page. So we deduct the
2692 			 * bytes consumed in ring buffer from here.
2693 			 * Increment overrun to account for the lost events.
2694 			 */
2695 			local_add(page_entries, &cpu_buffer->overrun);
2696 			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
2697 			local_inc(&cpu_buffer->pages_lost);
2698 		}
2699 
2700 		/*
2701 		 * We have already removed references to this list item, just
2702 		 * free up the buffer_page and its page
2703 		 */
2704 		free_buffer_page(to_remove_page);
2705 		nr_removed--;
2706 
2707 	} while (to_remove_page != last_page);
2708 
2709 	RB_WARN_ON(cpu_buffer, nr_removed);
2710 
2711 	return nr_removed == 0;
2712 }
2713 
2714 static bool
2715 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
2716 {
2717 	struct list_head *pages = &cpu_buffer->new_pages;
2718 	unsigned long flags;
2719 	bool success;
2720 	int retries;
2721 
2722 	/* Can be called at early boot up, where interrupts must not be enabled */
2723 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2724 	/*
2725 	 * We are holding the reader lock, so the reader page won't be swapped
2726 	 * in the ring buffer. Now we are racing with the writer trying to
2727 	 * move head page and the tail page.
2728 	 * We are going to adapt the reader page update process where:
2729 	 * 1. We first splice the start and end of list of new pages between
2730 	 *    the head page and its previous page.
2731 	 * 2. We cmpxchg the prev_page->next to point from head page to the
2732 	 *    start of new pages list.
2733 	 * 3. Finally, we update the head->prev to the end of new list.
2734 	 *
2735 	 * We will try this process 10 times, to make sure that we don't keep
2736 	 * spinning.
2737 	 */
2738 	retries = 10;
2739 	success = false;
2740 	while (retries--) {
2741 		struct list_head *head_page, *prev_page;
2742 		struct list_head *last_page, *first_page;
2743 		struct list_head *head_page_with_bit;
2744 		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
2745 
2746 		if (!hpage)
2747 			break;
2748 		head_page = &hpage->list;
2749 		prev_page = head_page->prev;
2750 
2751 		first_page = pages->next;
2752 		last_page  = pages->prev;
2753 
2754 		head_page_with_bit = (struct list_head *)
2755 				     ((unsigned long)head_page | RB_PAGE_HEAD);
2756 
2757 		last_page->next = head_page_with_bit;
2758 		first_page->prev = prev_page;
2759 
2760 		/* caution: head_page_with_bit gets updated on cmpxchg failure */
2761 		if (try_cmpxchg(&prev_page->next,
2762 				&head_page_with_bit, first_page)) {
2763 			/*
2764 			 * yay, we replaced the page pointer to our new list,
2765 			 * now, we just have to update to head page's prev
2766 			 * pointer to point to end of list
2767 			 */
2768 			head_page->prev = last_page;
2769 			cpu_buffer->cnt++;
2770 			success = true;
2771 			break;
2772 		}
2773 	}
2774 
2775 	if (success)
2776 		INIT_LIST_HEAD(pages);
2777 	/*
2778 	 * If we weren't successful in adding in new pages, warn and stop
2779 	 * tracing
2780 	 */
2781 	RB_WARN_ON(cpu_buffer, !success);
2782 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2783 
2784 	/* free pages if they weren't inserted */
2785 	if (!success) {
2786 		struct buffer_page *bpage, *tmp;
2787 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2788 					 list) {
2789 			list_del_init(&bpage->list);
2790 			free_buffer_page(bpage);
2791 		}
2792 	}
2793 	return success;
2794 }
2795 
2796 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2797 {
2798 	bool success;
2799 
2800 	if (cpu_buffer->nr_pages_to_update > 0)
2801 		success = rb_insert_pages(cpu_buffer);
2802 	else
2803 		success = rb_remove_pages(cpu_buffer,
2804 					-cpu_buffer->nr_pages_to_update);
2805 
2806 	if (success)
2807 		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2808 }
2809 
2810 static void update_pages_handler(struct work_struct *work)
2811 {
2812 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2813 			struct ring_buffer_per_cpu, update_pages_work);
2814 	rb_update_pages(cpu_buffer);
2815 	complete(&cpu_buffer->update_done);
2816 }
2817 
2818 /**
2819  * ring_buffer_resize - resize the ring buffer
2820  * @buffer: the buffer to resize.
2821  * @size: the new size.
2822  * @cpu_id: the cpu buffer to resize
2823  *
2824  * Minimum size is 2 * buffer->subbuf_size.
2825  *
2826  * Returns 0 on success and < 0 on failure.
2827  */
2828 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2829 			int cpu_id)
2830 {
2831 	struct ring_buffer_per_cpu *cpu_buffer;
2832 	unsigned long nr_pages;
2833 	int cpu, err;
2834 
2835 	/*
2836 	 * Always succeed at resizing a non-existent buffer:
2837 	 */
2838 	if (!buffer)
2839 		return 0;
2840 
2841 	/* Make sure the requested buffer exists */
2842 	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2843 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2844 		return 0;
2845 
2846 	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2847 
2848 	/* we need a minimum of two pages */
2849 	if (nr_pages < 2)
2850 		nr_pages = 2;
2851 
2852 	/* prevent another thread from changing buffer sizes */
2853 	mutex_lock(&buffer->mutex);
2854 	atomic_inc(&buffer->resizing);
2855 
2856 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2857 		/*
2858 		 * Don't succeed if resizing is disabled, as a reader might be
2859 		 * manipulating the ring buffer and is expecting a sane state while
2860 		 * this is true.
2861 		 */
2862 		for_each_buffer_cpu(buffer, cpu) {
2863 			cpu_buffer = buffer->buffers[cpu];
2864 			if (atomic_read(&cpu_buffer->resize_disabled)) {
2865 				err = -EBUSY;
2866 				goto out_err_unlock;
2867 			}
2868 		}
2869 
2870 		/* calculate the pages to update */
2871 		for_each_buffer_cpu(buffer, cpu) {
2872 			cpu_buffer = buffer->buffers[cpu];
2873 
2874 			cpu_buffer->nr_pages_to_update = nr_pages -
2875 							cpu_buffer->nr_pages;
2876 			/*
2877 			 * nothing more to do for removing pages or no update
2878 			 */
2879 			if (cpu_buffer->nr_pages_to_update <= 0)
2880 				continue;
2881 			/*
2882 			 * to add pages, make sure all new pages can be
2883 			 * allocated without receiving ENOMEM
2884 			 */
2885 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2886 			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2887 						&cpu_buffer->new_pages)) {
2888 				/* not enough memory for new pages */
2889 				err = -ENOMEM;
2890 				goto out_err;
2891 			}
2892 
2893 			cond_resched();
2894 		}
2895 
2896 		cpus_read_lock();
2897 		/*
2898 		 * Fire off all the required work handlers
2899 		 * We can't schedule on offline CPUs, but it's not necessary
2900 		 * since we can change their buffer sizes without any race.
2901 		 */
2902 		for_each_buffer_cpu(buffer, cpu) {
2903 			cpu_buffer = buffer->buffers[cpu];
2904 			if (!cpu_buffer->nr_pages_to_update)
2905 				continue;
2906 
2907 			/* Can't run something on an offline CPU. */
2908 			if (!cpu_online(cpu)) {
2909 				rb_update_pages(cpu_buffer);
2910 				cpu_buffer->nr_pages_to_update = 0;
2911 			} else {
2912 				/* Run directly if possible. */
2913 				migrate_disable();
2914 				if (cpu != smp_processor_id()) {
2915 					migrate_enable();
2916 					schedule_work_on(cpu,
2917 							 &cpu_buffer->update_pages_work);
2918 				} else {
2919 					update_pages_handler(&cpu_buffer->update_pages_work);
2920 					migrate_enable();
2921 				}
2922 			}
2923 		}
2924 
2925 		/* wait for all the updates to complete */
2926 		for_each_buffer_cpu(buffer, cpu) {
2927 			cpu_buffer = buffer->buffers[cpu];
2928 			if (!cpu_buffer->nr_pages_to_update)
2929 				continue;
2930 
2931 			if (cpu_online(cpu))
2932 				wait_for_completion(&cpu_buffer->update_done);
2933 			cpu_buffer->nr_pages_to_update = 0;
2934 		}
2935 
2936 		cpus_read_unlock();
2937 	} else {
2938 		cpu_buffer = buffer->buffers[cpu_id];
2939 
2940 		if (nr_pages == cpu_buffer->nr_pages)
2941 			goto out;
2942 
2943 		/*
2944 		 * Don't succeed if resizing is disabled, as a reader might be
2945 		 * manipulating the ring buffer and is expecting a sane state while
2946 		 * this is true.
2947 		 */
2948 		if (atomic_read(&cpu_buffer->resize_disabled)) {
2949 			err = -EBUSY;
2950 			goto out_err_unlock;
2951 		}
2952 
2953 		cpu_buffer->nr_pages_to_update = nr_pages -
2954 						cpu_buffer->nr_pages;
2955 
2956 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2957 		if (cpu_buffer->nr_pages_to_update > 0 &&
2958 			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2959 					    &cpu_buffer->new_pages)) {
2960 			err = -ENOMEM;
2961 			goto out_err;
2962 		}
2963 
2964 		cpus_read_lock();
2965 
2966 		/* Can't run something on an offline CPU. */
2967 		if (!cpu_online(cpu_id))
2968 			rb_update_pages(cpu_buffer);
2969 		else {
2970 			/* Run directly if possible. */
2971 			migrate_disable();
2972 			if (cpu_id == smp_processor_id()) {
2973 				rb_update_pages(cpu_buffer);
2974 				migrate_enable();
2975 			} else {
2976 				migrate_enable();
2977 				schedule_work_on(cpu_id,
2978 						 &cpu_buffer->update_pages_work);
2979 				wait_for_completion(&cpu_buffer->update_done);
2980 			}
2981 		}
2982 
2983 		cpu_buffer->nr_pages_to_update = 0;
2984 		cpus_read_unlock();
2985 	}
2986 
2987  out:
2988 	/*
2989 	 * The ring buffer resize can happen with the ring buffer
2990 	 * enabled, so that the update disturbs the tracing as little
2991 	 * as possible. But if the buffer is disabled, we do not need
2992 	 * to worry about that, and we can take the time to verify
2993 	 * that the buffer is not corrupt.
2994 	 */
2995 	if (atomic_read(&buffer->record_disabled)) {
2996 		atomic_inc(&buffer->record_disabled);
2997 		/*
2998 		 * Even though the buffer was disabled, we must make sure
2999 		 * that it is truly disabled before calling rb_check_pages.
3000 		 * There could have been a race between checking
3001 		 * record_disable and incrementing it.
3002 		 */
3003 		synchronize_rcu();
3004 		for_each_buffer_cpu(buffer, cpu) {
3005 			cpu_buffer = buffer->buffers[cpu];
3006 			rb_check_pages(cpu_buffer);
3007 		}
3008 		atomic_dec(&buffer->record_disabled);
3009 	}
3010 
3011 	atomic_dec(&buffer->resizing);
3012 	mutex_unlock(&buffer->mutex);
3013 	return 0;
3014 
3015  out_err:
3016 	for_each_buffer_cpu(buffer, cpu) {
3017 		struct buffer_page *bpage, *tmp;
3018 
3019 		cpu_buffer = buffer->buffers[cpu];
3020 		cpu_buffer->nr_pages_to_update = 0;
3021 
3022 		if (list_empty(&cpu_buffer->new_pages))
3023 			continue;
3024 
3025 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
3026 					list) {
3027 			list_del_init(&bpage->list);
3028 			free_buffer_page(bpage);
3029 		}
3030 	}
3031  out_err_unlock:
3032 	atomic_dec(&buffer->resizing);
3033 	mutex_unlock(&buffer->mutex);
3034 	return err;
3035 }
3036 EXPORT_SYMBOL_GPL(ring_buffer_resize);
3037 
3038 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
3039 {
3040 	mutex_lock(&buffer->mutex);
3041 	if (val)
3042 		buffer->flags |= RB_FL_OVERWRITE;
3043 	else
3044 		buffer->flags &= ~RB_FL_OVERWRITE;
3045 	mutex_unlock(&buffer->mutex);
3046 }
3047 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
3048 
3049 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
3050 {
3051 	return bpage->page->data + index;
3052 }
3053 
3054 static __always_inline struct ring_buffer_event *
3055 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
3056 {
3057 	return __rb_page_index(cpu_buffer->reader_page,
3058 			       cpu_buffer->reader_page->read);
3059 }
3060 
3061 static struct ring_buffer_event *
3062 rb_iter_head_event(struct ring_buffer_iter *iter)
3063 {
3064 	struct ring_buffer_event *event;
3065 	struct buffer_page *iter_head_page = iter->head_page;
3066 	unsigned long commit;
3067 	unsigned length;
3068 
3069 	if (iter->head != iter->next_event)
3070 		return iter->event;
3071 
3072 	/*
3073 	 * When the writer goes across pages, it issues a cmpxchg which
3074 	 * is a mb(), which will synchronize with the rmb here.
3075 	 * (see rb_tail_page_update() and __rb_reserve_next())
3076 	 */
3077 	commit = rb_page_commit(iter_head_page);
3078 	smp_rmb();
3079 
3080 	/* An event needs to be at least 8 bytes in size */
3081 	if (iter->head > commit - 8)
3082 		goto reset;
3083 
3084 	event = __rb_page_index(iter_head_page, iter->head);
3085 	length = rb_event_length(event);
3086 
3087 	/*
3088 	 * READ_ONCE() doesn't work on functions and we don't want the
3089 	 * compiler doing any crazy optimizations with length.
3090 	 */
3091 	barrier();
3092 
3093 	if ((iter->head + length) > commit || length > iter->event_size)
3094 		/* Writer corrupted the read? */
3095 		goto reset;
3096 
3097 	memcpy(iter->event, event, length);
3098 	/*
3099 	 * If the page stamp is still the same after this rmb() then the
3100 	 * event was safely copied without the writer entering the page.
3101 	 */
3102 	smp_rmb();
3103 
3104 	/* Make sure the page didn't change since we read this */
3105 	if (iter->page_stamp != iter_head_page->page->time_stamp ||
3106 	    commit > rb_page_commit(iter_head_page))
3107 		goto reset;
3108 
3109 	iter->next_event = iter->head + length;
3110 	return iter->event;
3111  reset:
3112 	/* Reset to the beginning */
3113 	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3114 	iter->head = 0;
3115 	iter->next_event = 0;
3116 	iter->missed_events = 1;
3117 	return NULL;
3118 }
3119 
3120 /* Size is determined by what has been committed */
3121 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
3122 {
3123 	return rb_page_commit(bpage) & ~RB_MISSED_MASK;
3124 }
3125 
3126 static __always_inline unsigned
3127 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
3128 {
3129 	return rb_page_commit(cpu_buffer->commit_page);
3130 }
3131 
3132 static __always_inline unsigned
3133 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
3134 {
3135 	unsigned long addr = (unsigned long)event;
3136 
3137 	addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
3138 
3139 	return addr - BUF_PAGE_HDR_SIZE;
3140 }
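
/*
 * E.g. (illustrative): with 4K sub-buffers (order 0), an event at
 * address ...1a40 masks down to offset 0xa40 within its sub-buffer,
 * and subtracting BUF_PAGE_HDR_SIZE yields its index into the page's
 * data[] array.
 */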
3141 
3142 static void rb_inc_iter(struct ring_buffer_iter *iter)
3143 {
3144 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3145 
3146 	/*
3147 	 * The iterator could be on the reader page (it starts there).
3148 	 * But the head could have moved, since the reader was
3149 	 * found. Check for this case and assign the iterator
3150 	 * to the head page instead of next.
3151 	 */
3152 	if (iter->head_page == cpu_buffer->reader_page)
3153 		iter->head_page = rb_set_head_page(cpu_buffer);
3154 	else
3155 		rb_inc_page(&iter->head_page);
3156 
3157 	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3158 	iter->head = 0;
3159 	iter->next_event = 0;
3160 }
3161 
3162 /* Return the index into the sub-buffers for a given sub-buffer */
3163 static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf)
3164 {
3165 	void *subbuf_array;
3166 
3167 	subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
3168 	subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
3169 	return (subbuf - subbuf_array) / meta->subbuf_size;
3170 }
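
/*
 * Example (hypothetical addresses): if the first sub-buffer after the
 * meta data lands at 0x10001000 with a 4K subbuf_size, a @subbuf of
 * 0x10003000 maps to index 2, i.e. the third physical sub-buffer.
 */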
3171 
3172 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
3173 				struct buffer_page *next_page)
3174 {
3175 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3176 	unsigned long old_head = (unsigned long)next_page->page;
3177 	unsigned long new_head;
3178 
3179 	rb_inc_page(&next_page);
3180 	new_head = (unsigned long)next_page->page;
3181 
3182 	/*
3183 	 * Only move it forward once; if something else came in and
3184 	 * moved it forward, then we don't want to touch it.
3185 	 */
3186 	(void)cmpxchg(&meta->head_buffer, old_head, new_head);
3187 }
3188 
3189 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
3190 				  struct buffer_page *reader)
3191 {
3192 	struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3193 	void *old_reader = cpu_buffer->reader_page->page;
3194 	void *new_reader = reader->page;
3195 	int id;
3196 
3197 	id = reader->id;
3198 	cpu_buffer->reader_page->id = id;
3199 	reader->id = 0;
3200 
3201 	meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
3202 	meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
3203 
3204 	/* The head pointer is the one after the reader */
3205 	rb_update_meta_head(cpu_buffer, reader);
3206 }
3207 
3208 /*
3209  * rb_handle_head_page - writer hit the head page
3210  *
3211  * Returns: +1 to retry page
3212  *           0 to continue
3213  *          -1 on error
3214  */
3215 static int
3216 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
3217 		    struct buffer_page *tail_page,
3218 		    struct buffer_page *next_page)
3219 {
3220 	struct buffer_page *new_head;
3221 	int entries;
3222 	int type;
3223 	int ret;
3224 
3225 	entries = rb_page_entries(next_page);
3226 
3227 	/*
3228 	 * The hard part is here. We need to move the head
3229 	 * forward, and protect against both readers on
3230 	 * other CPUs and writers coming in via interrupts.
3231 	 */
3232 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
3233 				       RB_PAGE_HEAD);
3234 
3235 	/*
3236 	 * type can be one of four:
3237 	 *  NORMAL - an interrupt already moved it for us
3238 	 *  HEAD   - we are the first to get here.
3239 	 *  UPDATE - we are the interrupt interrupting
3240 	 *           a current move.
3241 	 *  MOVED  - a reader on another CPU moved the next
3242 	 *           pointer to its reader page. Give up
3243 	 *           and try again.
3244 	 */
3245 
3246 	switch (type) {
3247 	case RB_PAGE_HEAD:
3248 		/*
3249 		 * We changed the head to UPDATE, thus
3250 		 * it is our responsibility to update
3251 		 * the counters.
3252 		 */
3253 		local_add(entries, &cpu_buffer->overrun);
3254 		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
3255 		local_inc(&cpu_buffer->pages_lost);
3256 
3257 		if (cpu_buffer->ring_meta)
3258 			rb_update_meta_head(cpu_buffer, next_page);
3259 		/*
3260 		 * The entries will be zeroed out when we move the
3261 		 * tail page.
3262 		 */
3263 
3264 		/* still more to do */
3265 		break;
3266 
3267 	case RB_PAGE_UPDATE:
3268 		/*
3269 		 * This is an interrupt that interrupted the
3270 		 * previous update. Still more to do.
3271 		 */
3272 		break;
3273 	case RB_PAGE_NORMAL:
3274 		/*
3275 		 * An interrupt came in before the update
3276 		 * and processed this for us.
3277 		 * Nothing left to do.
3278 		 */
3279 		return 1;
3280 	case RB_PAGE_MOVED:
3281 		/*
3282 		 * The reader is on another CPU and just did
3283 		 * a swap with our next_page.
3284 		 * Try again.
3285 		 */
3286 		return 1;
3287 	default:
3288 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
3289 		return -1;
3290 	}
3291 
3292 	/*
3293 	 * Now that we are here, the old head pointer is
3294 	 * set to UPDATE. This will keep the reader from
3295 	 * swapping the head page with the reader page.
3296 	 * The reader (on another CPU) will spin till
3297 	 * we are finished.
3298 	 *
3299 	 * We just need to protect against interrupts
3300 	 * doing the job. We will set the next pointer
3301 	 * to HEAD. After that, we set the old pointer
3302 	 * to NORMAL, but only if it was HEAD before.
3303 	 * Otherwise we are an interrupt, and we only
3304 	 * want the outermost commit to reset it.
3305 	 */
3306 	new_head = next_page;
3307 	rb_inc_page(&new_head);
3308 
3309 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
3310 				    RB_PAGE_NORMAL);
3311 
3312 	/*
3313 	 * Valid returns are:
3314 	 *  HEAD   - an interrupt came in and already set it.
3315 	 *  NORMAL - One of two things:
3316 	 *            1) We really set it.
3317 	 *            2) A bunch of interrupts came in and moved
3318 	 *               the page forward again.
3319 	 */
3320 	switch (ret) {
3321 	case RB_PAGE_HEAD:
3322 	case RB_PAGE_NORMAL:
3323 		/* OK */
3324 		break;
3325 	default:
3326 		RB_WARN_ON(cpu_buffer, 1);
3327 		return -1;
3328 	}
3329 
3330 	/*
3331 	 * It is possible that an interrupt came in,
3332 	 * set the head up, then more interrupts came in
3333 	 * and moved it again. When we get back here,
3334 	 * the page would have been set to NORMAL but we
3335 	 * just set it back to HEAD.
3336 	 *
3337 	 * How do you detect this? Well, if that happened
3338 	 * the tail page would have moved.
3339 	 */
3340 	if (ret == RB_PAGE_NORMAL) {
3341 		struct buffer_page *buffer_tail_page;
3342 
3343 		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
3344 		/*
3345 		 * If the tail had moved past next, then we need
3346 		 * to reset the pointer.
3347 		 */
3348 		if (buffer_tail_page != tail_page &&
3349 		    buffer_tail_page != next_page)
3350 			rb_head_page_set_normal(cpu_buffer, new_head,
3351 						next_page,
3352 						RB_PAGE_HEAD);
3353 	}
3354 
3355 	/*
3356 	 * If this was the outer most commit (the one that
3357 	 * changed the original pointer from HEAD to UPDATE),
3358 	 * then it is up to us to reset it to NORMAL.
3359 	 */
3360 	if (type == RB_PAGE_HEAD) {
3361 		ret = rb_head_page_set_normal(cpu_buffer, next_page,
3362 					      tail_page,
3363 					      RB_PAGE_UPDATE);
3364 		if (RB_WARN_ON(cpu_buffer,
3365 			       ret != RB_PAGE_UPDATE))
3366 			return -1;
3367 	}
3368 
3369 	return 0;
3370 }
3371 
3372 static inline void
3373 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
3374 	      unsigned long tail, struct rb_event_info *info)
3375 {
3376 	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
3377 	struct buffer_page *tail_page = info->tail_page;
3378 	struct ring_buffer_event *event;
3379 	unsigned long length = info->length;
3380 
3381 	/*
3382 	 * Only the event that crossed the page boundary
3383 	 * must fill the old tail_page with padding.
3384 	 */
3385 	if (tail >= bsize) {
3386 		/*
3387 		 * If the page was filled, then we still need
3388 		 * to update the real_end. Reset it to zero
3389 		 * and the reader will ignore it.
3390 		 */
3391 		if (tail == bsize)
3392 			tail_page->real_end = 0;
3393 
3394 		local_sub(length, &tail_page->write);
3395 		return;
3396 	}
3397 
3398 	event = __rb_page_index(tail_page, tail);
3399 
3400 	/*
3401 	 * Save the original length to the meta data.
3402 	 * This will be used by the reader to update the lost
3403 	 * event counter.
3404 	 */
3405 	tail_page->real_end = tail;
3406 
3407 	/*
3408 	 * If this event is bigger than the minimum size, then
3409 	 * we need to be careful that we don't subtract the
3410 	 * write counter enough to allow another writer to slip
3411 	 * in on this page.
3412 	 * We put in a discarded commit instead, to make sure
3413 	 * that this space is not used again, and this space will
3414 	 * not be accounted into 'entries_bytes'.
3415 	 *
3416 	 * If we are less than the minimum size, we don't need to
3417 	 * worry about it.
3418 	 */
3419 	if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
3420 		/* No room for any events */
3421 
3422 		/* Mark the rest of the page with padding */
3423 		rb_event_set_padding(event);
3424 
3425 		/* Make sure the padding is visible before the write update */
3426 		smp_wmb();
3427 
3428 		/* Set the write back to the previous setting */
3429 		local_sub(length, &tail_page->write);
3430 		return;
3431 	}
3432 
3433 	/* Put in a discarded event */
3434 	event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
3435 	event->type_len = RINGBUF_TYPE_PADDING;
3436 	/* time delta must be non zero */
3437 	event->time_delta = 1;
3438 
3439 	/* account for padding bytes */
3440 	local_add(bsize - tail, &cpu_buffer->entries_bytes);
3441 
3442 	/* Make sure the padding is visible before the tail_page->write update */
3443 	smp_wmb();
3444 
3445 	/* Set write to end of buffer */
3446 	length = (tail + length) - bsize;
3447 	local_sub(length, &tail_page->write);
3448 }
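
/*
 * Illustrative sketch (not part of the original file): a worked example of
 * the rb_reset_tail() arithmetic above, assuming a 4096 byte sub-buffer and
 * a 4 byte event header (RB_EVNT_HDR_SIZE):
 *
 *	bsize  = 4096
 *	tail   = 4080	(index where the crossing event started)
 *	length = 64	(reserved length that overflowed the page)
 *
 * The discarded PADDING event is written at offset 4080 and covers the
 * remaining 16 bytes:  event->array[0] = (4096 - 4080) - 4 = 12.
 * The write index is then pulled back from 4144 to the end of the page:
 * (4080 + 64) - 4096 = 48, so local_sub(48, &tail_page->write).
 */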
3449 
3450 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3451 
3452 /*
3453  * This is the slow path, force gcc not to inline it.
3454  */
3455 static noinline struct ring_buffer_event *
3456 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
3457 	     unsigned long tail, struct rb_event_info *info)
3458 {
3459 	struct buffer_page *tail_page = info->tail_page;
3460 	struct buffer_page *commit_page = cpu_buffer->commit_page;
3461 	struct trace_buffer *buffer = cpu_buffer->buffer;
3462 	struct buffer_page *next_page;
3463 	int ret;
3464 
3465 	next_page = tail_page;
3466 
3467 	rb_inc_page(&next_page);
3468 
3469 	/*
3470 	 * If for some reason, we had an interrupt storm that made
3471 	 * it all the way around the buffer, bail, and warn
3472 	 * about it.
3473 	 */
3474 	if (unlikely(next_page == commit_page)) {
3475 		local_inc(&cpu_buffer->commit_overrun);
3476 		goto out_reset;
3477 	}
3478 
3479 	/*
3480 	 * This is where the fun begins!
3481 	 *
3482 	 * We are fighting against races between a reader that
3483 	 * could be on another CPU trying to swap its reader
3484 	 * page with the buffer head.
3485 	 *
3486 	 * We are also fighting against interrupts coming in and
3487 	 * moving the head or tail on us as well.
3488 	 *
3489 	 * If the next page is the head page then we have filled
3490 	 * the buffer, unless the commit page is still on the
3491 	 * reader page.
3492 	 */
3493 	if (rb_is_head_page(next_page, &tail_page->list)) {
3494 
3495 		/*
3496 		 * If the commit is not on the reader page, then
3497 		 * move the header page.
3498 		 */
3499 		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
3500 			/*
3501 			 * If we are not in overwrite mode,
3502 			 * this is easy, just stop here.
3503 			 */
3504 			if (!(buffer->flags & RB_FL_OVERWRITE)) {
3505 				local_inc(&cpu_buffer->dropped_events);
3506 				goto out_reset;
3507 			}
3508 
3509 			ret = rb_handle_head_page(cpu_buffer,
3510 						  tail_page,
3511 						  next_page);
3512 			if (ret < 0)
3513 				goto out_reset;
3514 			if (ret)
3515 				goto out_again;
3516 		} else {
3517 			/*
3518 			 * We need to be careful here too. The
3519 			 * commit page could still be on the reader
3520 			 * page. We could have a small buffer, and
3521 			 * have filled up the buffer with events
3522 			 * from interrupts and such, and wrapped.
3523 			 *
3524 			 * Note, if the tail page is also on the
3525 			 * reader_page, we let it move out.
3526 			 */
3527 			if (unlikely((cpu_buffer->commit_page !=
3528 				      cpu_buffer->tail_page) &&
3529 				     (cpu_buffer->commit_page ==
3530 				      cpu_buffer->reader_page))) {
3531 				local_inc(&cpu_buffer->commit_overrun);
3532 				goto out_reset;
3533 			}
3534 		}
3535 	}
3536 
3537 	rb_tail_page_update(cpu_buffer, tail_page, next_page);
3538 
3539  out_again:
3540 
3541 	rb_reset_tail(cpu_buffer, tail, info);
3542 
3543 	/* Commit what we have for now. */
3544 	rb_end_commit(cpu_buffer);
3545 	/* rb_end_commit() decs committing */
3546 	local_inc(&cpu_buffer->committing);
3547 
3548 	/* fail and let the caller try again */
3549 	return ERR_PTR(-EAGAIN);
3550 
3551  out_reset:
3552 	/* reset write */
3553 	rb_reset_tail(cpu_buffer, tail, info);
3554 
3555 	return NULL;
3556 }
3557 
3558 /* Slow path */
3559 static struct ring_buffer_event *
3560 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3561 		  struct ring_buffer_event *event, u64 delta, bool abs)
3562 {
3563 	if (abs)
3564 		event->type_len = RINGBUF_TYPE_TIME_STAMP;
3565 	else
3566 		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
3567 
3568 	/* Not the first event on the page, or not delta? */
3569 	if (abs || rb_event_index(cpu_buffer, event)) {
3570 		event->time_delta = delta & TS_MASK;
3571 		event->array[0] = delta >> TS_SHIFT;
3572 	} else {
3573 		/* nope, just zero it */
3574 		event->time_delta = 0;
3575 		event->array[0] = 0;
3576 	}
3577 
3578 	return skip_time_extend(event);
3579 }
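
/*
 * Illustrative sketch (not part of the original file): how a delta that does
 * not fit in the 27-bit time_delta field is split by rb_add_time_stamp()
 * above, assuming TS_SHIFT is 27 and TS_MASK covers the low 27 bits:
 *
 *	delta             = 0x12345678
 *	event->time_delta = delta & TS_MASK   = 0x2345678
 *	event->array[0]   = delta >> TS_SHIFT = 0x2
 *
 * The reader side reconstructs the value as (array[0] << TS_SHIFT) | time_delta.
 */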
3580 
3581 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3582 static inline bool sched_clock_stable(void)
3583 {
3584 	return true;
3585 }
3586 #endif
3587 
3588 static void
3589 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3590 		   struct rb_event_info *info)
3591 {
3592 	u64 write_stamp;
3593 
3594 	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
3595 		  (unsigned long long)info->delta,
3596 		  (unsigned long long)info->ts,
3597 		  (unsigned long long)info->before,
3598 		  (unsigned long long)info->after,
3599 		  (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
3600 		  sched_clock_stable() ? "" :
3601 		  "If you just came from a suspend/resume,\n"
3602 		  "please switch to the trace global clock:\n"
3603 		  "  echo global > /sys/kernel/tracing/trace_clock\n"
3604 		  "or add trace_clock=global to the kernel command line\n");
3605 }
3606 
3607 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
3608 				      struct ring_buffer_event **event,
3609 				      struct rb_event_info *info,
3610 				      u64 *delta,
3611 				      unsigned int *length)
3612 {
3613 	bool abs = info->add_timestamp &
3614 		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
3615 
3616 	if (unlikely(info->delta > (1ULL << 59))) {
3617 		/*
3618 		 * Some timers can use more than 59 bits, and when a timestamp
3619 		 * is added to the buffer, it will lose those bits.
3620 		 */
3621 		if (abs && (info->ts & TS_MSB)) {
3622 			info->delta &= ABS_TS_MASK;
3623 
3624 		/* did the clock go backwards */
3625 		} else if (info->before == info->after && info->before > info->ts) {
3626 			/* not interrupted */
3627 			static int once;
3628 
3629 			/*
3630 			 * This is possible with a recalibrating of the TSC.
3631 			 * Do not produce a call stack, but just report it.
3632 			 */
3633 			if (!once) {
3634 				once++;
3635 				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
3636 					info->before, info->ts);
3637 			}
3638 		} else
3639 			rb_check_timestamp(cpu_buffer, info);
3640 		if (!abs)
3641 			info->delta = 0;
3642 	}
3643 	*event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
3644 	*length -= RB_LEN_TIME_EXTEND;
3645 	*delta = 0;
3646 }
3647 
3648 /**
3649  * rb_update_event - update event type and data
3650  * @cpu_buffer: The per cpu buffer of the @event
3651  * @event: the event to update
3652  * @info: The info to update the @event with (contains length and delta)
3653  *
3654  * Update the type and data fields of the @event. The length
3655  * is the actual size that is written to the ring buffer,
3656  * and with this, we can determine what to place into the
3657  * data field.
3658  */
3659 static void
3660 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
3661 		struct ring_buffer_event *event,
3662 		struct rb_event_info *info)
3663 {
3664 	unsigned length = info->length;
3665 	u64 delta = info->delta;
3666 	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
3667 
3668 	if (!WARN_ON_ONCE(nest >= MAX_NEST))
3669 		cpu_buffer->event_stamp[nest] = info->ts;
3670 
3671 	/*
3672 	 * If we need to add a timestamp, then we
3673 	 * add it to the start of the reserved space.
3674 	 */
3675 	if (unlikely(info->add_timestamp))
3676 		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
3677 
3678 	event->time_delta = delta;
3679 	length -= RB_EVNT_HDR_SIZE;
3680 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
3681 		event->type_len = 0;
3682 		event->array[0] = length;
3683 	} else
3684 		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
3685 }
3686 
3687 static unsigned rb_calculate_event_length(unsigned length)
3688 {
3689 	struct ring_buffer_event event; /* Used only for sizeof array */
3690 
3691 	/* zero length can cause confusion */
3692 	if (!length)
3693 		length++;
3694 
3695 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
3696 		length += sizeof(event.array[0]);
3697 
3698 	length += RB_EVNT_HDR_SIZE;
3699 	length = ALIGN(length, RB_ARCH_ALIGNMENT);
3700 
3701 	/*
3702 	 * In case the time delta is larger than the 27 bits for it
3703 	 * in the header, we need to add a timestamp. If another
3704 	 * event comes in when trying to discard this one to increase
3705 	 * the length, then the timestamp will be added in the allocated
3706 	 * space of this event. If length is bigger than the size needed
3707 	 * for the TIME_EXTEND, then padding has to be used. The events
3708 	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
3709 	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
3710 	 * As length is a multiple of 4, we only need to worry if it
3711 	 * is 12 (RB_LEN_TIME_EXTEND + 4).
3712 	 */
3713 	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
3714 		length += RB_ALIGNMENT;
3715 
3716 	return length;
3717 }
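
/*
 * Illustrative sketch (not part of the original file): the 12-byte corner
 * case handled above, assuming RB_EVNT_HDR_SIZE is 4, RB_ALIGNMENT is 4 and
 * forced 8-byte alignment is not in effect:
 *
 *	requested data size:	5..8 bytes
 *	+ RB_EVNT_HDR_SIZE:	9..12 bytes
 *	ALIGN(, RB_ALIGNMENT):	12 bytes == RB_LEN_TIME_EXTEND + 4
 *
 * If such an event were later discarded, a TIME_EXTEND (8 bytes) would leave
 * only 4 bytes, less than the 8-byte minimum padding event, so the length is
 * bumped by another RB_ALIGNMENT to 16.
 */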
3718 
3719 static inline bool
3720 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3721 		  struct ring_buffer_event *event)
3722 {
3723 	unsigned long new_index, old_index;
3724 	struct buffer_page *bpage;
3725 	unsigned long addr;
3726 
3727 	new_index = rb_event_index(cpu_buffer, event);
3728 	old_index = new_index + rb_event_ts_length(event);
3729 	addr = (unsigned long)event;
3730 	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3731 
3732 	bpage = READ_ONCE(cpu_buffer->tail_page);
3733 
3734 	/*
3735 	 * Make sure the tail_page is still the same and
3736 	 * the next write location is the end of this event
3737 	 */
3738 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3739 		unsigned long write_mask =
3740 			local_read(&bpage->write) & ~RB_WRITE_MASK;
3741 		unsigned long event_length = rb_event_length(event);
3742 
3743 		/*
3744 		 * The before_stamp must be made different from the write_stamp
3745 		 * to make sure that the next event adds an absolute
3746 		 * value and does not rely on the saved write stamp, which
3747 		 * is now going to be bogus.
3748 		 *
3749 		 * By setting the before_stamp to zero, the next event
3750 		 * is not going to use the write_stamp and will instead
3751 		 * create an absolute timestamp. This means there's no
3752 		 * reason to update the write_stamp!
3753 		 */
3754 		rb_time_set(&cpu_buffer->before_stamp, 0);
3755 
3756 		/*
3757 		 * If an event were to come in now, it would see that the
3758 		 * write_stamp and the before_stamp are different, and assume
3759 		 * that this event just added itself before updating
3760 		 * the write stamp. The interrupting event will fix the
3761 		 * write stamp for us, and use an absolute timestamp.
3762 		 */
3763 
3764 		/*
3765 		 * This is on the tail page. It is possible that
3766 		 * a write could come in and move the tail page
3767 		 * and write to the next page. That is fine
3768 		 * because we just shorten what is on this page.
3769 		 */
3770 		old_index += write_mask;
3771 		new_index += write_mask;
3772 
3773 		/* caution: old_index gets updated on cmpxchg failure */
3774 		if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
3775 			/* update counters */
3776 			local_sub(event_length, &cpu_buffer->entries_bytes);
3777 			return true;
3778 		}
3779 	}
3780 
3781 	/* could not discard */
3782 	return false;
3783 }
3784 
3785 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3786 {
3787 	local_inc(&cpu_buffer->committing);
3788 	local_inc(&cpu_buffer->commits);
3789 }
3790 
3791 static __always_inline void
3792 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3793 {
3794 	unsigned long max_count;
3795 
3796 	/*
3797 	 * We only race with interrupts and NMIs on this CPU.
3798 	 * If we own the commit event, then we can commit
3799 	 * all others that interrupted us, since the interruptions
3800 	 * are in stack format (they finish before they come
3801 	 * back to us). This allows us to do a simple loop to
3802 	 * assign the commit to the tail.
3803 	 */
3804  again:
3805 	max_count = cpu_buffer->nr_pages * 100;
3806 
3807 	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3808 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3809 			return;
3810 		if (RB_WARN_ON(cpu_buffer,
3811 			       rb_is_reader_page(cpu_buffer->tail_page)))
3812 			return;
3813 		/*
3814 		 * No need for a memory barrier here, as the update
3815 		 * of the tail_page did it for this page.
3816 		 */
3817 		local_set(&cpu_buffer->commit_page->page->commit,
3818 			  rb_page_write(cpu_buffer->commit_page));
3819 		rb_inc_page(&cpu_buffer->commit_page);
3820 		if (cpu_buffer->ring_meta) {
3821 			struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3822 			meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
3823 		}
3824 		/* add barrier to keep gcc from optimizing too much */
3825 		barrier();
3826 	}
3827 	while (rb_commit_index(cpu_buffer) !=
3828 	       rb_page_write(cpu_buffer->commit_page)) {
3829 
3830 		/* Make sure the readers see the content of what is committed. */
3831 		smp_wmb();
3832 		local_set(&cpu_buffer->commit_page->page->commit,
3833 			  rb_page_write(cpu_buffer->commit_page));
3834 		RB_WARN_ON(cpu_buffer,
3835 			   local_read(&cpu_buffer->commit_page->page->commit) &
3836 			   ~RB_WRITE_MASK);
3837 		barrier();
3838 	}
3839 
3840 	/* again, keep gcc from optimizing */
3841 	barrier();
3842 
3843 	/*
3844 	 * If an interrupt came in just after the first while loop
3845 	 * and pushed the tail page forward, we will be left with
3846 	 * a dangling commit that will never go forward.
3847 	 */
3848 	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3849 		goto again;
3850 }
3851 
3852 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3853 {
3854 	unsigned long commits;
3855 
3856 	if (RB_WARN_ON(cpu_buffer,
3857 		       !local_read(&cpu_buffer->committing)))
3858 		return;
3859 
3860  again:
3861 	commits = local_read(&cpu_buffer->commits);
3862 	/* synchronize with interrupts */
3863 	barrier();
3864 	if (local_read(&cpu_buffer->committing) == 1)
3865 		rb_set_commit_to_write(cpu_buffer);
3866 
3867 	local_dec(&cpu_buffer->committing);
3868 
3869 	/* synchronize with interrupts */
3870 	barrier();
3871 
3872 	/*
3873 	 * Need to account for interrupts coming in between the
3874 	 * updating of the commit page and the clearing of the
3875 	 * committing counter.
3876 	 */
3877 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3878 	    !local_read(&cpu_buffer->committing)) {
3879 		local_inc(&cpu_buffer->committing);
3880 		goto again;
3881 	}
3882 }
3883 
3884 static inline void rb_event_discard(struct ring_buffer_event *event)
3885 {
3886 	if (extended_time(event))
3887 		event = skip_time_extend(event);
3888 
3889 	/* array[0] holds the actual length for the discarded event */
3890 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3891 	event->type_len = RINGBUF_TYPE_PADDING;
3892 	/* time delta must be non zero */
3893 	if (!event->time_delta)
3894 		event->time_delta = 1;
3895 }
3896 
3897 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3898 {
3899 	local_inc(&cpu_buffer->entries);
3900 	rb_end_commit(cpu_buffer);
3901 }
3902 
3903 static __always_inline void
3904 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3905 {
3906 	if (buffer->irq_work.waiters_pending) {
3907 		buffer->irq_work.waiters_pending = false;
3908 		/* irq_work_queue() supplies its own memory barriers */
3909 		irq_work_queue(&buffer->irq_work.work);
3910 	}
3911 
3912 	if (cpu_buffer->irq_work.waiters_pending) {
3913 		cpu_buffer->irq_work.waiters_pending = false;
3914 		/* irq_work_queue() supplies its own memory barriers */
3915 		irq_work_queue(&cpu_buffer->irq_work.work);
3916 	}
3917 
3918 	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3919 		return;
3920 
3921 	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3922 		return;
3923 
3924 	if (!cpu_buffer->irq_work.full_waiters_pending)
3925 		return;
3926 
3927 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3928 
3929 	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3930 		return;
3931 
3932 	cpu_buffer->irq_work.wakeup_full = true;
3933 	cpu_buffer->irq_work.full_waiters_pending = false;
3934 	/* irq_work_queue() supplies its own memory barriers */
3935 	irq_work_queue(&cpu_buffer->irq_work.work);
3936 }
3937 
3938 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3939 # define do_ring_buffer_record_recursion()	\
3940 	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3941 #else
3942 # define do_ring_buffer_record_recursion() do { } while (0)
3943 #endif
3944 
3945 /*
3946  * The lock and unlock are done within a preempt disable section.
3947  * The current_context per_cpu variable can only be modified
3948  * by the current task between lock and unlock. But it can
3949  * be modified more than once via an interrupt. To pass this
3950  * information from the lock to the unlock without having to
3951  * access the 'in_interrupt()' functions again (which do show
3952  * a bit of overhead in something as critical as function tracing),
3953  * we use a bitmask trick.
3954  *
3955  *  bit 1 =  NMI context
3956  *  bit 2 =  IRQ context
3957  *  bit 3 =  SoftIRQ context
3958  *  bit 4 =  normal context.
3959  *
3960  * This works because this is the order of contexts that can
3961  * preempt other contexts. A SoftIRQ never preempts an IRQ
3962  * context.
3963  *
3964  * When the context is determined, the corresponding bit is
3965  * checked and set (if it was set, then a recursion of that context
3966  * happened).
3967  *
3968  * On unlock, we need to clear this bit. To do so, just subtract
3969  * 1 from the current_context and AND it to itself.
3970  *
3971  * (binary)
3972  *  101 - 1 = 100
3973  *  101 & 100 = 100 (clearing bit zero)
3974  *
3975  *  1010 - 1 = 1001
3976  *  1010 & 1001 = 1000 (clearing bit 1)
3977  *
3978  * The least significant bit can be cleared this way, and it
3979  * just so happens that it is the same bit corresponding to
3980  * the current context.
3981  *
3982  * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3983  * is set when a recursion is detected at the current context, and if
3984  * the TRANSITION bit is already set, it will fail the recursion.
3985  * This is needed because there's a lag between the changing of
3986  * interrupt context and updating the preempt count. In this case,
3987  * a false positive will be found. To handle this, one extra recursion
3988  * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3989  * bit is already set, then it is considered a recursion and the function
3990  * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3991  *
3992  * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3993  * to be cleared. Even if it wasn't the context that set it. That is,
3994  * if an interrupt comes in while NORMAL bit is set and the ring buffer
3995  * is called before preempt_count() is updated, since the check will
3996  * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3997  * NMI then comes in, it will set the NMI bit, but when the NMI code
3998  * does the trace_recursive_unlock() it will clear the TRANSITION bit
3999  * and leave the NMI bit set. But this is fine, because the interrupt
4000  * code that set the TRANSITION bit will then clear the NMI bit when it
4001  * calls trace_recursive_unlock(). If another NMI comes in, it will
4002  * set the TRANSITION bit and continue.
4003  *
4004  * Note: The TRANSITION bit only handles a single transition between contexts.
4005  */
4006 
4007 static __always_inline bool
4008 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
4009 {
4010 	unsigned int val = cpu_buffer->current_context;
4011 	int bit = interrupt_context_level();
4012 
4013 	bit = RB_CTX_NORMAL - bit;
4014 
4015 	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
4016 		/*
4017 		 * It is possible that this was called by transitioning
4018 		 * between interrupt context, and preempt_count() has not
4019 		 * been updated yet. In this case, use the TRANSITION bit.
4020 		 */
4021 		bit = RB_CTX_TRANSITION;
4022 		if (val & (1 << (bit + cpu_buffer->nest))) {
4023 			do_ring_buffer_record_recursion();
4024 			return true;
4025 		}
4026 	}
4027 
4028 	val |= (1 << (bit + cpu_buffer->nest));
4029 	cpu_buffer->current_context = val;
4030 
4031 	return false;
4032 }
4033 
4034 static __always_inline void
4035 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
4036 {
4037 	cpu_buffer->current_context &=
4038 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
4039 }
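
/*
 * Illustrative sketch (not part of the original file) of the bit dance
 * described above, assuming nest == 0 and the bit assignments from the
 * comment before trace_recursive_lock() (bit 4 = normal, bit 2 = IRQ):
 *
 *	normal context locks:	current_context = 0b10000
 *	IRQ interrupts, locks:	current_context = 0b10100
 *	IRQ unlocks:		0b10100 & (0b10100 - 1) = 0b10000
 *	normal unlocks:		0b10000 & (0b10000 - 1) = 0b00000
 *
 * Each unlock clears only the least significant set bit, which belongs to
 * the most recently entered context.
 */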
4040 
4041 /* The recursive locking above uses 5 bits */
4042 #define NESTED_BITS 5
4043 
4044 /**
4045  * ring_buffer_nest_start - Allow to trace while nested
4046  * @buffer: The ring buffer to modify
4047  *
4048  * The ring buffer has a safety mechanism to prevent recursion.
4049  * But there may be a case where a trace needs to be done while
4050  * tracing something else. In this case, calling this function
4051  * will allow this function to nest within a currently active
4052  * ring_buffer_lock_reserve().
4053  *
4054  * Call this function before calling another ring_buffer_lock_reserve() and
4055  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
4056  */
4057 void ring_buffer_nest_start(struct trace_buffer *buffer)
4058 {
4059 	struct ring_buffer_per_cpu *cpu_buffer;
4060 	int cpu;
4061 
4062 	/* Enabled by ring_buffer_nest_end() */
4063 	preempt_disable_notrace();
4064 	cpu = raw_smp_processor_id();
4065 	cpu_buffer = buffer->buffers[cpu];
4066 	/* This is the shift value for the above recursive locking */
4067 	cpu_buffer->nest += NESTED_BITS;
4068 }
4069 
4070 /**
4071  * ring_buffer_nest_end - Allow to trace while nested
4072  * @buffer: The ring buffer to modify
4073  *
4074  * Must be called after ring_buffer_nest_start() and after the
4075  * ring_buffer_unlock_commit().
4076  */
4077 void ring_buffer_nest_end(struct trace_buffer *buffer)
4078 {
4079 	struct ring_buffer_per_cpu *cpu_buffer;
4080 	int cpu;
4081 
4082 	/* disabled by ring_buffer_nest_start() */
4083 	cpu = raw_smp_processor_id();
4084 	cpu_buffer = buffer->buffers[cpu];
4085 	/* This is the shift value for the above recursive locking */
4086 	cpu_buffer->nest -= NESTED_BITS;
4087 	preempt_enable_notrace();
4088 }
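
/*
 * Illustrative sketch (not part of the original file): nesting a trace
 * inside an active reserve, as described in the kernel-doc above. The inner
 * reserve/commit pair is otherwise a normal one.
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		...
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);
 */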
4089 
4090 /**
4091  * ring_buffer_unlock_commit - commit a reserved event
4092  * @buffer: The buffer to commit to
4093  *
4094  * This commits the data to the ring buffer, and releases any locks held.
4095  *
4096  * Must be paired with ring_buffer_lock_reserve.
4097  */
4098 int ring_buffer_unlock_commit(struct trace_buffer *buffer)
4099 {
4100 	struct ring_buffer_per_cpu *cpu_buffer;
4101 	int cpu = raw_smp_processor_id();
4102 
4103 	cpu_buffer = buffer->buffers[cpu];
4104 
4105 	rb_commit(cpu_buffer);
4106 
4107 	rb_wakeups(buffer, cpu_buffer);
4108 
4109 	trace_recursive_unlock(cpu_buffer);
4110 
4111 	preempt_enable_notrace();
4112 
4113 	return 0;
4114 }
4115 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
4116 
4117 /* Special value to validate all deltas on a page. */
4118 #define CHECK_FULL_PAGE		1L
4119 
4120 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
4121 
4122 static const char *show_irq_str(int bits)
4123 {
4124 	const char *type[] = {
4125 		".",	// 0
4126 		"s",	// 1
4127 		"h",	// 2
4128 		"Hs",	// 3
4129 		"n",	// 4
4130 		"Ns",	// 5
4131 		"Nh",	// 6
4132 		"NHs",	// 7
4133 	};
4134 
4135 	return type[bits];
4136 }
4137 
4138 /* Assume this is a trace event */
4139 static const char *show_flags(struct ring_buffer_event *event)
4140 {
4141 	struct trace_entry *entry;
4142 	int bits = 0;
4143 
4144 	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4145 		return "X";
4146 
4147 	entry = ring_buffer_event_data(event);
4148 
4149 	if (entry->flags & TRACE_FLAG_SOFTIRQ)
4150 		bits |= 1;
4151 
4152 	if (entry->flags & TRACE_FLAG_HARDIRQ)
4153 		bits |= 2;
4154 
4155 	if (entry->flags & TRACE_FLAG_NMI)
4156 		bits |= 4;
4157 
4158 	return show_irq_str(bits);
4159 }
4160 
4161 static const char *show_irq(struct ring_buffer_event *event)
4162 {
4163 	struct trace_entry *entry;
4164 
4165 	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4166 		return "";
4167 
4168 	entry = ring_buffer_event_data(event);
4169 	if (entry->flags & TRACE_FLAG_IRQS_OFF)
4170 		return "d";
4171 	return "";
4172 }
4173 
4174 static const char *show_interrupt_level(void)
4175 {
4176 	unsigned long pc = preempt_count();
4177 	unsigned char level = 0;
4178 
4179 	if (pc & SOFTIRQ_OFFSET)
4180 		level |= 1;
4181 
4182 	if (pc & HARDIRQ_MASK)
4183 		level |= 2;
4184 
4185 	if (pc & NMI_MASK)
4186 		level |= 4;
4187 
4188 	return show_irq_str(level);
4189 }
4190 
4191 static void dump_buffer_page(struct buffer_data_page *bpage,
4192 			     struct rb_event_info *info,
4193 			     unsigned long tail)
4194 {
4195 	struct ring_buffer_event *event;
4196 	u64 ts, delta;
4197 	int e;
4198 
4199 	ts = bpage->time_stamp;
4200 	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
4201 
4202 	for (e = 0; e < tail; e += rb_event_length(event)) {
4203 
4204 		event = (struct ring_buffer_event *)(bpage->data + e);
4205 
4206 		switch (event->type_len) {
4207 
4208 		case RINGBUF_TYPE_TIME_EXTEND:
4209 			delta = rb_event_time_stamp(event);
4210 			ts += delta;
4211 			pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
4212 				e, ts, delta);
4213 			break;
4214 
4215 		case RINGBUF_TYPE_TIME_STAMP:
4216 			delta = rb_event_time_stamp(event);
4217 			ts = rb_fix_abs_ts(delta, ts);
4218 			pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
4219 				e, ts, delta);
4220 			break;
4221 
4222 		case RINGBUF_TYPE_PADDING:
4223 			ts += event->time_delta;
4224 			pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
4225 				e, ts, event->time_delta);
4226 			break;
4227 
4228 		case RINGBUF_TYPE_DATA:
4229 			ts += event->time_delta;
4230 			pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
4231 				e, ts, event->time_delta,
4232 				show_flags(event), show_irq(event));
4233 			break;
4234 
4235 		default:
4236 			break;
4237 		}
4238 	}
4239 	pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
4240 }
4241 
4242 static DEFINE_PER_CPU(atomic_t, checking);
4243 static atomic_t ts_dump;
4244 
4245 #define buffer_warn_return(fmt, ...)					\
4246 	do {								\
4247 		/* If another report is happening, ignore this one */	\
4248 		if (atomic_inc_return(&ts_dump) != 1) {			\
4249 			atomic_dec(&ts_dump);				\
4250 			goto out;					\
4251 		}							\
4252 		atomic_inc(&cpu_buffer->record_disabled);		\
4253 		pr_warn(fmt, ##__VA_ARGS__);				\
4254 		dump_buffer_page(bpage, info, tail);			\
4255 		atomic_dec(&ts_dump);					\
4256 		/* There are some cases during boot up where this can happen */ \
4257 		if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))	\
4258 			/* Do not re-enable checking */			\
4259 			return;						\
4260 	} while (0)
4261 
4262 /*
4263  * Check if the current event time stamp matches the deltas on
4264  * the buffer page.
4265  */
4266 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4267 			 struct rb_event_info *info,
4268 			 unsigned long tail)
4269 {
4270 	struct buffer_data_page *bpage;
4271 	u64 ts, delta;
4272 	bool full = false;
4273 	int ret;
4274 
4275 	bpage = info->tail_page->page;
4276 
4277 	if (tail == CHECK_FULL_PAGE) {
4278 		full = true;
4279 		tail = local_read(&bpage->commit);
4280 	} else if (info->add_timestamp &
4281 		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
4282 		/* Ignore events with absolute time stamps */
4283 		return;
4284 	}
4285 
4286 	/*
4287 	 * Do not check the first event (skip possible extends too).
4288 	 * Also do not check if previous events have not been committed.
4289 	 */
4290 	if (tail <= 8 || tail > local_read(&bpage->commit))
4291 		return;
4292 
4293 	/*
4294 	 * If this interrupted another event, skip this check.
4295 	 */
4296 	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
4297 		goto out;
4298 
4299 	ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
4300 	if (ret < 0) {
4301 		if (delta < ts) {
4302 			buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
4303 					   cpu_buffer->cpu, ts, delta);
4304 			goto out;
4305 		}
4306 	}
4307 	if ((full && ts > info->ts) ||
4308 	    (!full && ts + info->delta != info->ts)) {
4309 		buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
4310 				   cpu_buffer->cpu,
4311 				   ts + info->delta, info->ts, info->delta,
4312 				   info->before, info->after,
4313 				   full ? " (full)" : "", show_interrupt_level());
4314 	}
4315 out:
4316 	atomic_dec(this_cpu_ptr(&checking));
4317 }
4318 #else
4319 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
4320 			 struct rb_event_info *info,
4321 			 unsigned long tail)
4322 {
4323 }
4324 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
4325 
4326 static struct ring_buffer_event *
4327 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
4328 		  struct rb_event_info *info)
4329 {
4330 	struct ring_buffer_event *event;
4331 	struct buffer_page *tail_page;
4332 	unsigned long tail, write, w;
4333 
4334 	/* Don't let the compiler play games with cpu_buffer->tail_page */
4335 	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
4336 
4337  /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
4338 	barrier();
4339 	rb_time_read(&cpu_buffer->before_stamp, &info->before);
4340 	rb_time_read(&cpu_buffer->write_stamp, &info->after);
4341 	barrier();
4342 	info->ts = rb_time_stamp(cpu_buffer->buffer);
4343 
4344 	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
4345 		info->delta = info->ts;
4346 	} else {
4347 		/*
4348 		 * If interrupting an event time update, we may need an
4349 		 * absolute timestamp.
4350 		 * Don't bother if this is the start of a new page (w == 0).
4351 		 */
4352 		if (!w) {
4353 			/* Use the sub-buffer timestamp */
4354 			info->delta = 0;
4355 		} else if (unlikely(info->before != info->after)) {
4356 			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
4357 			info->length += RB_LEN_TIME_EXTEND;
4358 		} else {
4359 			info->delta = info->ts - info->after;
4360 			if (unlikely(test_time_stamp(info->delta))) {
4361 				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
4362 				info->length += RB_LEN_TIME_EXTEND;
4363 			}
4364 		}
4365 	}
4366 
4367  /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
4368 
4369  /*C*/	write = local_add_return(info->length, &tail_page->write);
4370 
4371 	/* set write to only the index of the write */
4372 	write &= RB_WRITE_MASK;
4373 
4374 	tail = write - info->length;
4375 
4376 	/* See if we shot past the end of this buffer page */
4377 	if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
4378 		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
4379 		return rb_move_tail(cpu_buffer, tail, info);
4380 	}
4381 
4382 	if (likely(tail == w)) {
4383 		/* Nothing interrupted us between A and C */
4384  /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
4385 		/*
4386 		 * If something came in between C and D, the write stamp
4387 		 * may now not be in sync. But that's fine as the before_stamp
4388 		 * will be different and then next event will just be forced
4389 		 * to use an absolute timestamp.
4390 		 */
4391 		if (likely(!(info->add_timestamp &
4392 			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4393 			/* This did not interrupt any time update */
4394 			info->delta = info->ts - info->after;
4395 		else
4396 			/* Just use full timestamp for interrupting event */
4397 			info->delta = info->ts;
4398 		check_buffer(cpu_buffer, info, tail);
4399 	} else {
4400 		u64 ts;
4401 		/* SLOW PATH - Interrupted between A and C */
4402 
4403 		/* Save the old before_stamp */
4404 		rb_time_read(&cpu_buffer->before_stamp, &info->before);
4405 
4406 		/*
4407 		 * Read a new timestamp and update the before_stamp to make
4408 		 * the next event after this one force using an absolute
4409 		 * timestamp. This is in case an interrupt were to come in
4410 		 * between E and F.
4411 		 */
4412 		ts = rb_time_stamp(cpu_buffer->buffer);
4413 		rb_time_set(&cpu_buffer->before_stamp, ts);
4414 
4415 		barrier();
4416  /*E*/		rb_time_read(&cpu_buffer->write_stamp, &info->after);
4417 		barrier();
4418  /*F*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
4419 		    info->after == info->before && info->after < ts) {
4420 			/*
4421 			 * Nothing came after this event between C and F, it is
4422 			 * safe to use info->after for the delta as it
4423 			 * matched info->before and is still valid.
4424 			 */
4425 			info->delta = ts - info->after;
4426 		} else {
4427 			/*
4428 			 * Interrupted between C and F:
4429 			 * Lost the previous event's time stamp. Just set the
4430 			 * delta to zero, and this will be the same time as
4431 			 * the event this event interrupted. And the events that
4432 			 * came after this will still be correct (as they would
4433 			 * have built their delta on the previous event).
4434 			 */
4435 			info->delta = 0;
4436 		}
4437 		info->ts = ts;
4438 		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
4439 	}
4440 
4441 	/*
4442 	 * If this is the first commit on the page, then it has the same
4443 	 * timestamp as the page itself.
4444 	 */
4445 	if (unlikely(!tail && !(info->add_timestamp &
4446 				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4447 		info->delta = 0;
4448 
4449 	/* We reserved something on the buffer */
4450 
4451 	event = __rb_page_index(tail_page, tail);
4452 	rb_update_event(cpu_buffer, event, info);
4453 
4454 	local_inc(&tail_page->entries);
4455 
4456 	/*
4457 	 * If this is the first commit on the page, then update
4458 	 * its timestamp.
4459 	 */
4460 	if (unlikely(!tail))
4461 		tail_page->page->time_stamp = info->ts;
4462 
4463 	/* account for these added bytes */
4464 	local_add(info->length, &cpu_buffer->entries_bytes);
4465 
4466 	return event;
4467 }
4468 
4469 static __always_inline struct ring_buffer_event *
4470 rb_reserve_next_event(struct trace_buffer *buffer,
4471 		      struct ring_buffer_per_cpu *cpu_buffer,
4472 		      unsigned long length)
4473 {
4474 	struct ring_buffer_event *event;
4475 	struct rb_event_info info;
4476 	int nr_loops = 0;
4477 	int add_ts_default;
4478 
4479 	/*
4480 	 * The ring buffer does cmpxchg as well as atomic64 operations
4481 	 * (for which some archs use locking), so make sure this
4482 	 * is safe in NMI context
4483 	 */
4484 	if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
4485 	     IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
4486 	    (unlikely(in_nmi()))) {
4487 		return NULL;
4488 	}
4489 
4490 	rb_start_commit(cpu_buffer);
4491 	/* The commit page can not change after this */
4492 
4493 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4494 	/*
4495 	 * Due to the ability to swap a cpu buffer from a buffer
4496 	 * it is possible it was swapped before we committed.
4497 	 * (committing stops a swap). We check for it here and
4498 	 * if it happened, we have to fail the write.
4499 	 */
4500 	barrier();
4501 	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
4502 		local_dec(&cpu_buffer->committing);
4503 		local_dec(&cpu_buffer->commits);
4504 		return NULL;
4505 	}
4506 #endif
4507 
4508 	info.length = rb_calculate_event_length(length);
4509 
4510 	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
4511 		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
4512 		info.length += RB_LEN_TIME_EXTEND;
4513 		if (info.length > cpu_buffer->buffer->max_data_size)
4514 			goto out_fail;
4515 	} else {
4516 		add_ts_default = RB_ADD_STAMP_NONE;
4517 	}
4518 
4519  again:
4520 	info.add_timestamp = add_ts_default;
4521 	info.delta = 0;
4522 
4523 	/*
4524 	 * We allow for interrupts to reenter here and do a trace.
4525 	 * If one does, it will cause this original code to loop
4526 	 * back here. Even with heavy interrupts happening, this
4527 	 * should only happen a few times in a row. If this happens
4528 	 * 1000 times in a row, there must be either an interrupt
4529 	 * storm or we have something buggy.
4530 	 * Bail!
4531 	 */
4532 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
4533 		goto out_fail;
4534 
4535 	event = __rb_reserve_next(cpu_buffer, &info);
4536 
4537 	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
4538 		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
4539 			info.length -= RB_LEN_TIME_EXTEND;
4540 		goto again;
4541 	}
4542 
4543 	if (likely(event))
4544 		return event;
4545  out_fail:
4546 	rb_end_commit(cpu_buffer);
4547 	return NULL;
4548 }
4549 
4550 /**
4551  * ring_buffer_lock_reserve - reserve a part of the buffer
4552  * @buffer: the ring buffer to reserve from
4553  * @length: the length of the data to reserve (excluding event header)
4554  *
4555  * Returns a reserved event on the ring buffer to copy directly to.
4556  * The user of this interface will need to get the body to write into
4557  * and can use the ring_buffer_event_data() interface.
4558  *
4559  * The length is the length of the data needed, not the event length
4560  * which also includes the event header.
4561  *
4562  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
4563  * If NULL is returned, then nothing has been allocated or locked.
4564  */
4565 struct ring_buffer_event *
4566 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
4567 {
4568 	struct ring_buffer_per_cpu *cpu_buffer;
4569 	struct ring_buffer_event *event;
4570 	int cpu;
4571 
4572 	/* If we are tracing schedule, we don't want to recurse */
4573 	preempt_disable_notrace();
4574 
4575 	if (unlikely(atomic_read(&buffer->record_disabled)))
4576 		goto out;
4577 
4578 	cpu = raw_smp_processor_id();
4579 
4580 	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
4581 		goto out;
4582 
4583 	cpu_buffer = buffer->buffers[cpu];
4584 
4585 	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
4586 		goto out;
4587 
4588 	if (unlikely(length > buffer->max_data_size))
4589 		goto out;
4590 
4591 	if (unlikely(trace_recursive_lock(cpu_buffer)))
4592 		goto out;
4593 
4594 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
4595 	if (!event)
4596 		goto out_unlock;
4597 
4598 	return event;
4599 
4600  out_unlock:
4601 	trace_recursive_unlock(cpu_buffer);
4602  out:
4603 	preempt_enable_notrace();
4604 	return NULL;
4605 }
4606 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
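
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * ring_buffer_lock_reserve() and ring_buffer_unlock_commit(). "my_entry" and
 * my_trace() are hypothetical, not defined by this file.
 *
 *	struct my_entry { int cpu; u64 value; };
 *
 *	static int my_trace(struct trace_buffer *buffer, u64 value)
 *	{
 *		struct ring_buffer_event *event;
 *		struct my_entry *entry;
 *
 *		event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *		if (!event)
 *			return -EBUSY;
 *
 *		entry = ring_buffer_event_data(event);
 *		entry->cpu = raw_smp_processor_id();
 *		entry->value = value;
 *
 *		return ring_buffer_unlock_commit(buffer);
 *	}
 */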
4607 
4608 /*
4609  * Decrement the entries to the page that an event is on.
4610  * The event does not even need to exist, only the pointer
4611  * to the page it is on. This may only be called before the commit
4612  * takes place.
4613  */
4614 static inline void
4615 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
4616 		   struct ring_buffer_event *event)
4617 {
4618 	unsigned long addr = (unsigned long)event;
4619 	struct buffer_page *bpage = cpu_buffer->commit_page;
4620 	struct buffer_page *start;
4621 
4622 	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
4623 
4624 	/* Do the likely case first */
4625 	if (likely(bpage->page == (void *)addr)) {
4626 		local_dec(&bpage->entries);
4627 		return;
4628 	}
4629 
4630 	/*
4631 	 * Because the commit page may be on the reader page we
4632 	 * start with the next page and check the end loop there.
4633 	 */
4634 	rb_inc_page(&bpage);
4635 	start = bpage;
4636 	do {
4637 		if (bpage->page == (void *)addr) {
4638 			local_dec(&bpage->entries);
4639 			return;
4640 		}
4641 		rb_inc_page(&bpage);
4642 	} while (bpage != start);
4643 
4644 	/* commit not part of this buffer?? */
4645 	RB_WARN_ON(cpu_buffer, 1);
4646 }
4647 
4648 /**
4649  * ring_buffer_discard_commit - discard an event that has not been committed
4650  * @buffer: the ring buffer
4651  * @event: non committed event to discard
4652  *
4653  * Sometimes an event that is in the ring buffer needs to be ignored.
4654  * This function lets the user discard an event in the ring buffer
4655  * and then that event will not be read later.
4656  *
4657  * This function only works if it is called before the item has been
4658  * committed. It will try to free the event from the ring buffer
4659  * if another event has not been added behind it.
4660  *
4661  * If another event has been added behind it, it will set the event
4662  * up as discarded, and perform the commit.
4663  *
4664  * If this function is called, do not call ring_buffer_unlock_commit on
4665  * the event.
4666  */
4667 void ring_buffer_discard_commit(struct trace_buffer *buffer,
4668 				struct ring_buffer_event *event)
4669 {
4670 	struct ring_buffer_per_cpu *cpu_buffer;
4671 	int cpu;
4672 
4673 	/* The event is discarded regardless */
4674 	rb_event_discard(event);
4675 
4676 	cpu = smp_processor_id();
4677 	cpu_buffer = buffer->buffers[cpu];
4678 
4679 	/*
4680 	 * This must only be called if the event has not been
4681 	 * committed yet. Thus we can assume that preemption
4682 	 * is still disabled.
4683 	 */
4684 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
4685 
4686 	rb_decrement_entry(cpu_buffer, event);
4687 	if (rb_try_to_discard(cpu_buffer, event))
4688 		goto out;
4689 
4690  out:
4691 	rb_end_commit(cpu_buffer);
4692 
4693 	trace_recursive_unlock(cpu_buffer);
4694 
4695 	preempt_enable_notrace();
4696 
4697 }
4698 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
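
/*
 * Illustrative sketch (not part of the original file): discarding a reserved
 * event that turned out to be unwanted. Per the kernel-doc above, a discarded
 * event must NOT also be passed to ring_buffer_unlock_commit().
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		if (!fill_entry(entry))		// hypothetical helper
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer);
 *	}
 */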
4699 
4700 /**
4701  * ring_buffer_write - write data to the buffer without reserving
4702  * @buffer: The ring buffer to write to.
4703  * @length: The length of the data being written (excluding the event header)
4704  * @data: The data to write to the buffer.
4705  *
4706  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
4707  * one function. If you already have the data to write to the buffer, it
4708  * may be easier to simply call this function.
4709  *
4710  * Note, like ring_buffer_lock_reserve, the length is the length of the data
4711  * and not the length of the event which would hold the header.
4712  */
4713 int ring_buffer_write(struct trace_buffer *buffer,
4714 		      unsigned long length,
4715 		      void *data)
4716 {
4717 	struct ring_buffer_per_cpu *cpu_buffer;
4718 	struct ring_buffer_event *event;
4719 	void *body;
4720 	int ret = -EBUSY;
4721 	int cpu;
4722 
4723 	preempt_disable_notrace();
4724 
4725 	if (atomic_read(&buffer->record_disabled))
4726 		goto out;
4727 
4728 	cpu = raw_smp_processor_id();
4729 
4730 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4731 		goto out;
4732 
4733 	cpu_buffer = buffer->buffers[cpu];
4734 
4735 	if (atomic_read(&cpu_buffer->record_disabled))
4736 		goto out;
4737 
4738 	if (length > buffer->max_data_size)
4739 		goto out;
4740 
4741 	if (unlikely(trace_recursive_lock(cpu_buffer)))
4742 		goto out;
4743 
4744 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
4745 	if (!event)
4746 		goto out_unlock;
4747 
4748 	body = rb_event_data(event);
4749 
4750 	memcpy(body, data, length);
4751 
4752 	rb_commit(cpu_buffer);
4753 
4754 	rb_wakeups(buffer, cpu_buffer);
4755 
4756 	ret = 0;
4757 
4758  out_unlock:
4759 	trace_recursive_unlock(cpu_buffer);
4760 
4761  out:
4762 	preempt_enable_notrace();
4763 
4764 	return ret;
4765 }
4766 EXPORT_SYMBOL_GPL(ring_buffer_write);
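
/*
 * Illustrative sketch (not part of the original file): writing an already
 * prepared payload in one call instead of reserve + commit. "my_entry" is a
 * hypothetical payload type.
 *
 *	struct my_entry entry = { .cpu = raw_smp_processor_id(), .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		;	// -EBUSY: recording disabled or length too large
 */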
4767 
4768 /*
4769  * The total entries in the ring buffer is the running counter
4770  * of entries entered into the ring buffer, minus the sum of
4771  * the entries read from the ring buffer and the number of
4772  * entries that were overwritten.
4773  */
4774 static inline unsigned long
4775 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4776 {
4777 	return local_read(&cpu_buffer->entries) -
4778 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4779 }
4780 
4781 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
4782 {
4783 	return !rb_num_of_entries(cpu_buffer);
4784 }
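
/*
 * Illustrative sketch (not part of the original file): the bookkeeping done
 * by rb_num_of_entries() above. If 1000 events were written, 100 were
 * overwritten (overrun) and 250 have been read, then
 *
 *	entries - (overrun + read) = 1000 - (100 + 250) = 650
 *
 * events remain unconsumed, and rb_per_cpu_empty() returns true only when
 * this count reaches zero.
 */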
4785 
4786 /**
4787  * ring_buffer_record_disable - stop all writes into the buffer
4788  * @buffer: The ring buffer to stop writes to.
4789  *
4790  * This prevents all writes to the buffer. Any attempt to write
4791  * to the buffer after this will fail and return NULL.
4792  *
4793  * The caller should call synchronize_rcu() after this.
4794  */
4795 void ring_buffer_record_disable(struct trace_buffer *buffer)
4796 {
4797 	atomic_inc(&buffer->record_disabled);
4798 }
4799 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4800 
4801 /**
4802  * ring_buffer_record_enable - enable writes to the buffer
4803  * @buffer: The ring buffer to enable writes
4804  *
4805  * Note, multiple disables will need the same number of enables
4806  * to truly enable the writing (much like preempt_disable).
4807  */
4808 void ring_buffer_record_enable(struct trace_buffer *buffer)
4809 {
4810 	atomic_dec(&buffer->record_disabled);
4811 }
4812 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4813 
4814 /**
4815  * ring_buffer_record_off - stop all writes into the buffer
4816  * @buffer: The ring buffer to stop writes to.
4817  *
4818  * This prevents all writes to the buffer. Any attempt to write
4819  * to the buffer after this will fail and return NULL.
4820  *
4821  * This is different than ring_buffer_record_disable() as
4822  * it works like an on/off switch, whereas the disable() version
4823  * must be paired with an enable().
4824  */
4825 void ring_buffer_record_off(struct trace_buffer *buffer)
4826 {
4827 	unsigned int rd;
4828 	unsigned int new_rd;
4829 
4830 	rd = atomic_read(&buffer->record_disabled);
4831 	do {
4832 		new_rd = rd | RB_BUFFER_OFF;
4833 	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4834 }
4835 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4836 
4837 /**
4838  * ring_buffer_record_on - restart writes into the buffer
4839  * @buffer: The ring buffer to start writes to.
4840  *
4841  * This enables all writes to the buffer that was disabled by
4842  * ring_buffer_record_off().
4843  *
4844  * This is different than ring_buffer_record_enable() as
4845  * it works like an on/off switch, whereas the enable() version
4846  * must be paired with a disable().
4847  */
4848 void ring_buffer_record_on(struct trace_buffer *buffer)
4849 {
4850 	unsigned int rd;
4851 	unsigned int new_rd;
4852 
4853 	rd = atomic_read(&buffer->record_disabled);
4854 	do {
4855 		new_rd = rd & ~RB_BUFFER_OFF;
4856 	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4857 }
4858 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
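
/*
 * Illustrative sketch (not part of the original file): the difference between
 * the on/off switch and the counting disable/enable, as described in the
 * kernel-doc above.
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);		// writes allowed again
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);	// still disabled
 *	ring_buffer_record_enable(buffer);	// writes allowed again
 */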
4859 
4860 /**
4861  * ring_buffer_record_is_on - return true if the ring buffer can write
4862  * @buffer: The ring buffer to see if write is enabled
4863  *
4864  * Returns true if the ring buffer is in a state that it accepts writes.
4865  */
4866 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4867 {
4868 	return !atomic_read(&buffer->record_disabled);
4869 }
4870 
4871 /**
4872  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4873  * @buffer: The ring buffer to see if write is set enabled
4874  *
4875  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4876  * Note that this does NOT mean it is in a writable state.
4877  *
4878  * It may return true when the ring buffer has been disabled by
4879  * ring_buffer_record_disable(), as that is a temporary disabling of
4880  * the ring buffer.
4881  */
4882 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4883 {
4884 	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4885 }
4886 
4887 /**
4888  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4889  * @buffer: The ring buffer to stop writes to.
4890  * @cpu: The CPU buffer to stop
4891  *
4892  * This prevents all writes to the buffer. Any attempt to write
4893  * to the buffer after this will fail and return NULL.
4894  *
4895  * The caller should call synchronize_rcu() after this.
4896  */
4897 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4898 {
4899 	struct ring_buffer_per_cpu *cpu_buffer;
4900 
4901 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4902 		return;
4903 
4904 	cpu_buffer = buffer->buffers[cpu];
4905 	atomic_inc(&cpu_buffer->record_disabled);
4906 }
4907 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4908 
4909 /**
4910  * ring_buffer_record_enable_cpu - enable writes to the buffer
4911  * @buffer: The ring buffer to enable writes
4912  * @cpu: The CPU to enable.
4913  *
4914  * Note, multiple disables will need the same number of enables
4915  * to truly enable the writing (much like preempt_disable).
4916  */
4917 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4918 {
4919 	struct ring_buffer_per_cpu *cpu_buffer;
4920 
4921 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4922 		return;
4923 
4924 	cpu_buffer = buffer->buffers[cpu];
4925 	atomic_dec(&cpu_buffer->record_disabled);
4926 }
4927 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4928 
4929 /**
4930  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4931  * @buffer: The ring buffer
4932  * @cpu: The per CPU buffer to read from.
4933  */
4934 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4935 {
4936 	unsigned long flags;
4937 	struct ring_buffer_per_cpu *cpu_buffer;
4938 	struct buffer_page *bpage;
4939 	u64 ret = 0;
4940 
4941 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4942 		return 0;
4943 
4944 	cpu_buffer = buffer->buffers[cpu];
4945 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4946 	/*
4947 	 * if the tail is on reader_page, oldest time stamp is on the reader
4948 	 * page
4949 	 */
4950 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4951 		bpage = cpu_buffer->reader_page;
4952 	else
4953 		bpage = rb_set_head_page(cpu_buffer);
4954 	if (bpage)
4955 		ret = bpage->page->time_stamp;
4956 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4957 
4958 	return ret;
4959 }
4960 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4961 
4962 /**
4963  * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4964  * @buffer: The ring buffer
4965  * @cpu: The per CPU buffer to read from.
4966  */
4967 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4968 {
4969 	struct ring_buffer_per_cpu *cpu_buffer;
4970 	unsigned long ret;
4971 
4972 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4973 		return 0;
4974 
4975 	cpu_buffer = buffer->buffers[cpu];
4976 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4977 
4978 	return ret;
4979 }
4980 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4981 
4982 /**
4983  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4984  * @buffer: The ring buffer
4985  * @cpu: The per CPU buffer to get the entries from.
4986  */
4987 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4988 {
4989 	struct ring_buffer_per_cpu *cpu_buffer;
4990 
4991 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4992 		return 0;
4993 
4994 	cpu_buffer = buffer->buffers[cpu];
4995 
4996 	return rb_num_of_entries(cpu_buffer);
4997 }
4998 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4999 
5000 /**
5001  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
5002  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
5003  * @buffer: The ring buffer
5004  * @cpu: The per CPU buffer to get the number of overruns from
5005  */
5006 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
5007 {
5008 	struct ring_buffer_per_cpu *cpu_buffer;
5009 	unsigned long ret;
5010 
5011 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5012 		return 0;
5013 
5014 	cpu_buffer = buffer->buffers[cpu];
5015 	ret = local_read(&cpu_buffer->overrun);
5016 
5017 	return ret;
5018 }
5019 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
5020 
5021 /**
5022  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
5023  * commits failing due to the buffer wrapping around while there are uncommitted
5024  * events, such as during an interrupt storm.
5025  * @buffer: The ring buffer
5026  * @cpu: The per CPU buffer to get the number of overruns from
5027  */
5028 unsigned long
5029 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
5030 {
5031 	struct ring_buffer_per_cpu *cpu_buffer;
5032 	unsigned long ret;
5033 
5034 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5035 		return 0;
5036 
5037 	cpu_buffer = buffer->buffers[cpu];
5038 	ret = local_read(&cpu_buffer->commit_overrun);
5039 
5040 	return ret;
5041 }
5042 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
5043 
5044 /**
5045  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
5046  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
5047  * @buffer: The ring buffer
5048  * @cpu: The per CPU buffer to get the number of overruns from
5049  */
5050 unsigned long
5051 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
5052 {
5053 	struct ring_buffer_per_cpu *cpu_buffer;
5054 	unsigned long ret;
5055 
5056 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5057 		return 0;
5058 
5059 	cpu_buffer = buffer->buffers[cpu];
5060 	ret = local_read(&cpu_buffer->dropped_events);
5061 
5062 	return ret;
5063 }
5064 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
5065 
5066 /**
5067  * ring_buffer_read_events_cpu - get the number of events successfully read
5068  * @buffer: The ring buffer
5069  * @cpu: The per CPU buffer to get the number of events read
5070  */
5071 unsigned long
5072 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
5073 {
5074 	struct ring_buffer_per_cpu *cpu_buffer;
5075 
5076 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5077 		return 0;
5078 
5079 	cpu_buffer = buffer->buffers[cpu];
5080 	return cpu_buffer->read;
5081 }
5082 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
5083 
5084 /**
5085  * ring_buffer_entries - get the number of entries in a buffer
5086  * @buffer: The ring buffer
5087  *
5088  * Returns the total number of entries in the ring buffer
5089  * (all CPU entries)
5090  */
5091 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
5092 {
5093 	struct ring_buffer_per_cpu *cpu_buffer;
5094 	unsigned long entries = 0;
5095 	int cpu;
5096 
5097 	/* if you care about this being correct, lock the buffer */
5098 	for_each_buffer_cpu(buffer, cpu) {
5099 		cpu_buffer = buffer->buffers[cpu];
5100 		entries += rb_num_of_entries(cpu_buffer);
5101 	}
5102 
5103 	return entries;
5104 }
5105 EXPORT_SYMBOL_GPL(ring_buffer_entries);
5106 
5107 /**
5108  * ring_buffer_overruns - get the number of overruns in buffer
5109  * @buffer: The ring buffer
5110  *
5111  * Returns the total number of overruns in the ring buffer
5112  * (all CPU entries)
5113  */
5114 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
5115 {
5116 	struct ring_buffer_per_cpu *cpu_buffer;
5117 	unsigned long overruns = 0;
5118 	int cpu;
5119 
5120 	/* if you care about this being correct, lock the buffer */
5121 	for_each_buffer_cpu(buffer, cpu) {
5122 		cpu_buffer = buffer->buffers[cpu];
5123 		overruns += local_read(&cpu_buffer->overrun);
5124 	}
5125 
5126 	return overruns;
5127 }
5128 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
5129 
5130 static void rb_iter_reset(struct ring_buffer_iter *iter)
5131 {
5132 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5133 
5134 	/* Iterator usage is expected to have record disabled */
5135 	iter->head_page = cpu_buffer->reader_page;
5136 	iter->head = cpu_buffer->reader_page->read;
5137 	iter->next_event = iter->head;
5138 
5139 	iter->cache_reader_page = iter->head_page;
5140 	iter->cache_read = cpu_buffer->read;
5141 	iter->cache_pages_removed = cpu_buffer->pages_removed;
5142 
5143 	if (iter->head) {
5144 		iter->read_stamp = cpu_buffer->read_stamp;
5145 		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
5146 	} else {
5147 		iter->read_stamp = iter->head_page->page->time_stamp;
5148 		iter->page_stamp = iter->read_stamp;
5149 	}
5150 }
5151 
5152 /**
5153  * ring_buffer_iter_reset - reset an iterator
5154  * @iter: The iterator to reset
5155  *
5156  * Resets the iterator, so that it will start from the beginning
5157  * again.
5158  */
5159 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
5160 {
5161 	struct ring_buffer_per_cpu *cpu_buffer;
5162 	unsigned long flags;
5163 
5164 	if (!iter)
5165 		return;
5166 
5167 	cpu_buffer = iter->cpu_buffer;
5168 
5169 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5170 	rb_iter_reset(iter);
5171 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5172 }
5173 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
5174 
5175 /**
5176  * ring_buffer_iter_empty - check if an iterator has no more to read
5177  * @iter: The iterator to check
5178  */
5179 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
5180 {
5181 	struct ring_buffer_per_cpu *cpu_buffer;
5182 	struct buffer_page *reader;
5183 	struct buffer_page *head_page;
5184 	struct buffer_page *commit_page;
5185 	struct buffer_page *curr_commit_page;
5186 	unsigned commit;
5187 	u64 curr_commit_ts;
5188 	u64 commit_ts;
5189 
5190 	cpu_buffer = iter->cpu_buffer;
5191 	reader = cpu_buffer->reader_page;
5192 	head_page = cpu_buffer->head_page;
5193 	commit_page = READ_ONCE(cpu_buffer->commit_page);
5194 	commit_ts = commit_page->page->time_stamp;
5195 
5196 	/*
5197 	 * When the writer goes across pages, it issues a cmpxchg which
5198 	 * is a mb(), which will synchronize with the rmb here.
5199 	 * (see rb_tail_page_update())
5200 	 */
5201 	smp_rmb();
5202 	commit = rb_page_commit(commit_page);
5203 	/* We want to make sure that the commit page doesn't change */
5204 	smp_rmb();
5205 
5206 	/* Make sure commit page didn't change */
5207 	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
5208 	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
5209 
5210 	/* If the commit page changed, then there's more data */
5211 	if (curr_commit_page != commit_page ||
5212 	    curr_commit_ts != commit_ts)
5213 		return 0;
5214 
5215 	/* Still racy, as it may return a false positive, but that's OK */
5216 	return ((iter->head_page == commit_page && iter->head >= commit) ||
5217 		(iter->head_page == reader && commit_page == head_page &&
5218 		 head_page->read == commit &&
5219 		 iter->head == rb_page_size(cpu_buffer->reader_page)));
5220 }
5221 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
5222 
5223 static void
5224 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
5225 		     struct ring_buffer_event *event)
5226 {
5227 	u64 delta;
5228 
5229 	switch (event->type_len) {
5230 	case RINGBUF_TYPE_PADDING:
5231 		return;
5232 
5233 	case RINGBUF_TYPE_TIME_EXTEND:
5234 		delta = rb_event_time_stamp(event);
5235 		cpu_buffer->read_stamp += delta;
5236 		return;
5237 
5238 	case RINGBUF_TYPE_TIME_STAMP:
5239 		delta = rb_event_time_stamp(event);
5240 		delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
5241 		cpu_buffer->read_stamp = delta;
5242 		return;
5243 
5244 	case RINGBUF_TYPE_DATA:
5245 		cpu_buffer->read_stamp += event->time_delta;
5246 		return;
5247 
5248 	default:
5249 		RB_WARN_ON(cpu_buffer, 1);
5250 	}
5251 }
5252 
5253 static void
5254 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
5255 			  struct ring_buffer_event *event)
5256 {
5257 	u64 delta;
5258 
5259 	switch (event->type_len) {
5260 	case RINGBUF_TYPE_PADDING:
5261 		return;
5262 
5263 	case RINGBUF_TYPE_TIME_EXTEND:
5264 		delta = rb_event_time_stamp(event);
5265 		iter->read_stamp += delta;
5266 		return;
5267 
5268 	case RINGBUF_TYPE_TIME_STAMP:
5269 		delta = rb_event_time_stamp(event);
5270 		delta = rb_fix_abs_ts(delta, iter->read_stamp);
5271 		iter->read_stamp = delta;
5272 		return;
5273 
5274 	case RINGBUF_TYPE_DATA:
5275 		iter->read_stamp += event->time_delta;
5276 		return;
5277 
5278 	default:
5279 		RB_WARN_ON(iter->cpu_buffer, 1);
5280 	}
5281 }
5282 
5283 static struct buffer_page *
5284 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
5285 {
5286 	struct buffer_page *reader = NULL;
5287 	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
5288 	unsigned long overwrite;
5289 	unsigned long flags;
5290 	int nr_loops = 0;
5291 	bool ret;
5292 
5293 	local_irq_save(flags);
5294 	arch_spin_lock(&cpu_buffer->lock);
5295 
5296  again:
5297 	/*
5298 	 * This should normally only loop twice. But because the
5299 	 * start of the reader inserts an empty page, it causes
5300 	 * a case where we will loop three times. There should be no
5301 	 * reason to loop four times (that I know of).
5302 	 */
5303 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
5304 		reader = NULL;
5305 		goto out;
5306 	}
5307 
5308 	reader = cpu_buffer->reader_page;
5309 
5310 	/* If there's more to read, return this page */
5311 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
5312 		goto out;
5313 
5314 	/* Never should we have an index greater than the size */
5315 	if (RB_WARN_ON(cpu_buffer,
5316 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
5317 		goto out;
5318 
5319 	/* check if we caught up to the tail */
5320 	reader = NULL;
5321 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
5322 		goto out;
5323 
5324 	/* Don't bother swapping if the ring buffer is empty */
5325 	if (rb_num_of_entries(cpu_buffer) == 0)
5326 		goto out;
5327 
5328 	/*
5329 	 * Reset the reader page to size zero.
5330 	 */
5331 	local_set(&cpu_buffer->reader_page->write, 0);
5332 	local_set(&cpu_buffer->reader_page->entries, 0);
5333 	local_set(&cpu_buffer->reader_page->page->commit, 0);
5334 	cpu_buffer->reader_page->real_end = 0;
5335 
5336  spin:
5337 	/*
5338 	 * Splice the empty reader page into the list around the head.
5339 	 */
5340 	reader = rb_set_head_page(cpu_buffer);
5341 	if (!reader)
5342 		goto out;
5343 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
5344 	cpu_buffer->reader_page->list.prev = reader->list.prev;
5345 
5346 	/*
5347 	 * cpu_buffer->pages just needs to point to the buffer, it
5348 	 *  has no specific buffer page to point to. Let's move it out
5349 	 *  of our way so we don't accidentally swap it.
5350 	 */
5351 	cpu_buffer->pages = reader->list.prev;
5352 
5353 	/* The reader page will be pointing to the new head */
5354 	rb_set_list_to_head(&cpu_buffer->reader_page->list);
5355 
5356 	/*
5357 	 * We want to make sure we read the overruns after we set up our
5358 	 * pointers to the next object. The writer side does a
5359 	 * cmpxchg to cross pages which acts as the mb on the writer
5360 	 * side. Note, the reader will constantly fail the swap
5361 	 * while the writer is updating the pointers, so this
5362 	 * guarantees that the overwrite recorded here is the one we
5363 	 * want to compare with the last_overrun.
5364 	 */
5365 	smp_mb();
5366 	overwrite = local_read(&(cpu_buffer->overrun));
5367 
5368 	/*
5369 	 * Here's the tricky part.
5370 	 *
5371 	 * We need to move the pointer past the header page.
5372 	 * But we can only do that if a writer is not currently
5373 	 * moving it. The page before the header page has the
5374 	 * flag bit '1' set if it is pointing to the page we want,
5375 	 * but if the writer is in the process of moving it
5376 	 * then it will be '2' or already moved '0'.
5377 	 */
5378 
5379 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
5380 
5381 	/*
5382 	 * If we did not convert it, then we must try again.
5383 	 */
5384 	if (!ret)
5385 		goto spin;
5386 
5387 	if (cpu_buffer->ring_meta)
5388 		rb_update_meta_reader(cpu_buffer, reader);
5389 
5390 	/*
5391 	 * Yay! We succeeded in replacing the page.
5392 	 *
5393 	 * Now make the new head point back to the reader page.
5394 	 */
5395 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
5396 	rb_inc_page(&cpu_buffer->head_page);
5397 
5398 	cpu_buffer->cnt++;
5399 	local_inc(&cpu_buffer->pages_read);
5400 
5401 	/* Finally update the reader page to the new head */
5402 	cpu_buffer->reader_page = reader;
5403 	cpu_buffer->reader_page->read = 0;
5404 
5405 	if (overwrite != cpu_buffer->last_overrun) {
5406 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
5407 		cpu_buffer->last_overrun = overwrite;
5408 	}
5409 
5410 	goto again;
5411 
5412  out:
5413 	/* Update the read_stamp on the first event */
5414 	if (reader && reader->read == 0)
5415 		cpu_buffer->read_stamp = reader->page->time_stamp;
5416 
5417 	arch_spin_unlock(&cpu_buffer->lock);
5418 	local_irq_restore(flags);
5419 
5420 	/*
5421 	 * The writer has preemption disabled; wait for it, but not forever.
5422 	 * Although, 1 second is pretty much "forever".
5423 	 */
5424 #define USECS_WAIT	1000000
5425 	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
5426 		/* If the write is past the end of page, a writer is still updating it */
5427 		if (likely(!reader || rb_page_write(reader) <= bsize))
5428 			break;
5429 
5430 		udelay(1);
5431 
5432 		/* Get the latest version of the reader write value */
5433 		smp_rmb();
5434 	}
5435 
5436 	/* The writer is not moving forward? Something is wrong */
5437 	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
5438 		reader = NULL;
5439 
5440 	/*
5441 	 * Make sure we see any padding after the write update
5442 	 * (see rb_reset_tail()).
5443 	 *
5444 	 * In addition, a writer may be writing on the reader page
5445 	 * if the page has not been fully filled, so the read barrier
5446 	 * is also needed to make sure we see the content of what is
5447 	 * committed by the writer (see rb_set_commit_to_write()).
5448 	 */
5449 	smp_rmb();
5450 
5451 
5452 	return reader;
5453 }
5454 
5455 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
5456 {
5457 	struct ring_buffer_event *event;
5458 	struct buffer_page *reader;
5459 	unsigned length;
5460 
5461 	reader = rb_get_reader_page(cpu_buffer);
5462 
5463 	/* This function should not be called when buffer is empty */
5464 	if (RB_WARN_ON(cpu_buffer, !reader))
5465 		return;
5466 
5467 	event = rb_reader_event(cpu_buffer);
5468 
5469 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
5470 		cpu_buffer->read++;
5471 
5472 	rb_update_read_stamp(cpu_buffer, event);
5473 
5474 	length = rb_event_length(event);
5475 	cpu_buffer->reader_page->read += length;
5476 	cpu_buffer->read_bytes += length;
5477 }
5478 
5479 static void rb_advance_iter(struct ring_buffer_iter *iter)
5480 {
5481 	struct ring_buffer_per_cpu *cpu_buffer;
5482 
5483 	cpu_buffer = iter->cpu_buffer;
5484 
5485 	/* If head == next_event then we need to jump to the next event */
5486 	if (iter->head == iter->next_event) {
5487 		/* If the event gets overwritten again, there's nothing to do */
5488 		if (rb_iter_head_event(iter) == NULL)
5489 			return;
5490 	}
5491 
5492 	iter->head = iter->next_event;
5493 
5494 	/*
5495 	 * Check if we are at the end of the buffer.
5496 	 */
5497 	if (iter->next_event >= rb_page_size(iter->head_page)) {
5498 		/* discarded commits can make the page empty */
5499 		if (iter->head_page == cpu_buffer->commit_page)
5500 			return;
5501 		rb_inc_iter(iter);
5502 		return;
5503 	}
5504 
5505 	rb_update_iter_read_stamp(iter, iter->event);
5506 }
5507 
5508 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
5509 {
5510 	return cpu_buffer->lost_events;
5511 }
5512 
5513 static struct ring_buffer_event *
5514 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
5515 	       unsigned long *lost_events)
5516 {
5517 	struct ring_buffer_event *event;
5518 	struct buffer_page *reader;
5519 	int nr_loops = 0;
5520 
5521 	if (ts)
5522 		*ts = 0;
5523  again:
5524 	/*
5525 	 * We repeat when a time extend is encountered.
5526 	 * Since the time extend is always attached to a data event,
5527 	 * we should never loop more than once.
5528 	 * (We never hit the following condition more than twice).
5529 	 */
5530 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
5531 		return NULL;
5532 
5533 	reader = rb_get_reader_page(cpu_buffer);
5534 	if (!reader)
5535 		return NULL;
5536 
5537 	event = rb_reader_event(cpu_buffer);
5538 
5539 	switch (event->type_len) {
5540 	case RINGBUF_TYPE_PADDING:
5541 		if (rb_null_event(event))
5542 			RB_WARN_ON(cpu_buffer, 1);
5543 		/*
5544 		 * Because the writer could be discarding every
5545 		 * event it creates (which would probably be bad),
5546 		 * if we were to go back to "again" then we may never
5547 		 * catch up, and will trigger the warn on, or lock
5548 		 * the box. Return the padding, and we will release
5549 		 * the current locks, and try again.
5550 		 */
5551 		return event;
5552 
5553 	case RINGBUF_TYPE_TIME_EXTEND:
5554 		/* Internal data, OK to advance */
5555 		rb_advance_reader(cpu_buffer);
5556 		goto again;
5557 
5558 	case RINGBUF_TYPE_TIME_STAMP:
5559 		if (ts) {
5560 			*ts = rb_event_time_stamp(event);
5561 			*ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
5562 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5563 							 cpu_buffer->cpu, ts);
5564 		}
5565 		/* Internal data, OK to advance */
5566 		rb_advance_reader(cpu_buffer);
5567 		goto again;
5568 
5569 	case RINGBUF_TYPE_DATA:
5570 		if (ts && !(*ts)) {
5571 			*ts = cpu_buffer->read_stamp + event->time_delta;
5572 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5573 							 cpu_buffer->cpu, ts);
5574 		}
5575 		if (lost_events)
5576 			*lost_events = rb_lost_events(cpu_buffer);
5577 		return event;
5578 
5579 	default:
5580 		RB_WARN_ON(cpu_buffer, 1);
5581 	}
5582 
5583 	return NULL;
5584 }
5585 EXPORT_SYMBOL_GPL(ring_buffer_peek);
5586 
5587 static struct ring_buffer_event *
5588 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5589 {
5590 	struct trace_buffer *buffer;
5591 	struct ring_buffer_per_cpu *cpu_buffer;
5592 	struct ring_buffer_event *event;
5593 	int nr_loops = 0;
5594 
5595 	if (ts)
5596 		*ts = 0;
5597 
5598 	cpu_buffer = iter->cpu_buffer;
5599 	buffer = cpu_buffer->buffer;
5600 
5601 	/*
5602 	 * Check if someone performed a consuming read to the buffer
5603 	 * or removed some pages from the buffer. In these cases,
5604 	 * the iterator was invalidated and we need to reset it.
5605 	 */
5606 	if (unlikely(iter->cache_read != cpu_buffer->read ||
5607 		     iter->cache_reader_page != cpu_buffer->reader_page ||
5608 		     iter->cache_pages_removed != cpu_buffer->pages_removed))
5609 		rb_iter_reset(iter);
5610 
5611  again:
5612 	if (ring_buffer_iter_empty(iter))
5613 		return NULL;
5614 
5615 	/*
5616 	 * As the writer can mess with what the iterator is trying
5617 	 * to read, just give up if we fail to get an event after
5618 	 * three tries. The iterator is not as reliable when reading
5619 	 * the ring buffer with an active write as the consumer is.
5620 	 * Do not warn if the limit of three failures is reached.
5621 	 */
5622 	if (++nr_loops > 3)
5623 		return NULL;
5624 
5625 	if (rb_per_cpu_empty(cpu_buffer))
5626 		return NULL;
5627 
5628 	if (iter->head >= rb_page_size(iter->head_page)) {
5629 		rb_inc_iter(iter);
5630 		goto again;
5631 	}
5632 
5633 	event = rb_iter_head_event(iter);
5634 	if (!event)
5635 		goto again;
5636 
5637 	switch (event->type_len) {
5638 	case RINGBUF_TYPE_PADDING:
5639 		if (rb_null_event(event)) {
5640 			rb_inc_iter(iter);
5641 			goto again;
5642 		}
5643 		rb_advance_iter(iter);
5644 		return event;
5645 
5646 	case RINGBUF_TYPE_TIME_EXTEND:
5647 		/* Internal data, OK to advance */
5648 		rb_advance_iter(iter);
5649 		goto again;
5650 
5651 	case RINGBUF_TYPE_TIME_STAMP:
5652 		if (ts) {
5653 			*ts = rb_event_time_stamp(event);
5654 			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
5655 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5656 							 cpu_buffer->cpu, ts);
5657 		}
5658 		/* Internal data, OK to advance */
5659 		rb_advance_iter(iter);
5660 		goto again;
5661 
5662 	case RINGBUF_TYPE_DATA:
5663 		if (ts && !(*ts)) {
5664 			*ts = iter->read_stamp + event->time_delta;
5665 			ring_buffer_normalize_time_stamp(buffer,
5666 							 cpu_buffer->cpu, ts);
5667 		}
5668 		return event;
5669 
5670 	default:
5671 		RB_WARN_ON(cpu_buffer, 1);
5672 	}
5673 
5674 	return NULL;
5675 }
5676 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
5677 
5678 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
5679 {
5680 	if (likely(!in_nmi())) {
5681 		raw_spin_lock(&cpu_buffer->reader_lock);
5682 		return true;
5683 	}
5684 
5685 	/*
5686 	 * If an NMI die dump prints out the content of the ring buffer,
5687 	 * trylock must be used to prevent a deadlock if the NMI
5688 	 * preempted a task that holds the ring buffer locks. If
5689 	 * we get the lock then all is fine, if not, then continue
5690 	 * to do the read, but this can corrupt the ring buffer,
5691 	 * so it must be permanently disabled from future writes.
5692 	 * Reading from NMI is a one-shot deal.
5693 	 */
5694 	if (raw_spin_trylock(&cpu_buffer->reader_lock))
5695 		return true;
5696 
5697 	/* Continue without locking, but disable the ring buffer */
5698 	atomic_inc(&cpu_buffer->record_disabled);
5699 	return false;
5700 }
5701 
5702 static inline void
5703 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
5704 {
5705 	if (likely(locked))
5706 		raw_spin_unlock(&cpu_buffer->reader_lock);
5707 }
5708 
5709 /**
5710  * ring_buffer_peek - peek at the next event to be read
5711  * @buffer: The ring buffer to read
5712  * @cpu: The cpu to peek at
5713  * @ts: The timestamp counter of this event.
5714  * @lost_events: a variable to store if events were lost (may be NULL)
5715  *
5716  * This will return the event that will be read next, but does
5717  * not consume the data.
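 *
 * An illustrative sketch (hypothetical caller): peek to inspect the next
 * event's timestamp before deciding whether to consume it:
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event && ts <= cutoff_ts)
 *		event = ring_buffer_consume(buffer, cpu, &ts, NULL);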
5718  */
5719 struct ring_buffer_event *
5720 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
5721 		 unsigned long *lost_events)
5722 {
5723 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5724 	struct ring_buffer_event *event;
5725 	unsigned long flags;
5726 	bool dolock;
5727 
5728 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5729 		return NULL;
5730 
5731  again:
5732 	local_irq_save(flags);
5733 	dolock = rb_reader_lock(cpu_buffer);
5734 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5735 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5736 		rb_advance_reader(cpu_buffer);
5737 	rb_reader_unlock(cpu_buffer, dolock);
5738 	local_irq_restore(flags);
5739 
5740 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5741 		goto again;
5742 
5743 	return event;
5744 }
5745 
5746 /**
 * ring_buffer_iter_dropped - report if there are dropped events
5747  * @iter: The ring buffer iterator
5748  *
5749  * Returns true if there were dropped events since the last peek.
5750  */
5751 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5752 {
5753 	bool ret = iter->missed_events != 0;
5754 
5755 	iter->missed_events = 0;
5756 	return ret;
5757 }
5758 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5759 
5760 /**
5761  * ring_buffer_iter_peek - peek at the next event to be read
5762  * @iter: The ring buffer iterator
5763  * @ts: The timestamp counter of this event.
5764  *
5765  * This will return the event that will be read next, but does
5766  * not increment the iterator.
5767  */
5768 struct ring_buffer_event *
5769 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5770 {
5771 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5772 	struct ring_buffer_event *event;
5773 	unsigned long flags;
5774 
5775  again:
5776 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5777 	event = rb_iter_peek(iter, ts);
5778 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5779 
5780 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5781 		goto again;
5782 
5783 	return event;
5784 }
5785 
5786 /**
5787  * ring_buffer_consume - return an event and consume it
5788  * @buffer: The ring buffer to get the next event from
5789  * @cpu: the cpu to read the buffer from
5790  * @ts: a variable to store the timestamp (may be NULL)
5791  * @lost_events: a variable to store if events were lost (may be NULL)
5792  *
5793  * Returns the next event in the ring buffer, and that event is consumed.
5794  * Meaning, that sequential reads will keep returning a different event,
5795  * and eventually empty the ring buffer if the producer is slower.
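 *
 * A typical consuming-read loop (an illustrative sketch; real callers also
 * handle waiting and wakeups, which the ring buffer leaves to its users):
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		data = ring_buffer_event_data(event);
 *		... handle @data, @ts and @lost ...
 *	}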
5796  */
5797 struct ring_buffer_event *
5798 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5799 		    unsigned long *lost_events)
5800 {
5801 	struct ring_buffer_per_cpu *cpu_buffer;
5802 	struct ring_buffer_event *event = NULL;
5803 	unsigned long flags;
5804 	bool dolock;
5805 
5806  again:
5807 	/* might be called in atomic */
5808 	preempt_disable();
5809 
5810 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5811 		goto out;
5812 
5813 	cpu_buffer = buffer->buffers[cpu];
5814 	local_irq_save(flags);
5815 	dolock = rb_reader_lock(cpu_buffer);
5816 
5817 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5818 	if (event) {
5819 		cpu_buffer->lost_events = 0;
5820 		rb_advance_reader(cpu_buffer);
5821 	}
5822 
5823 	rb_reader_unlock(cpu_buffer, dolock);
5824 	local_irq_restore(flags);
5825 
5826  out:
5827 	preempt_enable();
5828 
5829 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5830 		goto again;
5831 
5832 	return event;
5833 }
5834 EXPORT_SYMBOL_GPL(ring_buffer_consume);
5835 
5836 /**
5837  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5838  * @buffer: The ring buffer to read from
5839  * @cpu: The cpu buffer to iterate over
5840  * @flags: gfp flags to use for memory allocation
5841  *
5842  * This performs the initial preparations necessary to iterate
5843  * through the buffer.  Memory is allocated, buffer resizing
5844  * is disabled, and the iterator pointer is returned to the caller.
5845  *
5846  * After a sequence of ring_buffer_read_prepare calls, the user is
5847  * expected to make at least one call to ring_buffer_read_prepare_sync.
5848  * Afterwards, ring_buffer_read_start is invoked to get things going
5849  * for real.
5850  *
5851  * This overall must be paired with ring_buffer_read_finish.
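 *
 * A sketch of the full sequence described above (illustrative only,
 * single CPU, error handling omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		... process the event ...
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);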
5852  */
5853 struct ring_buffer_iter *
5854 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5855 {
5856 	struct ring_buffer_per_cpu *cpu_buffer;
5857 	struct ring_buffer_iter *iter;
5858 
5859 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5860 		return NULL;
5861 
5862 	iter = kzalloc(sizeof(*iter), flags);
5863 	if (!iter)
5864 		return NULL;
5865 
5866 	/* Holds the entire event: data and meta data */
5867 	iter->event_size = buffer->subbuf_size;
5868 	iter->event = kmalloc(iter->event_size, flags);
5869 	if (!iter->event) {
5870 		kfree(iter);
5871 		return NULL;
5872 	}
5873 
5874 	cpu_buffer = buffer->buffers[cpu];
5875 
5876 	iter->cpu_buffer = cpu_buffer;
5877 
5878 	atomic_inc(&cpu_buffer->resize_disabled);
5879 
5880 	return iter;
5881 }
5882 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5883 
5884 /**
5885  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5886  *
5887  * All previously invoked ring_buffer_read_prepare calls to prepare
5888  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5889  * calls on those iterators are allowed.
5890  */
5891 void
5892 ring_buffer_read_prepare_sync(void)
5893 {
5894 	synchronize_rcu();
5895 }
5896 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5897 
5898 /**
5899  * ring_buffer_read_start - start a non consuming read of the buffer
5900  * @iter: The iterator returned by ring_buffer_read_prepare
5901  *
5902  * This finalizes the startup of an iteration through the buffer.
5903  * The iterator comes from a call to ring_buffer_read_prepare and
5904  * an intervening ring_buffer_read_prepare_sync must have been
5905  * performed.
5906  *
5907  * Must be paired with ring_buffer_read_finish.
5908  */
5909 void
5910 ring_buffer_read_start(struct ring_buffer_iter *iter)
5911 {
5912 	struct ring_buffer_per_cpu *cpu_buffer;
5913 	unsigned long flags;
5914 
5915 	if (!iter)
5916 		return;
5917 
5918 	cpu_buffer = iter->cpu_buffer;
5919 
5920 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5921 	arch_spin_lock(&cpu_buffer->lock);
5922 	rb_iter_reset(iter);
5923 	arch_spin_unlock(&cpu_buffer->lock);
5924 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5925 }
5926 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5927 
5928 /**
5929  * ring_buffer_read_finish - finish reading the iterator of the buffer
5930  * @iter: The iterator retrieved by ring_buffer_read_prepare
5931  *
5932  * This re-enables resizing of the buffer, and frees the iterator.
5933  */
5934 void
5935 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5936 {
5937 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5938 
5939 	/* Use this opportunity to check the integrity of the ring buffer. */
5940 	rb_check_pages(cpu_buffer);
5941 
5942 	atomic_dec(&cpu_buffer->resize_disabled);
5943 	kfree(iter->event);
5944 	kfree(iter);
5945 }
5946 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5947 
5948 /**
5949  * ring_buffer_iter_advance - advance the iterator to the next location
5950  * @iter: The ring buffer iterator
5951  *
5952  * Move the location of the iterator such that the next read will
5953  * be the next location of the iterator.
5954  */
5955 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5956 {
5957 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5958 	unsigned long flags;
5959 
5960 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5961 
5962 	rb_advance_iter(iter);
5963 
5964 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5965 }
5966 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5967 
5968 /**
5969  * ring_buffer_size - return the size of the ring buffer (in bytes)
5970  * @buffer: The ring buffer.
5971  * @cpu: The CPU to get ring buffer size from.
5972  */
5973 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5974 {
5975 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5976 		return 0;
5977 
5978 	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5979 }
5980 EXPORT_SYMBOL_GPL(ring_buffer_size);
5981 
5982 /**
5983  * ring_buffer_max_event_size - return the max data size of an event
5984  * @buffer: The ring buffer.
5985  *
5986  * Returns the maximum size an event can be.
5987  */
5988 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5989 {
5990 	/* If abs timestamp is requested, events have a timestamp too */
5991 	if (ring_buffer_time_stamp_abs(buffer))
5992 		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5993 	return buffer->max_data_size;
5994 }
5995 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5996 
5997 static void rb_clear_buffer_page(struct buffer_page *page)
5998 {
5999 	local_set(&page->write, 0);
6000 	local_set(&page->entries, 0);
6001 	rb_init_page(page->page);
6002 	page->read = 0;
6003 }
6004 
6005 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6006 {
6007 	struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6008 
6009 	if (!meta)
6010 		return;
6011 
6012 	meta->reader.read = cpu_buffer->reader_page->read;
6013 	meta->reader.id = cpu_buffer->reader_page->id;
6014 	meta->reader.lost_events = cpu_buffer->lost_events;
6015 
6016 	meta->entries = local_read(&cpu_buffer->entries);
6017 	meta->overrun = local_read(&cpu_buffer->overrun);
6018 	meta->read = cpu_buffer->read;
6019 
6020 	/* Some archs do not have data cache coherency between kernel and user-space */
6021 	flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
6022 }
6023 
6024 static void
6025 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
6026 {
6027 	struct buffer_page *page;
6028 
6029 	rb_head_page_deactivate(cpu_buffer);
6030 
6031 	cpu_buffer->head_page
6032 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
6033 	rb_clear_buffer_page(cpu_buffer->head_page);
6034 	list_for_each_entry(page, cpu_buffer->pages, list) {
6035 		rb_clear_buffer_page(page);
6036 	}
6037 
6038 	cpu_buffer->tail_page = cpu_buffer->head_page;
6039 	cpu_buffer->commit_page = cpu_buffer->head_page;
6040 
6041 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
6042 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
6043 	rb_clear_buffer_page(cpu_buffer->reader_page);
6044 
6045 	local_set(&cpu_buffer->entries_bytes, 0);
6046 	local_set(&cpu_buffer->overrun, 0);
6047 	local_set(&cpu_buffer->commit_overrun, 0);
6048 	local_set(&cpu_buffer->dropped_events, 0);
6049 	local_set(&cpu_buffer->entries, 0);
6050 	local_set(&cpu_buffer->committing, 0);
6051 	local_set(&cpu_buffer->commits, 0);
6052 	local_set(&cpu_buffer->pages_touched, 0);
6053 	local_set(&cpu_buffer->pages_lost, 0);
6054 	local_set(&cpu_buffer->pages_read, 0);
6055 	cpu_buffer->last_pages_touch = 0;
6056 	cpu_buffer->shortest_full = 0;
6057 	cpu_buffer->read = 0;
6058 	cpu_buffer->read_bytes = 0;
6059 
6060 	rb_time_set(&cpu_buffer->write_stamp, 0);
6061 	rb_time_set(&cpu_buffer->before_stamp, 0);
6062 
6063 	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
6064 
6065 	cpu_buffer->lost_events = 0;
6066 	cpu_buffer->last_overrun = 0;
6067 
6068 	rb_head_page_activate(cpu_buffer);
6069 	cpu_buffer->pages_removed = 0;
6070 
6071 	if (cpu_buffer->mapped) {
6072 		rb_update_meta_page(cpu_buffer);
6073 		if (cpu_buffer->ring_meta) {
6074 			struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
6075 			meta->commit_buffer = meta->head_buffer;
6076 		}
6077 	}
6078 }
6079 
6080 /* Must have disabled the cpu buffer then done a synchronize_rcu */
6081 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6082 {
6083 	unsigned long flags;
6084 
6085 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6086 
6087 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
6088 		goto out;
6089 
6090 	arch_spin_lock(&cpu_buffer->lock);
6091 
6092 	rb_reset_cpu(cpu_buffer);
6093 
6094 	arch_spin_unlock(&cpu_buffer->lock);
6095 
6096  out:
6097 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6098 }
6099 
6100 /**
6101  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
6102  * @buffer: The ring buffer to reset a per cpu buffer of
6103  * @cpu: The CPU buffer to be reset
6104  */
6105 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
6106 {
6107 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6108 
6109 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
6110 		return;
6111 
6112 	/* prevent another thread from changing buffer sizes */
6113 	mutex_lock(&buffer->mutex);
6114 
6115 	atomic_inc(&cpu_buffer->resize_disabled);
6116 	atomic_inc(&cpu_buffer->record_disabled);
6117 
6118 	/* Make sure all commits have finished */
6119 	synchronize_rcu();
6120 
6121 	reset_disabled_cpu_buffer(cpu_buffer);
6122 
6123 	atomic_dec(&cpu_buffer->record_disabled);
6124 	atomic_dec(&cpu_buffer->resize_disabled);
6125 
6126 	mutex_unlock(&buffer->mutex);
6127 }
6128 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
6129 
6130 /* Flag to ensure proper resetting of atomic variables */
6131 #define RESET_BIT	(1 << 30)
6132 
6133 /**
6134  * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
6135  * @buffer: The ring buffer to reset a per cpu buffer of
6136  */
6137 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
6138 {
6139 	struct ring_buffer_per_cpu *cpu_buffer;
6140 	int cpu;
6141 
6142 	/* prevent another thread from changing buffer sizes */
6143 	mutex_lock(&buffer->mutex);
6144 
6145 	for_each_online_buffer_cpu(buffer, cpu) {
6146 		cpu_buffer = buffer->buffers[cpu];
6147 
6148 		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
6149 		atomic_inc(&cpu_buffer->record_disabled);
6150 	}
6151 
6152 	/* Make sure all commits have finished */
6153 	synchronize_rcu();
6154 
6155 	for_each_buffer_cpu(buffer, cpu) {
6156 		cpu_buffer = buffer->buffers[cpu];
6157 
6158 		/*
6159 		 * If a CPU came online during the synchronize_rcu(), then
6160 		 * ignore it.
6161 		 */
6162 		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
6163 			continue;
6164 
6165 		reset_disabled_cpu_buffer(cpu_buffer);
6166 
6167 		atomic_dec(&cpu_buffer->record_disabled);
6168 		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
6169 	}
6170 
6171 	mutex_unlock(&buffer->mutex);
6172 }
6173 
6174 /**
6175  * ring_buffer_reset - reset a ring buffer
6176  * @buffer: The ring buffer to reset all cpu buffers
6177  */
6178 void ring_buffer_reset(struct trace_buffer *buffer)
6179 {
6180 	struct ring_buffer_per_cpu *cpu_buffer;
6181 	int cpu;
6182 
6183 	/* prevent another thread from changing buffer sizes */
6184 	mutex_lock(&buffer->mutex);
6185 
6186 	for_each_buffer_cpu(buffer, cpu) {
6187 		cpu_buffer = buffer->buffers[cpu];
6188 
6189 		atomic_inc(&cpu_buffer->resize_disabled);
6190 		atomic_inc(&cpu_buffer->record_disabled);
6191 	}
6192 
6193 	/* Make sure all commits have finished */
6194 	synchronize_rcu();
6195 
6196 	for_each_buffer_cpu(buffer, cpu) {
6197 		cpu_buffer = buffer->buffers[cpu];
6198 
6199 		reset_disabled_cpu_buffer(cpu_buffer);
6200 
6201 		atomic_dec(&cpu_buffer->record_disabled);
6202 		atomic_dec(&cpu_buffer->resize_disabled);
6203 	}
6204 
6205 	mutex_unlock(&buffer->mutex);
6206 }
6207 EXPORT_SYMBOL_GPL(ring_buffer_reset);
6208 
6209 /**
6210  * ring_buffer_empty - is the ring buffer empty?
6211  * @buffer: The ring buffer to test
6212  */
6213 bool ring_buffer_empty(struct trace_buffer *buffer)
6214 {
6215 	struct ring_buffer_per_cpu *cpu_buffer;
6216 	unsigned long flags;
6217 	bool dolock;
6218 	bool ret;
6219 	int cpu;
6220 
6221 	/* yes this is racy, but if you don't like the race, lock the buffer */
6222 	for_each_buffer_cpu(buffer, cpu) {
6223 		cpu_buffer = buffer->buffers[cpu];
6224 		local_irq_save(flags);
6225 		dolock = rb_reader_lock(cpu_buffer);
6226 		ret = rb_per_cpu_empty(cpu_buffer);
6227 		rb_reader_unlock(cpu_buffer, dolock);
6228 		local_irq_restore(flags);
6229 
6230 		if (!ret)
6231 			return false;
6232 	}
6233 
6234 	return true;
6235 }
6236 EXPORT_SYMBOL_GPL(ring_buffer_empty);
6237 
6238 /**
6239  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
6240  * @buffer: The ring buffer
6241  * @cpu: The CPU buffer to test
6242  */
6243 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
6244 {
6245 	struct ring_buffer_per_cpu *cpu_buffer;
6246 	unsigned long flags;
6247 	bool dolock;
6248 	bool ret;
6249 
6250 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
6251 		return true;
6252 
6253 	cpu_buffer = buffer->buffers[cpu];
6254 	local_irq_save(flags);
6255 	dolock = rb_reader_lock(cpu_buffer);
6256 	ret = rb_per_cpu_empty(cpu_buffer);
6257 	rb_reader_unlock(cpu_buffer, dolock);
6258 	local_irq_restore(flags);
6259 
6260 	return ret;
6261 }
6262 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
6263 
6264 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
6265 /**
6266  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
6267  * @buffer_a: One buffer to swap with
6268  * @buffer_b: The other buffer to swap with
6269  * @cpu: the CPU of the buffers to swap
6270  *
6271  * This function is useful for tracers that want to take a "snapshot"
6272  * of a CPU buffer and have another backup buffer lying around.
6273  * It is expected that the tracer handles the cpu buffer not being
6274  * used at the moment.
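 *
 * An illustrative sketch (hypothetical snapshot path; protecting @cpu from
 * concurrent use is the caller's job, as noted above):
 *
 *	ret = ring_buffer_swap_cpu(snapshot_buffer, live_buffer, cpu);
 *	if (!ret)
 *		... the per CPU buffer that was in @live_buffer is now in
 *		    @snapshot_buffer, and vice versa ...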
6275  */
6276 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
6277 			 struct trace_buffer *buffer_b, int cpu)
6278 {
6279 	struct ring_buffer_per_cpu *cpu_buffer_a;
6280 	struct ring_buffer_per_cpu *cpu_buffer_b;
6281 	int ret = -EINVAL;
6282 
6283 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
6284 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
6285 		goto out;
6286 
6287 	cpu_buffer_a = buffer_a->buffers[cpu];
6288 	cpu_buffer_b = buffer_b->buffers[cpu];
6289 
6290 	/* It's up to the callers to not try to swap mapped buffers */
6291 	if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) {
6292 		ret = -EBUSY;
6293 		goto out;
6294 	}
6295 
6296 	/* At least make sure the two buffers are somewhat the same */
6297 	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
6298 		goto out;
6299 
6300 	if (buffer_a->subbuf_order != buffer_b->subbuf_order)
6301 		goto out;
6302 
6303 	ret = -EAGAIN;
6304 
6305 	if (atomic_read(&buffer_a->record_disabled))
6306 		goto out;
6307 
6308 	if (atomic_read(&buffer_b->record_disabled))
6309 		goto out;
6310 
6311 	if (atomic_read(&cpu_buffer_a->record_disabled))
6312 		goto out;
6313 
6314 	if (atomic_read(&cpu_buffer_b->record_disabled))
6315 		goto out;
6316 
6317 	/*
6318 	 * We can't do a synchronize_rcu here because this
6319 	 * function can be called in atomic context.
6320 	 * Normally this will be called from the same CPU as cpu.
6321 	 * If not it's up to the caller to protect this.
6322 	 */
6323 	atomic_inc(&cpu_buffer_a->record_disabled);
6324 	atomic_inc(&cpu_buffer_b->record_disabled);
6325 
6326 	ret = -EBUSY;
6327 	if (local_read(&cpu_buffer_a->committing))
6328 		goto out_dec;
6329 	if (local_read(&cpu_buffer_b->committing))
6330 		goto out_dec;
6331 
6332 	/*
6333 	 * When resize is in progress, we cannot swap it because
6334 	 * it will mess up the state of the cpu buffer.
6335 	 */
6336 	if (atomic_read(&buffer_a->resizing))
6337 		goto out_dec;
6338 	if (atomic_read(&buffer_b->resizing))
6339 		goto out_dec;
6340 
6341 	buffer_a->buffers[cpu] = cpu_buffer_b;
6342 	buffer_b->buffers[cpu] = cpu_buffer_a;
6343 
6344 	cpu_buffer_b->buffer = buffer_a;
6345 	cpu_buffer_a->buffer = buffer_b;
6346 
6347 	ret = 0;
6348 
6349 out_dec:
6350 	atomic_dec(&cpu_buffer_a->record_disabled);
6351 	atomic_dec(&cpu_buffer_b->record_disabled);
6352 out:
6353 	return ret;
6354 }
6355 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
6356 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
6357 
6358 /**
6359  * ring_buffer_alloc_read_page - allocate a page to read from buffer
6360  * @buffer: the buffer to allocate for.
6361  * @cpu: the cpu buffer to allocate.
6362  *
6363  * This function is used in conjunction with ring_buffer_read_page.
6364  * When reading a full page from the ring buffer, these functions
6365  * can be used to speed up the process. The calling function should
6366  * allocate a few pages first with this function. Then when it
6367  * needs to get pages from the ring buffer, it passes the result
6368  * of this function into ring_buffer_read_page, which will swap
6369  * the page that was allocated, with the read page of the buffer.
6370  *
6371  * Returns:
6372  *  The page allocated, or ERR_PTR
6373  */
6374 struct buffer_data_read_page *
6375 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
6376 {
6377 	struct ring_buffer_per_cpu *cpu_buffer;
6378 	struct buffer_data_read_page *bpage = NULL;
6379 	unsigned long flags;
6380 	struct page *page;
6381 
6382 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
6383 		return ERR_PTR(-ENODEV);
6384 
6385 	bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
6386 	if (!bpage)
6387 		return ERR_PTR(-ENOMEM);
6388 
6389 	bpage->order = buffer->subbuf_order;
6390 	cpu_buffer = buffer->buffers[cpu];
6391 	local_irq_save(flags);
6392 	arch_spin_lock(&cpu_buffer->lock);
6393 
6394 	if (cpu_buffer->free_page) {
6395 		bpage->data = cpu_buffer->free_page;
6396 		cpu_buffer->free_page = NULL;
6397 	}
6398 
6399 	arch_spin_unlock(&cpu_buffer->lock);
6400 	local_irq_restore(flags);
6401 
6402 	if (bpage->data)
6403 		goto out;
6404 
6405 	page = alloc_pages_node(cpu_to_node(cpu),
6406 				GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
6407 				cpu_buffer->buffer->subbuf_order);
6408 	if (!page) {
6409 		kfree(bpage);
6410 		return ERR_PTR(-ENOMEM);
6411 	}
6412 
6413 	bpage->data = page_address(page);
6414 
6415  out:
6416 	rb_init_page(bpage->data);
6417 
6418 	return bpage;
6419 }
6420 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
6421 
6422 /**
6423  * ring_buffer_free_read_page - free an allocated read page
6424  * @buffer: the buffer the page was allocated for
6425  * @cpu: the cpu buffer the page came from
6426  * @data_page: the page to free
6427  *
6428  * Free a page allocated from ring_buffer_alloc_read_page.
6429  */
6430 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
6431 				struct buffer_data_read_page *data_page)
6432 {
6433 	struct ring_buffer_per_cpu *cpu_buffer;
6434 	struct buffer_data_page *bpage = data_page->data;
6435 	struct page *page = virt_to_page(bpage);
6436 	unsigned long flags;
6437 
6438 	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
6439 		return;
6440 
6441 	cpu_buffer = buffer->buffers[cpu];
6442 
6443 	/*
6444 	 * If the page is still in use someplace else, or the order of the page
6445 	 * is different from the subbuffer order of the buffer,
6446 	 * we can't reuse it.
6447 	 */
6448 	if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
6449 		goto out;
6450 
6451 	local_irq_save(flags);
6452 	arch_spin_lock(&cpu_buffer->lock);
6453 
6454 	if (!cpu_buffer->free_page) {
6455 		cpu_buffer->free_page = bpage;
6456 		bpage = NULL;
6457 	}
6458 
6459 	arch_spin_unlock(&cpu_buffer->lock);
6460 	local_irq_restore(flags);
6461 
6462  out:
6463 	free_pages((unsigned long)bpage, data_page->order);
6464 	kfree(data_page);
6465 }
6466 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
6467 
6468 /**
6469  * ring_buffer_read_page - extract a page from the ring buffer
6470  * @buffer: buffer to extract from
6471  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
6472  * @len: amount to extract
6473  * @cpu: the cpu of the buffer to extract
6474  * @full: should the extraction only happen when the page is full.
6475  *
6476  * This function will pull out a page from the ring buffer and consume it.
6477  * @data_page must be the address of the variable that was returned
6478  * from ring_buffer_alloc_read_page. This is because the page might be used
6479  * to swap with a page in the ring buffer.
6480  *
6481  * for example:
6482  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
6483  *	if (IS_ERR(rpage))
6484  *		return PTR_ERR(rpage);
6485  *	ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
6486  *	if (ret >= 0)
6487  *		process_page(ring_buffer_read_page_data(rpage), ret);
6488  *	ring_buffer_free_read_page(buffer, cpu, rpage);
6489  *
6490  * When @full is set, the function will not succeed (it returns < 0)
6491  * unless the writer is off the reader page.
6492  *
6493  * Note: it is up to the calling functions to handle sleeps and wakeups.
6494  *  The ring buffer can be used anywhere in the kernel and can not
6495  *  blindly call wake_up. The layer that uses the ring buffer must be
6496  *  responsible for that.
6497  *
6498  * Returns:
6499  *  >=0 if data has been transferred, returns the offset of consumed data.
6500  *  <0 if no data has been transferred.
6501  */
6502 int ring_buffer_read_page(struct trace_buffer *buffer,
6503 			  struct buffer_data_read_page *data_page,
6504 			  size_t len, int cpu, int full)
6505 {
6506 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
6507 	struct ring_buffer_event *event;
6508 	struct buffer_data_page *bpage;
6509 	struct buffer_page *reader;
6510 	unsigned long missed_events;
6511 	unsigned long flags;
6512 	unsigned int commit;
6513 	unsigned int read;
6514 	u64 save_timestamp;
6515 	int ret = -1;
6516 
6517 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
6518 		goto out;
6519 
6520 	/*
6521 	 * If len is not big enough to hold the page header, then
6522 	 * we can not copy anything.
6523 	 */
6524 	if (len <= BUF_PAGE_HDR_SIZE)
6525 		goto out;
6526 
6527 	len -= BUF_PAGE_HDR_SIZE;
6528 
6529 	if (!data_page || !data_page->data)
6530 		goto out;
6531 	if (data_page->order != buffer->subbuf_order)
6532 		goto out;
6533 
6534 	bpage = data_page->data;
6535 	if (!bpage)
6536 		goto out;
6537 
6538 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6539 
6540 	reader = rb_get_reader_page(cpu_buffer);
6541 	if (!reader)
6542 		goto out_unlock;
6543 
6544 	event = rb_reader_event(cpu_buffer);
6545 
6546 	read = reader->read;
6547 	commit = rb_page_size(reader);
6548 
6549 	/* Check if any events were dropped */
6550 	missed_events = cpu_buffer->lost_events;
6551 
6552 	/*
6553 	 * If this page has been partially read or
6554 	 * if len is not big enough to read the rest of the page or
6555 	 * a writer is still on the page, then
6556 	 * we must copy the data from the page to the buffer.
6557 	 * Otherwise, we can simply swap the page with the one passed in.
6558 	 */
6559 	if (read || (len < (commit - read)) ||
6560 	    cpu_buffer->reader_page == cpu_buffer->commit_page ||
6561 	    cpu_buffer->mapped) {
6562 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
6563 		unsigned int rpos = read;
6564 		unsigned int pos = 0;
6565 		unsigned int size;
6566 
6567 		/*
6568 		 * If a full page is expected, this can still be returned
6569 		 * if there's been a previous partial read and the
6570 		 * rest of the page can be read and the commit page is off
6571 		 * the reader page.
6572 		 */
6573 		if (full &&
6574 		    (!read || (len < (commit - read)) ||
6575 		     cpu_buffer->reader_page == cpu_buffer->commit_page))
6576 			goto out_unlock;
6577 
6578 		if (len > (commit - read))
6579 			len = (commit - read);
6580 
6581 		/* Always keep the time extend and data together */
6582 		size = rb_event_ts_length(event);
6583 
6584 		if (len < size)
6585 			goto out_unlock;
6586 
6587 		/* save the current timestamp, since the user will need it */
6588 		save_timestamp = cpu_buffer->read_stamp;
6589 
6590 		/* Need to copy one event at a time */
6591 		do {
6592 			/* We need the size of one event, because
6593 			 * rb_advance_reader only advances by one event,
6594 			 * whereas rb_event_ts_length may include the size of
6595 			 * one or two events.
6596 			 * We have already ensured there's enough space if this
6597 			 * is a time extend. */
6598 			size = rb_event_length(event);
6599 			memcpy(bpage->data + pos, rpage->data + rpos, size);
6600 
6601 			len -= size;
6602 
6603 			rb_advance_reader(cpu_buffer);
6604 			rpos = reader->read;
6605 			pos += size;
6606 
6607 			if (rpos >= commit)
6608 				break;
6609 
6610 			event = rb_reader_event(cpu_buffer);
6611 			/* Always keep the time extend and data together */
6612 			size = rb_event_ts_length(event);
6613 		} while (len >= size);
6614 
6615 		/* update bpage */
6616 		local_set(&bpage->commit, pos);
6617 		bpage->time_stamp = save_timestamp;
6618 
6619 		/* we copied everything to the beginning */
6620 		read = 0;
6621 	} else {
6622 		/* update the entry counter */
6623 		cpu_buffer->read += rb_page_entries(reader);
6624 		cpu_buffer->read_bytes += rb_page_size(reader);
6625 
6626 		/* swap the pages */
6627 		rb_init_page(bpage);
6628 		bpage = reader->page;
6629 		reader->page = data_page->data;
6630 		local_set(&reader->write, 0);
6631 		local_set(&reader->entries, 0);
6632 		reader->read = 0;
6633 		data_page->data = bpage;
6634 
6635 		/*
6636 		 * Use the real_end for the data size,
6637 		 * This gives us a chance to store the lost events
6638 		 * on the page.
6639 		 */
6640 		if (reader->real_end)
6641 			local_set(&bpage->commit, reader->real_end);
6642 	}
6643 	ret = read;
6644 
6645 	cpu_buffer->lost_events = 0;
6646 
6647 	commit = local_read(&bpage->commit);
6648 	/*
6649 	 * Set a flag in the commit field if we lost events
6650 	 */
6651 	if (missed_events) {
6652 		/* If there is room at the end of the page to save the
6653 		 * missed events, then record it there.
6654 		 */
6655 		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
6656 			memcpy(&bpage->data[commit], &missed_events,
6657 			       sizeof(missed_events));
6658 			local_add(RB_MISSED_STORED, &bpage->commit);
6659 			commit += sizeof(missed_events);
6660 		}
6661 		local_add(RB_MISSED_EVENTS, &bpage->commit);
6662 	}
6663 
6664 	/*
6665 	 * This page may be off to user land. Zero it out here.
6666 	 */
6667 	if (commit < buffer->subbuf_size)
6668 		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
6669 
6670  out_unlock:
6671 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6672 
6673  out:
6674 	return ret;
6675 }
6676 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
6677 
6678 /**
6679  * ring_buffer_read_page_data - get pointer to the data in the page.
6680  * @page:  the page to get the data from
6681  *
6682  * Returns pointer to the actual data in this page.
6683  */
6684 void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
6685 {
6686 	return page->data;
6687 }
6688 EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
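
/*
 * A minimal usage sketch of the read-page interface (illustrative only,
 * error handling trimmed). It assumes ring_buffer_alloc_read_page() and
 * ring_buffer_free_read_page() behave as declared in <linux/ring_buffer.h>:
 *
 *	struct buffer_data_read_page *dpage;
 *	void *data;
 *	int ret;
 *
 *	dpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(dpage))
 *		return PTR_ERR(dpage);
 *
 *	// Try to consume a full sub-buffer; ret is the read offset or < 0.
 *	ret = ring_buffer_read_page(buffer, dpage,
 *				    ring_buffer_subbuf_size_get(buffer), cpu, 1);
 *	if (ret >= 0)
 *		data = ring_buffer_read_page_data(dpage);
 *
 *	ring_buffer_free_read_page(buffer, cpu, dpage);
 */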
6689 
6690 /**
6691  * ring_buffer_subbuf_size_get - get size of the sub buffer.
6692  * @buffer: the buffer to get the sub buffer size from
6693  *
6694  * Returns the size of the sub buffer, in bytes (including the sub-buffer header).
6695  */
6696 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
6697 {
6698 	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6699 }
6700 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
6701 
6702 /**
6703  * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
6704  * @buffer: The ring_buffer to get the system sub page order from
6705  *
6706  * By default, one ring buffer sub page equals one system page. This parameter
6707  * is configurable per ring buffer. The size of a ring buffer sub page can be
6708  * extended, but it must be a power-of-two multiple of the system page size.
6709  *
6710  * Returns the order of buffer sub page size, in system pages:
6711  * 0 means the sub buffer size is 1 system page and so forth.
6712  * In case of an error, < 0 is returned.
6713  */
6714 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
6715 {
6716 	if (!buffer)
6717 		return -EINVAL;
6718 
6719 	return buffer->subbuf_order;
6720 }
6721 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
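
/*
 * Note: the order reported here maps to bytes as (1 << order) * PAGE_SIZE.
 * For example, with 4K system pages an order of 2 means 16K sub-buffers,
 * BUF_PAGE_HDR_SIZE bytes of which are the sub-buffer header.
 */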
6722 
6723 /**
6724  * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
6725  * @buffer: The ring_buffer to set the new page size.
6726  * @order: Order of the system pages in one sub buffer page
6727  *
6728  * By default, one ring buffer page equals one system page. This API can be
6729  * used to set a new size of the ring buffer page. The size must be a
6730  * power-of-two multiple of the system page size, which is why the input
6731  * parameter @order is the order of system pages allocated for one ring buffer page:
6732  *  0 - 1 system page
6733  *  1 - 2 system pages
6734  *  2 - 4 system pages
6735  *  ...
6736  *
6737  * Returns 0 on success or < 0 in case of an error.
6738  */
6739 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
6740 {
6741 	struct ring_buffer_per_cpu *cpu_buffer;
6742 	struct buffer_page *bpage, *tmp;
6743 	int old_order, old_size;
6744 	int nr_pages;
6745 	int psize;
6746 	int err;
6747 	int cpu;
6748 
6749 	if (!buffer || order < 0)
6750 		return -EINVAL;
6751 
6752 	if (buffer->subbuf_order == order)
6753 		return 0;
6754 
6755 	psize = (1 << order) * PAGE_SIZE;
6756 	if (psize <= BUF_PAGE_HDR_SIZE)
6757 		return -EINVAL;
6758 
6759 	/* Size of a subbuf cannot be greater than the write counter */
6760 	if (psize > RB_WRITE_MASK + 1)
6761 		return -EINVAL;
6762 
6763 	old_order = buffer->subbuf_order;
6764 	old_size = buffer->subbuf_size;
6765 
6766 	/* prevent another thread from changing buffer sizes */
6767 	mutex_lock(&buffer->mutex);
6768 	atomic_inc(&buffer->record_disabled);
6769 
6770 	/* Make sure all commits have finished */
6771 	synchronize_rcu();
6772 
6773 	buffer->subbuf_order = order;
6774 	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
6775 
6776 	/* Make sure all new buffers are allocated, before deleting the old ones */
6777 	for_each_buffer_cpu(buffer, cpu) {
6778 
6779 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
6780 			continue;
6781 
6782 		cpu_buffer = buffer->buffers[cpu];
6783 
6784 		if (cpu_buffer->mapped) {
6785 			err = -EBUSY;
6786 			goto error;
6787 		}
6788 
6789 		/* Update the number of pages to match the new size */
6790 		nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
6791 		nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
6792 
6793 		/* we need a minimum of two pages */
6794 		if (nr_pages < 2)
6795 			nr_pages = 2;
6796 
6797 		cpu_buffer->nr_pages_to_update = nr_pages;
6798 
6799 		/* Include the reader page */
6800 		nr_pages++;
6801 
6802 		/* Allocate the new size buffer */
6803 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
6804 		if (__rb_allocate_pages(cpu_buffer, nr_pages,
6805 					&cpu_buffer->new_pages)) {
6806 			/* not enough memory for new pages */
6807 			err = -ENOMEM;
6808 			goto error;
6809 		}
6810 	}
6811 
6812 	for_each_buffer_cpu(buffer, cpu) {
6813 		struct buffer_data_page *old_free_data_page;
6814 		struct list_head old_pages;
6815 		unsigned long flags;
6816 
6817 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
6818 			continue;
6819 
6820 		cpu_buffer = buffer->buffers[cpu];
6821 
6822 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6823 
6824 		/* Clear the head bit to make the linked list normal to read */
6825 		rb_head_page_deactivate(cpu_buffer);
6826 
6827 		/*
6828 		 * Collect buffers from the cpu_buffer pages list and the
6829 		 * reader_page on old_pages, so they can be freed later when not
6830 		 * under a spinlock. The pages list is a linked list with no
6831 		 * head; adding old_pages turns it into a regular list with
6832 		 * old_pages being the head.
6833 		 */
6834 		list_add(&old_pages, cpu_buffer->pages);
6835 		list_add(&cpu_buffer->reader_page->list, &old_pages);
6836 
6837 		/* One page was allocated for the reader page */
6838 		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
6839 						     struct buffer_page, list);
6840 		list_del_init(&cpu_buffer->reader_page->list);
6841 
6842 		/* Install the new pages, remove the head from the list */
6843 		cpu_buffer->pages = cpu_buffer->new_pages.next;
6844 		list_del_init(&cpu_buffer->new_pages);
6845 		cpu_buffer->cnt++;
6846 
6847 		cpu_buffer->head_page
6848 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
6849 		cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6850 
6851 		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6852 		cpu_buffer->nr_pages_to_update = 0;
6853 
6854 		old_free_data_page = cpu_buffer->free_page;
6855 		cpu_buffer->free_page = NULL;
6856 
6857 		rb_head_page_activate(cpu_buffer);
6858 
6859 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6860 
6861 		/* Free old sub buffers */
6862 		list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
6863 			list_del_init(&bpage->list);
6864 			free_buffer_page(bpage);
6865 		}
6866 		free_pages((unsigned long)old_free_data_page, old_order);
6867 
6868 		rb_check_pages(cpu_buffer);
6869 	}
6870 
6871 	atomic_dec(&buffer->record_disabled);
6872 	mutex_unlock(&buffer->mutex);
6873 
6874 	return 0;
6875 
6876 error:
6877 	buffer->subbuf_order = old_order;
6878 	buffer->subbuf_size = old_size;
6879 
6880 	atomic_dec(&buffer->record_disabled);
6881 	mutex_unlock(&buffer->mutex);
6882 
6883 	for_each_buffer_cpu(buffer, cpu) {
6884 		cpu_buffer = buffer->buffers[cpu];
6885 
6886 		if (!cpu_buffer->nr_pages_to_update)
6887 			continue;
6888 
6889 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6890 			list_del_init(&bpage->list);
6891 			free_buffer_page(bpage);
6892 		}
6893 	}
6894 
6895 	return err;
6896 }
6897 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
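
/*
 * A minimal usage sketch (illustrative only): growing the sub-buffers of an
 * existing buffer to 4 system pages each and reporting the resulting size.
 * The buffer must not be currently mapped, otherwise the call below fails
 * with -EBUSY:
 *
 *	int err;
 *
 *	err = ring_buffer_subbuf_order_set(buffer, 2);	// 4 system pages
 *	if (err)
 *		return err;
 *
 *	pr_info("sub-buffer size is now %d bytes\n",
 *		ring_buffer_subbuf_size_get(buffer));
 */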
6898 
6899 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6900 {
6901 	struct page *page;
6902 
6903 	if (cpu_buffer->meta_page)
6904 		return 0;
6905 
6906 	page = alloc_page(GFP_USER | __GFP_ZERO);
6907 	if (!page)
6908 		return -ENOMEM;
6909 
6910 	cpu_buffer->meta_page = page_to_virt(page);
6911 
6912 	return 0;
6913 }
6914 
6915 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6916 {
6917 	unsigned long addr = (unsigned long)cpu_buffer->meta_page;
6918 
6919 	free_page(addr);
6920 	cpu_buffer->meta_page = NULL;
6921 }
6922 
6923 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
6924 				   unsigned long *subbuf_ids)
6925 {
6926 	struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6927 	unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
6928 	struct buffer_page *first_subbuf, *subbuf;
6929 	int id = 0;
6930 
6931 	subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
6932 	cpu_buffer->reader_page->id = id++;
6933 
6934 	first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
6935 	do {
6936 		if (WARN_ON(id >= nr_subbufs))
6937 			break;
6938 
6939 		subbuf_ids[id] = (unsigned long)subbuf->page;
6940 		subbuf->id = id;
6941 
6942 		rb_inc_page(&subbuf);
6943 		id++;
6944 	} while (subbuf != first_subbuf);
6945 
6946 	/* install subbuf ID to kern VA translation */
6947 	cpu_buffer->subbuf_ids = subbuf_ids;
6948 
6949 	meta->meta_struct_len = sizeof(*meta);
6950 	meta->nr_subbufs = nr_subbufs;
6951 	meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6952 	meta->meta_page_size = meta->subbuf_size;
6953 
6954 	rb_update_meta_page(cpu_buffer);
6955 }
6956 
6957 static struct ring_buffer_per_cpu *
6958 rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
6959 {
6960 	struct ring_buffer_per_cpu *cpu_buffer;
6961 
6962 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
6963 		return ERR_PTR(-EINVAL);
6964 
6965 	cpu_buffer = buffer->buffers[cpu];
6966 
6967 	mutex_lock(&cpu_buffer->mapping_lock);
6968 
6969 	if (!cpu_buffer->user_mapped) {
6970 		mutex_unlock(&cpu_buffer->mapping_lock);
6971 		return ERR_PTR(-ENODEV);
6972 	}
6973 
6974 	return cpu_buffer;
6975 }
6976 
6977 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6978 {
6979 	mutex_unlock(&cpu_buffer->mapping_lock);
6980 }
6981 
6982 /*
6983  * Fast-path for ring_buffer_(un)map(). Called whenever the meta-page doesn't need
6984  * to be set up or torn down.
6985  */
6986 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
6987 			       bool inc)
6988 {
6989 	unsigned long flags;
6990 
6991 	lockdep_assert_held(&cpu_buffer->mapping_lock);
6992 
6993 	/* mapped is always greater or equal to user_mapped */
6994 	if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
6995 		return -EINVAL;
6996 
6997 	if (inc && cpu_buffer->mapped == UINT_MAX)
6998 		return -EBUSY;
6999 
7000 	if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
7001 		return -EINVAL;
7002 
7003 	mutex_lock(&cpu_buffer->buffer->mutex);
7004 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7005 
7006 	if (inc) {
7007 		cpu_buffer->user_mapped++;
7008 		cpu_buffer->mapped++;
7009 	} else {
7010 		cpu_buffer->user_mapped--;
7011 		cpu_buffer->mapped--;
7012 	}
7013 
7014 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7015 	mutex_unlock(&cpu_buffer->buffer->mutex);
7016 
7017 	return 0;
7018 }
7019 
7020 /*
7021  *   +--------------+  pgoff == 0
7022  *   |   meta page  |
7023  *   +--------------+  pgoff == 1
7024  *   | subbuffer 0  |
7025  *   |              |
7026  *   +--------------+  pgoff == (1 + (1 << subbuf_order))
7027  *   | subbuffer 1  |
7028  *   |              |
7029  *         ...
7030  */
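/*
 * Note: when subbuf_order > 0, __rb_map_vma() below pads the meta-page slot
 * with zero pages, so sub-buffer 0 actually starts at pgoff == (1 << subbuf_order)
 * and each sub-buffer stays aligned to its own size. The diagram above shows
 * the subbuf_order == 0 case.
 */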
7031 #ifdef CONFIG_MMU
7032 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7033 			struct vm_area_struct *vma)
7034 {
7035 	unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
7036 	unsigned int subbuf_pages, subbuf_order;
7037 	struct page **pages;
7038 	int p = 0, s = 0;
7039 	int err;
7040 
7041 	/* Refuse MAP_PRIVATE or writable mappings */
7042 	if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
7043 	    !(vma->vm_flags & VM_MAYSHARE))
7044 		return -EPERM;
7045 
7046 	subbuf_order = cpu_buffer->buffer->subbuf_order;
7047 	subbuf_pages = 1 << subbuf_order;
7048 
7049 	if (subbuf_order && pgoff % subbuf_pages)
7050 		return -EINVAL;
7051 
7052 	/*
7053 	 * Make sure the mapping cannot become writable later. Also tell the VM
7054 	 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
7055 	 */
7056 	vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP,
7057 		     VM_MAYWRITE);
7058 
7059 	lockdep_assert_held(&cpu_buffer->mapping_lock);
7060 
7061 	nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
7062 	nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
7063 	if (nr_pages <= pgoff)
7064 		return -EINVAL;
7065 
7066 	nr_pages -= pgoff;
7067 
7068 	nr_vma_pages = vma_pages(vma);
7069 	if (!nr_vma_pages || nr_vma_pages > nr_pages)
7070 		return -EINVAL;
7071 
7072 	nr_pages = nr_vma_pages;
7073 
7074 	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
7075 	if (!pages)
7076 		return -ENOMEM;
7077 
7078 	if (!pgoff) {
7079 		unsigned long meta_page_padding;
7080 
7081 		pages[p++] = virt_to_page(cpu_buffer->meta_page);
7082 
7083 		/*
7084 		 * Pad with the zero-page to align the meta-page with the
7085 		 * sub-buffers.
7086 		 */
7087 		meta_page_padding = subbuf_pages - 1;
7088 		while (meta_page_padding-- && p < nr_pages) {
7089 			unsigned long __maybe_unused zero_addr =
7090 				vma->vm_start + (PAGE_SIZE * p);
7091 
7092 			pages[p++] = ZERO_PAGE(zero_addr);
7093 		}
7094 	} else {
7095 		/* Skip the meta-page */
7096 		pgoff -= subbuf_pages;
7097 
7098 		s += pgoff / subbuf_pages;
7099 	}
7100 
7101 	while (p < nr_pages) {
7102 		struct page *page;
7103 		int off = 0;
7104 
7105 		if (WARN_ON_ONCE(s >= nr_subbufs)) {
7106 			err = -EINVAL;
7107 			goto out;
7108 		}
7109 
7110 		page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
7111 
7112 		for (; off < (1 << (subbuf_order)); off++, page++) {
7113 			if (p >= nr_pages)
7114 				break;
7115 
7116 			pages[p++] = page;
7117 		}
7118 		s++;
7119 	}
7120 
7121 	err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
7122 
7123 out:
7124 	kfree(pages);
7125 
7126 	return err;
7127 }
7128 #else
7129 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7130 			struct vm_area_struct *vma)
7131 {
7132 	return -EOPNOTSUPP;
7133 }
7134 #endif
7135 
7136 int ring_buffer_map(struct trace_buffer *buffer, int cpu,
7137 		    struct vm_area_struct *vma)
7138 {
7139 	struct ring_buffer_per_cpu *cpu_buffer;
7140 	unsigned long flags, *subbuf_ids;
7141 	int err = 0;
7142 
7143 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
7144 		return -EINVAL;
7145 
7146 	cpu_buffer = buffer->buffers[cpu];
7147 
7148 	mutex_lock(&cpu_buffer->mapping_lock);
7149 
7150 	if (cpu_buffer->user_mapped) {
7151 		err = __rb_map_vma(cpu_buffer, vma);
7152 		if (!err)
7153 			err = __rb_inc_dec_mapped(cpu_buffer, true);
7154 		mutex_unlock(&cpu_buffer->mapping_lock);
7155 		return err;
7156 	}
7157 
7158 	/* prevent another thread from changing buffer/sub-buffer sizes */
7159 	mutex_lock(&buffer->mutex);
7160 
7161 	err = rb_alloc_meta_page(cpu_buffer);
7162 	if (err)
7163 		goto unlock;
7164 
7165 	/* subbuf_ids includes the reader page while nr_pages does not */
7166 	subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
7167 	if (!subbuf_ids) {
7168 		rb_free_meta_page(cpu_buffer);
7169 		err = -ENOMEM;
7170 		goto unlock;
7171 	}
7172 
7173 	atomic_inc(&cpu_buffer->resize_disabled);
7174 
7175 	/*
7176 	 * Lock all readers to block any subbuf swap until the subbuf IDs are
7177 	 * assigned.
7178 	 */
7179 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7180 	rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
7181 
7182 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7183 
7184 	err = __rb_map_vma(cpu_buffer, vma);
7185 	if (!err) {
7186 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7187 		/* This is the first time it is mapped by user */
7188 		cpu_buffer->mapped++;
7189 		cpu_buffer->user_mapped = 1;
7190 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7191 	} else {
7192 		kfree(cpu_buffer->subbuf_ids);
7193 		cpu_buffer->subbuf_ids = NULL;
7194 		rb_free_meta_page(cpu_buffer);
7195 		atomic_dec(&cpu_buffer->resize_disabled);
7196 	}
7197 
7198 unlock:
7199 	mutex_unlock(&buffer->mutex);
7200 	mutex_unlock(&cpu_buffer->mapping_lock);
7201 
7202 	return err;
7203 }
7204 
7205 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
7206 {
7207 	struct ring_buffer_per_cpu *cpu_buffer;
7208 	unsigned long flags;
7209 	int err = 0;
7210 
7211 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
7212 		return -EINVAL;
7213 
7214 	cpu_buffer = buffer->buffers[cpu];
7215 
7216 	mutex_lock(&cpu_buffer->mapping_lock);
7217 
7218 	if (!cpu_buffer->user_mapped) {
7219 		err = -ENODEV;
7220 		goto out;
7221 	} else if (cpu_buffer->user_mapped > 1) {
7222 		__rb_inc_dec_mapped(cpu_buffer, false);
7223 		goto out;
7224 	}
7225 
7226 	mutex_lock(&buffer->mutex);
7227 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7228 
7229 	/* This is the last user space mapping */
7230 	if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
7231 		cpu_buffer->mapped--;
7232 	cpu_buffer->user_mapped = 0;
7233 
7234 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7235 
7236 	kfree(cpu_buffer->subbuf_ids);
7237 	cpu_buffer->subbuf_ids = NULL;
7238 	rb_free_meta_page(cpu_buffer);
7239 	atomic_dec(&cpu_buffer->resize_disabled);
7240 
7241 	mutex_unlock(&buffer->mutex);
7242 
7243 out:
7244 	mutex_unlock(&cpu_buffer->mapping_lock);
7245 
7246 	return err;
7247 }
7248 
7249 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
7250 {
7251 	struct ring_buffer_per_cpu *cpu_buffer;
7252 	struct buffer_page *reader;
7253 	unsigned long missed_events;
7254 	unsigned long reader_size;
7255 	unsigned long flags;
7256 
7257 	cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
7258 	if (IS_ERR(cpu_buffer))
7259 		return (int)PTR_ERR(cpu_buffer);
7260 
7261 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7262 
7263 consume:
7264 	if (rb_per_cpu_empty(cpu_buffer))
7265 		goto out;
7266 
7267 	reader_size = rb_page_size(cpu_buffer->reader_page);
7268 
7269 	/*
7270 	 * There is data to be read on the current reader page, so we can
7271 	 * return to the caller. But before that, assume the caller will read
7272 	 * everything, and update the kernel reader accordingly.
7273 	 */
7274 	if (cpu_buffer->reader_page->read < reader_size) {
7275 		while (cpu_buffer->reader_page->read < reader_size)
7276 			rb_advance_reader(cpu_buffer);
7277 		goto out;
7278 	}
7279 
7280 	reader = rb_get_reader_page(cpu_buffer);
7281 	if (WARN_ON(!reader))
7282 		goto out;
7283 
7284 	/* Check if any events were dropped */
7285 	missed_events = cpu_buffer->lost_events;
7286 
7287 	if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
7288 		if (missed_events) {
7289 			struct buffer_data_page *bpage = reader->page;
7290 			unsigned int commit;
7291 			/*
7292 			 * Use the real_end for the data size.
7293 			 * This gives us a chance to store the lost events
7294 			 * on the page.
7295 			 */
7296 			if (reader->real_end)
7297 				local_set(&bpage->commit, reader->real_end);
7298 			/*
7299 			 * If there is room at the end of the page to save the
7300 			 * missed events, then record it there.
7301 			 */
7302 			commit = rb_page_size(reader);
7303 			if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
7304 				memcpy(&bpage->data[commit], &missed_events,
7305 				       sizeof(missed_events));
7306 				local_add(RB_MISSED_STORED, &bpage->commit);
7307 			}
7308 			local_add(RB_MISSED_EVENTS, &bpage->commit);
7309 		}
7310 	} else {
7311 		/*
7312 		 * There really shouldn't be any missed events if the commit
7313 		 * is on the reader page.
7314 		 */
7315 		WARN_ON_ONCE(missed_events);
7316 	}
7317 
7318 	cpu_buffer->lost_events = 0;
7319 
7320 	goto consume;
7321 
7322 out:
7323 	/* Some archs do not have data cache coherency between kernel and user-space */
7324 	flush_kernel_vmap_range(cpu_buffer->reader_page->page,
7325 				buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
7326 
7327 	rb_update_meta_page(cpu_buffer);
7328 
7329 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7330 	rb_put_mapped_buffer(cpu_buffer);
7331 
7332 	return 0;
7333 }
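
/*
 * A user-space sketch of how the map/unmap/get-reader entry points above are
 * typically consumed through the per-CPU trace_pipe_raw file (illustrative
 * only; the struct layout and the TRACE_MMAP_IOCTL_GET_READER ioctl come from
 * the <linux/trace_mmap.h> UAPI header and may differ on older kernels):
 *
 *	int fd = open("trace_pipe_raw", O_RDONLY | O_NONBLOCK);
 *	struct trace_buffer_meta *meta =
 *		mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
 *
 *	// The sub-buffers follow the meta-page in the file mapping.
 *	void *data = mmap(NULL, meta->nr_subbufs * meta->subbuf_size, PROT_READ,
 *			  MAP_SHARED, fd, meta->meta_page_size);
 *
 *	// Swap the reader page in, then consume events starting at
 *	// data + meta->reader.id * meta->subbuf_size.
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);
 */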
7334 
7335 /*
7336  * We only allocate new buffers, never free them if the CPU goes down.
7337  * If we were to free the buffer, then the user would lose any trace that was in
7338  * the buffer.
7339  */
7340 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
7341 {
7342 	struct trace_buffer *buffer;
7343 	long nr_pages_same;
7344 	int cpu_i;
7345 	unsigned long nr_pages;
7346 
7347 	buffer = container_of(node, struct trace_buffer, node);
7348 	if (cpumask_test_cpu(cpu, buffer->cpumask))
7349 		return 0;
7350 
7351 	nr_pages = 0;
7352 	nr_pages_same = 1;
7353 	/* check if all cpu sizes are same */
7354 	for_each_buffer_cpu(buffer, cpu_i) {
7355 		/* fill in the size from first enabled cpu */
7356 		if (nr_pages == 0)
7357 			nr_pages = buffer->buffers[cpu_i]->nr_pages;
7358 		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
7359 			nr_pages_same = 0;
7360 			break;
7361 		}
7362 	}
7363 	/* allocate minimum pages, user can later expand it */
7364 	if (!nr_pages_same)
7365 		nr_pages = 2;
7366 	buffer->buffers[cpu] =
7367 		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
7368 	if (!buffer->buffers[cpu]) {
7369 		WARN(1, "failed to allocate ring buffer on CPU %u\n",
7370 		     cpu);
7371 		return -ENOMEM;
7372 	}
7373 	smp_wmb();
7374 	cpumask_set_cpu(cpu, buffer->cpumask);
7375 	return 0;
7376 }
7377 
7378 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
7379 /*
7380  * This is a basic integrity check of the ring buffer.
7381  * Late in the boot cycle this test will run when configured in.
7382  * It will kick off a thread per CPU that will go into a loop
7383  * writing to the per cpu ring buffer various sizes of data.
7384  * Some of the data will be large items, some small.
7385  *
7386  * Another thread is created that goes into a spin, sending out
7387  * IPIs to the other CPUs to also write into the ring buffer.
7388  * This is to test the nesting ability of the buffer.
7389  *
7390  * Basic stats are recorded and reported. If something unexpected
7391  * happens in the ring buffer, a big warning is displayed and all
7392  * ring buffers are disabled.
7393  */
7394 static struct task_struct *rb_threads[NR_CPUS] __initdata;
7395 
7396 struct rb_test_data {
7397 	struct trace_buffer *buffer;
7398 	unsigned long		events;
7399 	unsigned long		bytes_written;
7400 	unsigned long		bytes_alloc;
7401 	unsigned long		bytes_dropped;
7402 	unsigned long		events_nested;
7403 	unsigned long		bytes_written_nested;
7404 	unsigned long		bytes_alloc_nested;
7405 	unsigned long		bytes_dropped_nested;
7406 	int			min_size_nested;
7407 	int			max_size_nested;
7408 	int			max_size;
7409 	int			min_size;
7410 	int			cpu;
7411 	int			cnt;
7412 };
7413 
7414 static struct rb_test_data rb_data[NR_CPUS] __initdata;
7415 
7416 /* 1 meg per cpu */
7417 #define RB_TEST_BUFFER_SIZE	1048576
7418 
7419 static char rb_string[] __initdata =
7420 	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
7421 	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
7422 	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
7423 
7424 static bool rb_test_started __initdata;
7425 
7426 struct rb_item {
7427 	int size;
7428 	char str[];
7429 };
7430 
7431 static __init int rb_write_something(struct rb_test_data *data, bool nested)
7432 {
7433 	struct ring_buffer_event *event;
7434 	struct rb_item *item;
7435 	bool started;
7436 	int event_len;
7437 	int size;
7438 	int len;
7439 	int cnt;
7440 
7441 	/* Have nested writes different than what is written */
7442 	cnt = data->cnt + (nested ? 27 : 0);
7443 
7444 	/* Multiply cnt by ~e (68/25 ~= 2.72) to make some unique increment */
7445 	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
7446 
7447 	len = size + sizeof(struct rb_item);
7448 
7449 	started = rb_test_started;
7450 	/* read rb_test_started before checking buffer enabled */
7451 	smp_rmb();
7452 
7453 	event = ring_buffer_lock_reserve(data->buffer, len);
7454 	if (!event) {
7455 		/* Ignore dropped events before test starts. */
7456 		if (started) {
7457 			if (nested)
7458 				data->bytes_dropped_nested += len;
7459 			else
7460 				data->bytes_dropped += len;
7461 		}
7462 		return len;
7463 	}
7464 
7465 	event_len = ring_buffer_event_length(event);
7466 
7467 	if (RB_WARN_ON(data->buffer, event_len < len))
7468 		goto out;
7469 
7470 	item = ring_buffer_event_data(event);
7471 	item->size = size;
7472 	memcpy(item->str, rb_string, size);
7473 
7474 	if (nested) {
7475 		data->bytes_alloc_nested += event_len;
7476 		data->bytes_written_nested += len;
7477 		data->events_nested++;
7478 		if (!data->min_size_nested || len < data->min_size_nested)
7479 			data->min_size_nested = len;
7480 		if (len > data->max_size_nested)
7481 			data->max_size_nested = len;
7482 	} else {
7483 		data->bytes_alloc += event_len;
7484 		data->bytes_written += len;
7485 		data->events++;
7486 		if (!data->min_size || len < data->min_size)
7487 			data->min_size = len;
7488 		if (len > data->max_size)
7489 			data->max_size = len;
7490 	}
7491 
7492  out:
7493 	ring_buffer_unlock_commit(data->buffer);
7494 
7495 	return 0;
7496 }
7497 
7498 static __init int rb_test(void *arg)
7499 {
7500 	struct rb_test_data *data = arg;
7501 
7502 	while (!kthread_should_stop()) {
7503 		rb_write_something(data, false);
7504 		data->cnt++;
7505 
7506 		set_current_state(TASK_INTERRUPTIBLE);
7507 		/* Sleep for a min of 100-300us (based on cnt) and a max of 1ms */
7508 		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
7509 	}
7510 
7511 	return 0;
7512 }
7513 
7514 static __init void rb_ipi(void *ignore)
7515 {
7516 	struct rb_test_data *data;
7517 	int cpu = smp_processor_id();
7518 
7519 	data = &rb_data[cpu];
7520 	rb_write_something(data, true);
7521 }
7522 
7523 static __init int rb_hammer_test(void *arg)
7524 {
7525 	while (!kthread_should_stop()) {
7526 
7527 		/* Send an IPI to all cpus to write data! */
7528 		smp_call_function(rb_ipi, NULL, 1);
7529 		/* No sleep, but for non-preempt kernels, let others run */
7530 		schedule();
7531 	}
7532 
7533 	return 0;
7534 }
7535 
7536 static __init int test_ringbuffer(void)
7537 {
7538 	struct task_struct *rb_hammer;
7539 	struct trace_buffer *buffer;
7540 	int cpu;
7541 	int ret = 0;
7542 
7543 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
7544 		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
7545 		return 0;
7546 	}
7547 
7548 	pr_info("Running ring buffer tests...\n");
7549 
7550 	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
7551 	if (WARN_ON(!buffer))
7552 		return 0;
7553 
7554 	/* Disable buffer so that threads can't write to it yet */
7555 	ring_buffer_record_off(buffer);
7556 
7557 	for_each_online_cpu(cpu) {
7558 		rb_data[cpu].buffer = buffer;
7559 		rb_data[cpu].cpu = cpu;
7560 		rb_data[cpu].cnt = cpu;
7561 		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
7562 						     cpu, "rbtester/%u");
7563 		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
7564 			pr_cont("FAILED\n");
7565 			ret = PTR_ERR(rb_threads[cpu]);
7566 			goto out_free;
7567 		}
7568 	}
7569 
7570 	/* Now create the rb hammer! */
7571 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
7572 	if (WARN_ON(IS_ERR(rb_hammer))) {
7573 		pr_cont("FAILED\n");
7574 		ret = PTR_ERR(rb_hammer);
7575 		goto out_free;
7576 	}
7577 
7578 	ring_buffer_record_on(buffer);
7579 	/*
7580 	 * Show buffer is enabled before setting rb_test_started.
7581 	 * Yes there's a small race window where events could be
7582 	 * dropped and the thread won't catch it. But when a ring
7583 	 * buffer gets enabled, there will always be some kind of
7584 	 * delay before other CPUs see it. Thus, we don't care about
7585 	 * those dropped events. We care about events dropped after
7586 	 * the threads see that the buffer is active.
7587 	 */
7588 	smp_wmb();
7589 	rb_test_started = true;
7590 
7591 	set_current_state(TASK_INTERRUPTIBLE);
7592 	/* Just run for 10 seconds */
7593 	schedule_timeout(10 * HZ);
7594 
7595 	kthread_stop(rb_hammer);
7596 
7597  out_free:
7598 	for_each_online_cpu(cpu) {
7599 		if (!rb_threads[cpu])
7600 			break;
7601 		kthread_stop(rb_threads[cpu]);
7602 	}
7603 	if (ret) {
7604 		ring_buffer_free(buffer);
7605 		return ret;
7606 	}
7607 
7608 	/* Report! */
7609 	pr_info("finished\n");
7610 	for_each_online_cpu(cpu) {
7611 		struct ring_buffer_event *event;
7612 		struct rb_test_data *data = &rb_data[cpu];
7613 		struct rb_item *item;
7614 		unsigned long total_events;
7615 		unsigned long total_dropped;
7616 		unsigned long total_written;
7617 		unsigned long total_alloc;
7618 		unsigned long total_read = 0;
7619 		unsigned long total_size = 0;
7620 		unsigned long total_len = 0;
7621 		unsigned long total_lost = 0;
7622 		unsigned long lost;
7623 		int big_event_size;
7624 		int small_event_size;
7625 
7626 		ret = -1;
7627 
7628 		total_events = data->events + data->events_nested;
7629 		total_written = data->bytes_written + data->bytes_written_nested;
7630 		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
7631 		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
7632 
7633 		big_event_size = data->max_size + data->max_size_nested;
7634 		small_event_size = data->min_size + data->min_size_nested;
7635 
7636 		pr_info("CPU %d:\n", cpu);
7637 		pr_info("              events:    %ld\n", total_events);
7638 		pr_info("       dropped bytes:    %ld\n", total_dropped);
7639 		pr_info("       alloced bytes:    %ld\n", total_alloc);
7640 		pr_info("       written bytes:    %ld\n", total_written);
7641 		pr_info("       biggest event:    %d\n", big_event_size);
7642 		pr_info("      smallest event:    %d\n", small_event_size);
7643 
7644 		if (RB_WARN_ON(buffer, total_dropped))
7645 			break;
7646 
7647 		ret = 0;
7648 
7649 		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
7650 			total_lost += lost;
7651 			item = ring_buffer_event_data(event);
7652 			total_len += ring_buffer_event_length(event);
7653 			total_size += item->size + sizeof(struct rb_item);
7654 			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
7655 				pr_info("FAILED!\n");
7656 				pr_info("buffer had: %.*s\n", item->size, item->str);
7657 				pr_info("expected:   %.*s\n", item->size, rb_string);
7658 				RB_WARN_ON(buffer, 1);
7659 				ret = -1;
7660 				break;
7661 			}
7662 			total_read++;
7663 		}
7664 		if (ret)
7665 			break;
7666 
7667 		ret = -1;
7668 
7669 		pr_info("         read events:   %ld\n", total_read);
7670 		pr_info("         lost events:   %ld\n", total_lost);
7671 		pr_info("        total events:   %ld\n", total_lost + total_read);
7672 		pr_info("  recorded len bytes:   %ld\n", total_len);
7673 		pr_info(" recorded size bytes:   %ld\n", total_size);
7674 		if (total_lost) {
7675 			pr_info(" With dropped events, record len and size may not match\n"
7676 				" alloced and written from above\n");
7677 		} else {
7678 			if (RB_WARN_ON(buffer, total_len != total_alloc ||
7679 				       total_size != total_written))
7680 				break;
7681 		}
7682 		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
7683 			break;
7684 
7685 		ret = 0;
7686 	}
7687 	if (!ret)
7688 		pr_info("Ring buffer PASSED!\n");
7689 
7690 	ring_buffer_free(buffer);
7691 	return 0;
7692 }
7693 
7694 late_initcall(test_ringbuffer);
7695 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
7696