Lines Matching +full:min +full:- +full:sample +full:- +full:time +full:- +full:nsecs

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
12 * Copyright (C) 2004-2006 Ingo Molnar
61 * A selftest will look into the ring-buffer to count the
63 * insertions into the ring-buffer such as trace_printk could occur
64 * at the same time, giving false positive or negative results.
69 * If boot-time tracing including tracers/events via kernel cmdline
121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
232 int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
238 return -1;
264 int left = sizeof(boot_instance_info) - boot_instance_index;
268 return -1;
331 if (export->flags & flag) {
334 export->write(export, entry, size);
348 if (export->flags & TRACE_EXPORT_FUNCTION)
351 if (export->flags & TRACE_EXPORT_EVENT)
354 if (export->flags & TRACE_EXPORT_MARKER)
360 if (export->flags & TRACE_EXPORT_FUNCTION)
363 if (export->flags & TRACE_EXPORT_EVENT)
366 if (export->flags & TRACE_EXPORT_MARKER)
379 export = rcu_dereference_raw_check(export->next);
388 rcu_assign_pointer(export->next, *list);
392 * the export->next pointer is valid before another CPU sees
403 for (p = list; *p != NULL; p = &(*p)->next)
408 return -1;
410 rcu_assign_pointer(*p, (*p)->next);
436 if (WARN_ON_ONCE(!export->write))
437 return -1;
481 * The global_trace is the descriptor that holds the top-level tracing
492 tr->ring_buffer_expanded = true;
500 int ret = -ENODEV;
505 tr->ref++;
517 WARN_ON(!this_tr->ref);
518 this_tr->ref--;
522 * trace_array_put - Decrement the reference counter for this trace array.
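A minimal sketch of the matching get/put pattern from a kernel module, assuming the trace_array_get_by_name() lookup (later kernels added a second "systems" argument to it):

    #include <linux/trace.h>

    static void example_use_instance(void)
    {
            struct trace_array *tr;

            tr = trace_array_get_by_name("foo");    /* find or create; takes a reference */
            if (!tr)
                    return;

            /* ... use tr, e.g. trace_array_printk(tr, _THIS_IP_, "hi\n") ... */

            trace_array_put(tr);                    /* drop the tr->ref taken above */
    }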
550 return -ENODEV;
553 return -ENODEV;
562 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
563 !filter_match_preds(call->filter, rec)) {
572 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
585 * trace_ignore_this_task - should a task be ignored for tracing
608 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
610 trace_find_filtered_pid(filtered_no_pids, task->pid));
614 * trace_filter_add_remove_task - Add or remove a task from a pid_list
634 if (!trace_find_filtered_pid(pid_list, self->pid))
640 trace_pid_list_set(pid_list, task->pid);
642 trace_pid_list_clear(pid_list, task->pid);
646 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
675 * trace_pid_start - Used for seq_file to start reading pid lists
704 * trace_pid_show - show the current pid in seq_file processing
713 unsigned long pid = (unsigned long)v - 1;
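The stored value is the pid plus one so that pid 0 stays representable. A sketch of how these three helpers are typically wired into a seq_file (my_pid_list and the ops names are hypothetical):

    static struct trace_pid_list *my_pid_list;

    static void *my_start(struct seq_file *m, loff_t *pos)
    {
            return trace_pid_start(my_pid_list, pos);       /* pid + 1, or NULL at end */
    }

    static void *my_next(struct seq_file *m, void *v, loff_t *pos)
    {
            return trace_pid_next(my_pid_list, v, pos);
    }

    static void my_stop(struct seq_file *m, void *v)
    {
            /* typically drops the lock taken in my_start() */
    }

    static const struct seq_operations my_pid_seq_ops = {
            .start  = my_start,
            .next   = my_next,
            .stop   = my_stop,
            .show   = trace_pid_show,       /* prints (unsigned long)v - 1 */
    };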
736 return -ENOMEM;
747 return -ENOMEM;
771 cnt -= ret;
776 ret = -EINVAL;
783 ret = -1;
814 if (!buf->buffer)
817 ts = ring_buffer_time_stamp(buf->buffer);
818 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
829 * tracing_is_enabled - Show if global_trace has been enabled
856 * boot time and run time configurable.
888 * These primitives don't distinguish read-only and read-consume access.
889 * Multiple read-only accesses are also serialized.
1002 if (tr->array_buffer.buffer)
1003 ring_buffer_record_on(tr->array_buffer.buffer);
1012 tr->buffer_disabled = 0;
1018 * tracing_on - enable tracing buffers
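tracing_on() and tracing_off() are the in-kernel counterparts of writing '1'/'0' to the tracing_on file. A common debugging pattern, sketched with a hypothetical do_something_suspect():

    #include <linux/kernel.h>       /* tracing_on(), tracing_off() */

    static void capture_window(void)
    {
            tracing_on();                   /* let the ring buffer record */
            do_something_suspect();         /* hypothetical code under test */
            tracing_off();                  /* freeze the buffer for later reading */
    }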
1037 /* Length is in event->array[0] */
1038 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1056 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1068 buffer = tr->array_buffer.buffer;
1078 entry->ip = ip;
1080 memcpy(&entry->buf, str, size);
1083 if (entry->buf[size - 1] != '\n') {
1084 entry->buf[size] = '\n';
1085 entry->buf[size + 1] = '\0';
1087 entry->buf[size] = '\0';
1098 * __trace_puts - write a constant string into the trace buffer.
1110 * __trace_bputs - write the pointer to a constant string into trace buffer
1139 entry->ip = ip;
1140 entry->str = str;
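For compile-time constant strings, the trace_puts() macro front end takes the cheap path above and records only the ip and the string pointer instead of copying the text. A one-line sketch:

    static void example_puts(void)
    {
            /* constant literal: compiles down to __trace_bputs(_THIS_IP_, ...) */
            trace_puts("reached the slow path\n");
    }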
1156 struct tracer *tracer = tr->current_trace;
1165 if (!tr->allocated_snapshot) {
1173 if (tracer->use_max_tr) {
1190 * tracing_snapshot - take a snapshot of the current buffer.
1212 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1217 * conditional - the snapshot will only happen if the
1231 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1235 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1239 * the tr->max_lock lock, which the code calling
1249 arch_spin_lock(&tr->max_lock);
1251 if (tr->cond_snapshot)
1252 cond_data = tr->cond_snapshot->cond_data;
1254 arch_spin_unlock(&tr->max_lock);
1270 if (!tr->allocated_snapshot) {
1273 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1274 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1279 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1280 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1284 tr->allocated_snapshot = true;
1294 * The max_tr ring buffer has some state (e.g. ring->clock) and
1297 ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1298 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1299 set_buffer_entries(&tr->max_buffer, 1);
1300 tracing_reset_online_cpus(&tr->max_buffer);
1301 tr->allocated_snapshot = false;
1305 * tracing_alloc_snapshot - allocate snapshot buffer.
1308 * allocated - it doesn't also take a snapshot.
1327 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
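A sketch of taking a snapshot from kernel code, assuming CONFIG_TRACER_SNAPSHOT; tracing_snapshot_alloc() bundles the two steps shown separately here:

    static void snap_if_failed(int err)
    {
            if (!err)
                    return;

            if (tracing_alloc_snapshot() == 0)      /* make sure max_tr is allocated */
                    tracing_snapshot();             /* swap main and snapshot buffers */
    }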
1350 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1357 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1370 return -ENOMEM;
1372 cond_snapshot->cond_data = cond_data;
1373 cond_snapshot->update = update;
1381 if (tr->current_trace->use_max_tr) {
1382 ret = -EBUSY;
1394 if (tr->cond_snapshot) {
1395 ret = -EBUSY;
1400 arch_spin_lock(&tr->max_lock);
1401 tr->cond_snapshot = cond_snapshot;
1402 arch_spin_unlock(&tr->max_lock);
1417 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1422 * otherwise return -EINVAL.
1431 arch_spin_lock(&tr->max_lock);
1433 if (!tr->cond_snapshot)
1434 ret = -EINVAL;
1436 kfree(tr->cond_snapshot);
1437 tr->cond_snapshot = NULL;
1440 arch_spin_unlock(&tr->max_lock);
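A sketch of arming a conditional snapshot. The update() callback runs under tr->max_lock and decides whether a given tracing_snapshot_cond() hit really swaps the buffers (some_measured_latency() is hypothetical):

    static bool my_update(struct trace_array *tr, void *cond_data)
    {
            unsigned long *threshold = cond_data;

            return some_measured_latency() > *threshold;    /* hypothetical metric */
    }

    static int arm_cond_snapshot(struct trace_array *tr, unsigned long *threshold)
    {
            /* -EBUSY if the current tracer uses max_tr or one is already armed */
            return tracing_snapshot_cond_enable(tr, threshold, my_update);
    }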
1460 return -ENODEV;
1476 return -ENODEV;
1489 if (tr->array_buffer.buffer)
1490 ring_buffer_record_off(tr->array_buffer.buffer);
1499 tr->buffer_disabled = 1;
1505 * tracing_off - turn off tracing buffers
1528 * tracer_tracing_is_on - show real state of ring buffer enabled
1535 if (tr->array_buffer.buffer)
1536 return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1537 return !tr->buffer_disabled;
1541 * tracing_is_on - show state of ring buffers enabled
1581 unsigned long nsecs_to_usecs(unsigned long nsecs)
1583 return nsecs / 1000;
1620 if (trace_clocks[tr->clock_id].in_ns)
1627 * trace_parser_get_init - gets the buffer for trace parser
1633 parser->buffer = kmalloc(size, GFP_KERNEL);
1634 if (!parser->buffer)
1637 parser->size = size;
1642 * trace_parser_put - frees the buffer for trace parser
1646 kfree(parser->buffer);
1647 parser->buffer = NULL;
1651 * trace_get_user - reads the user input string separated by space
1676 cnt--;
1682 if (!parser->cont) {
1689 cnt--;
1692 parser->idx = 0;
1702 /* read the non-space input */
1704 if (parser->idx < parser->size - 1)
1705 parser->buffer[parser->idx++] = ch;
1707 ret = -EINVAL;
1714 cnt--;
1719 parser->buffer[parser->idx] = 0;
1720 parser->cont = false;
1721 } else if (parser->idx < parser->size - 1) {
1722 parser->cont = true;
1723 parser->buffer[parser->idx++] = ch;
1725 parser->buffer[parser->idx] = 0;
1727 ret = -EINVAL;
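This parser trio is what tracefs write handlers use to split space-separated user input into NUL-terminated tokens. A sketch of the usual shape (handle_token() is hypothetical):

    static ssize_t my_write(struct file *filp, const char __user *ubuf,
                            size_t cnt, loff_t *ppos)
    {
            struct trace_parser parser;
            ssize_t ret;

            if (trace_parser_get_init(&parser, 64)) /* 64-byte token buffer */
                    return -ENOMEM;

            ret = trace_get_user(&parser, ubuf, cnt, ppos);
            if (ret >= 0 && trace_parser_loaded(&parser))
                    handle_token(parser.buffer);    /* one token per call */

            trace_parser_put(&parser);
            return ret;
    }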
1743 if (trace_seq_used(s) <= s->readpos)
1744 return -EBUSY;
1746 len = trace_seq_used(s) - s->readpos;
1749 memcpy(buf, s->buffer + s->readpos, cnt);
1751 s->readpos += cnt;
1768 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1775 queue_work(fsnotify_wq, &tr->fsnotify_work);
1781 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1782 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1783 tr->d_max_latency = trace_create_file("tracing_max_latency",
1795 return -ENOMEM;
1807 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1811 irq_work_queue(&tr->fsnotify_irqwork);
1823 * Copy the new maximum trace into the separate maximum-trace
1830 struct array_buffer *trace_buf = &tr->array_buffer;
1831 struct array_buffer *max_buf = &tr->max_buffer;
1832 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1833 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1835 max_buf->cpu = cpu;
1836 max_buf->time_start = data->preempt_timestamp;
1838 max_data->saved_latency = tr->max_latency;
1839 max_data->critical_start = data->critical_start;
1840 max_data->critical_end = data->critical_end;
1842 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1843 max_data->pid = tsk->pid;
1849 max_data->uid = current_uid();
1851 max_data->uid = task_uid(tsk);
1853 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1854 max_data->policy = tsk->policy;
1855 max_data->rt_priority = tsk->rt_priority;
1863 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1876 if (tr->stop_count)
1881 if (!tr->allocated_snapshot) {
1883 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1887 arch_spin_lock(&tr->max_lock);
1890 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1891 ring_buffer_record_on(tr->max_buffer.buffer);
1893 ring_buffer_record_off(tr->max_buffer.buffer);
1896 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1897 arch_spin_unlock(&tr->max_lock);
1901 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1905 arch_spin_unlock(&tr->max_lock);
1908 ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1912 * update_max_tr_single - only copy one trace over, and reset the rest
1924 if (tr->stop_count)
1928 if (!tr->allocated_snapshot) {
1930 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1934 arch_spin_lock(&tr->max_lock);
1936 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1938 if (ret == -EBUSY) {
1946 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1950 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1953 arch_spin_unlock(&tr->max_lock);
1963 if (trace_buffer_iter(iter, iter->cpu_file))
1966 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
1973 if (iter->snapshot)
1974 iter->array_buffer = &iter->tr->max_buffer;
1995 return -ENOMEM;
1997 selftest->type = type;
1998 list_add(&selftest->list, &postponed_selftests);
2005 struct tracer *saved_tracer = tr->current_trace;
2008 if (!type->selftest || tracing_selftest_disabled)
2021 type->name);
2032 tracing_reset_online_cpus(&tr->array_buffer);
2034 tr->current_trace = type;
2037 if (type->use_max_tr) {
2039 if (tr->ring_buffer_expanded)
2040 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2042 tr->allocated_snapshot = true;
2047 pr_info("Testing tracer %s: ", type->name);
2048 ret = type->selftest(type, tr);
2050 tr->current_trace = saved_tracer;
2055 return -1;
2058 tracing_reset_online_cpus(&tr->array_buffer);
2061 if (type->use_max_tr) {
2062 tr->allocated_snapshot = false;
2065 if (tr->ring_buffer_expanded)
2066 ring_buffer_resize(tr->max_buffer.buffer, 1,
2080 * Tests can take a long time, especially if they are run one after the
2114 ret = run_tracer_selftest(p->type);
2118 p->type->name);
2120 for (t = trace_types; t; t = t->next) {
2121 if (t == p->type) {
2122 *last = t->next;
2125 last = &t->next;
2128 list_del(&p->list);
2155 * register_tracer - register a tracer with the ftrace system.
2165 if (!type->name) {
2167 return -1;
2170 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2172 return -1;
2177 type->name);
2178 return -EPERM;
2183 for (t = trace_types; t; t = t->next) {
2184 if (strcmp(type->name, t->name) == 0) {
2187 type->name);
2188 ret = -1;
2193 if (!type->set_flag)
2194 type->set_flag = &dummy_set_flag;
2195 if (!type->flags) {
2197 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2198 if (!type->flags) {
2199 ret = -ENOMEM;
2202 type->flags->val = 0;
2203 type->flags->opts = dummy_tracer_opt;
2205 if (!type->flags->opts)
2206 type->flags->opts = dummy_tracer_opt;
2209 type->flags->trace = type;
2215 type->next = trace_types;
2225 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2228 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2230 tracing_set_tracer(&global_trace, type->name);
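A minimal sketch of what register_tracer() expects from a tracer; this only works from built-in code, since the symbol is not exported to modules:

    static int my_tracer_init(struct trace_array *tr)
    {
            /* arm hooks, reset per-cpu state, ... */
            return 0;
    }

    static void my_tracer_reset(struct trace_array *tr)
    {
            /* undo everything my_tracer_init() set up */
    }

    static struct tracer my_tracer __read_mostly = {
            .name   = "mytracer",           /* must be shorter than MAX_TRACER_SIZE */
            .init   = my_tracer_init,
            .reset  = my_tracer_reset,
    };

    static int __init my_tracer_register(void)
    {
            return register_tracer(&my_tracer);
    }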
2244 struct trace_buffer *buffer = buf->buffer;
2260 struct trace_buffer *buffer = buf->buffer;
2270 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2285 if (!tr->clear_trace)
2287 tr->clear_trace = false;
2288 tracing_reset_online_cpus(&tr->array_buffer);
2290 tracing_reset_online_cpus(&tr->max_buffer);
2330 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2340 int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
2342 kfree(s->map_cmdline_to_pid);
2367 val = (size - sizeof(*s)) / TASK_COMM_LEN;
2368 s->cmdline_num = val;
2370 s->map_cmdline_to_pid = kmalloc_array(val,
2371 sizeof(*s->map_cmdline_to_pid),
2373 if (!s->map_cmdline_to_pid) {
2378 s->cmdline_idx = 0;
2379 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2380 sizeof(s->map_pid_to_cmdline));
2381 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2382 val * sizeof(*s->map_cmdline_to_pid));
2391 return savedcmd ? 0 : -ENOMEM;
2407 raw_spin_lock_irqsave(&tr->start_lock, flags);
2408 if (--tr->stop_count) {
2409 if (WARN_ON_ONCE(tr->stop_count < 0)) {
2411 tr->stop_count = 0;
2417 arch_spin_lock(&tr->max_lock);
2419 buffer = tr->array_buffer.buffer;
2424 buffer = tr->max_buffer.buffer;
2429 arch_spin_unlock(&tr->max_lock);
2432 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2436 * tracing_start - quick start of the tracer
2452 raw_spin_lock_irqsave(&tr->start_lock, flags);
2453 if (tr->stop_count++)
2457 arch_spin_lock(&tr->max_lock);
2459 buffer = tr->array_buffer.buffer;
2464 buffer = tr->max_buffer.buffer;
2469 arch_spin_unlock(&tr->max_lock);
2472 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2476 * tracing_stop - quick stop of the tracer
2491 if (!tsk->pid)
2494 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2500 * so if we miss here, then better luck next time.
2509 idx = savedcmd->map_pid_to_cmdline[tpid];
2511 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2513 savedcmd->map_pid_to_cmdline[tpid] = idx;
2514 savedcmd->cmdline_idx = idx;
2517 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2518 set_cmdline(idx, tsk->comm);
2540 tpid = pid & (PID_MAX_DEFAULT - 1);
2541 map = savedcmd->map_pid_to_cmdline[tpid];
2543 tpid = savedcmd->map_cmdline_to_pid[map];
2567 * if we observe a non-NULL tgid_map then we also observe the correct
2590 if (!tsk->pid)
2593 ptr = trace_find_tgid_ptr(tsk->pid);
2597 *ptr = tsk->tgid;
2611 * tracing_record_taskinfo - record the task info of a task
2639 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2696 return current->migration_disabled;
2740 * trace_buffered_event_enable - enable buffering events
2800 * trace_buffered_event_disable - disable buffering events
2816 if (--trace_buffered_event_ref)
2836 * could wrongly decide to use the pointed-to buffer which is now freed.
2854 struct trace_array *tr = trace_file->tr;
2857 *current_rb = tr->array_buffer.buffer;
2859 if (!tr->no_filter_buffering_ref &&
2860 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2867 * (see include/linux/ring_buffer.h for details on
2880 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2903 entry->array[0] = len;
2921 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2945 event_call = fbuffer->trace_file->event_call;
2946 if (!event_call || !event_call->event.funcs ||
2947 !event_call->event.funcs->trace)
2950 file = fbuffer->trace_file;
2951 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2952 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2953 !filter_match_preds(file->filter, fbuffer->entry)))
2956 event = &fbuffer->trace_file->event_call->event;
2959 trace_seq_init(&iter->seq);
2960 iter->ent = fbuffer->entry;
2961 event_call->event.funcs->trace(iter, 0, event);
2962 trace_seq_putc(&iter->seq, 0);
2963 printk("%s", iter->seq.buffer);
3004 struct trace_event_file *file = fbuffer->trace_file;
3006 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
3007 fbuffer->entry, &tt))
3014 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
3016 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
3017 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
3068 struct trace_buffer *buffer = tr->array_buffer.buffer;
3077 entry->ip = ip;
3078 entry->parent_ip = parent_ip;
3128 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3144 size = ARRAY_SIZE(fstack->calls);
3147 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3150 nr_entries = stack_trace_save(fstack->calls, size, skip);
3160 entry->size = nr_entries;
3161 memcpy(&entry->caller, fstack->calls,
3180 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3189 struct trace_buffer *buffer = tr->array_buffer.buffer;
3214 * trace_dump_stack - record a stack back trace in the trace buffer
3242 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3268 entry->tgid = current->tgid;
3269 memset(&entry->caller, 0, sizeof(entry->caller));
3271 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3294 entry->bottom_delta_ts = delta & U32_MAX;
3295 entry->top_delta_ts = (delta >> 32);
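The 64-bit delta is split across two smaller fields to keep the entry packed; a reader rebuilds it as sketched below (assuming the func_repeats_entry layout these lines write to):

    static u64 rebuild_delta(const struct func_repeats_entry *entry)
    {
            return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
    }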
3302 struct trace_buffer *buffer = tr->array_buffer.buffer;
3312 delta = ring_buffer_event_time_stamp(buffer, event) -
3313 last_info->ts_last_call;
3316 entry->ip = last_info->ip;
3317 entry->parent_ip = last_info->parent_ip;
3318 entry->count = last_info->count;
3340 if (!trace_percpu_buffer || buffer->nesting >= 4)
3343 buffer->nesting++;
3347 return &buffer->buffer[buffer->nesting - 1][0];
3354 this_cpu_dec(trace_percpu_buffer->nesting);
3366 return -ENOMEM;
3435 * trace_vbprintk - write binary msg to tracing buffer
3472 buffer = tr->array_buffer.buffer;
3479 entry->ip = ip;
3480 entry->fmt = fmt;
3482 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
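trace_vbprintk() backs trace_printk() when the format is a compile-time constant: only the format pointer plus the binary arguments are stored, and the string is rendered at read time. Typical use:

    static void example_bprintk(unsigned long addr, unsigned long flags)
    {
            /* lands in the ring buffer, not in dmesg */
            trace_printk("fault at %lx, flags %lx\n", addr, flags);
    }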
3538 entry->ip = ip;
3540 memcpy(&entry->buf, tbuffer, len + 1);
3564 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3568 * trace_array_printk - Print a message to a specific instance
3595 return -ENOENT;
3601 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3612 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3622 return -ENOENT;
3626 return -EINVAL;
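A sketch of the instance printk flow, loosely following samples/ftrace/sample-trace-array.c (the exact trace_array_get_by_name() signature varies between kernel versions):

    static struct trace_array *tr;

    static int __init example_instance_init(void)
    {
            tr = trace_array_get_by_name("example");        /* creates instances/example */
            if (!tr)
                    return -ENOMEM;

            if (trace_array_init_printk(tr))                /* allocate printk buffers */
                    return -EINVAL;

            trace_array_printk(tr, _THIS_IP_, "hello from the instance\n");
            return 0;
    }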
3657 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3659 iter->idx++;
3675 (unsigned long)-1 : 0;
3677 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3682 iter->ent_size = ring_buffer_event_length(event);
3685 iter->ent_size = 0;
3693 struct trace_buffer *buffer = iter->array_buffer->buffer;
3696 int cpu_file = iter->cpu_file;
3698 int next_cpu = -1;
3731 next_size = iter->ent_size;
3735 iter->ent_size = next_size;
3757 * iter->tr is NULL when used with tp_printk, which makes
3760 if (!iter->tr || iter->fmt == static_fmt_buf)
3763 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3766 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3767 iter->fmt = tmp;
3786 if ((addr >= (unsigned long)iter->ent) &&
3787 (addr < (unsigned long)iter->ent + iter->ent_size))
3791 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3792 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3806 if (!iter->ent)
3809 trace_event = ftrace_find_event(iter->ent->type);
3814 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3818 if (within_module_core(addr, event->module))
3858 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3863 * This writes the data into the @iter->seq buffer using the data from
3883 if (iter->fmt == static_fmt_buf)
3894 if (i + 1 >= iter->fmt_size) {
3930 strncpy(iter->fmt, p, i);
3931 iter->fmt[i] = '\0';
3932 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3935 * If iter->seq is full, the above call no longer guarantees
3941 if (iter->seq.full) {
3955 * was saved at the time of the event, but may not be
3958 * instead. See samples/trace_events/trace-events-sample.h
3963 fmt, seq_buf_str(&iter->seq.seq))) {
3968 if (len + 1 > iter->fmt_size)
3969 len = iter->fmt_size - 1;
3972 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3973 iter->fmt[len] = 0;
3976 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3977 iter->fmt_size);
3980 trace_seq_printf(&iter->seq, "(0x%px)", str);
3982 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3983 str, iter->fmt);
3984 str = "[UNSAFE-MEMORY]";
3985 strcpy(iter->fmt, "%s");
3987 strncpy(iter->fmt, p + i, j + 1);
3988 iter->fmt[j+1] = '\0';
3991 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3993 trace_seq_printf(&iter->seq, iter->fmt, str);
3999 trace_seq_vprintf(&iter->seq, p, ap);
4010 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
4014 new_fmt = q = iter->fmt;
4016 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
4020 q += iter->fmt - new_fmt;
4021 new_fmt = iter->fmt;
4027 if (p[-1] == '%') {
4049 int ent_size = iter->ent_size;
4053 * If called from ftrace_dump(), then the iter->temp buffer
4057 * used to add markers when two consecutive events' time
4060 if (iter->temp == static_temp_buf &&
4066 * call ring_buffer_peek() that may make the contents of iter->ent
4067 * undefined. Need to copy iter->ent now.
4069 if (iter->ent && iter->ent != iter->temp) {
4070 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4071 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4073 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4076 kfree(iter->temp);
4077 iter->temp = temp;
4078 iter->temp_size = iter->ent_size;
4080 memcpy(iter->temp, iter->ent, iter->ent_size);
4081 iter->ent = iter->temp;
4085 iter->ent_size = ent_size;
4093 iter->ent = __find_next_entry(iter, &iter->cpu,
4094 &iter->lost_events, &iter->ts);
4096 if (iter->ent)
4099 return iter->ent ? iter : NULL;
4104 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4105 &iter->lost_events);
4110 struct trace_iterator *iter = m->private;
4114 WARN_ON_ONCE(iter->leftover);
4119 if (iter->idx > i)
4122 if (iter->idx < 0)
4127 while (ent && iter->idx < i)
4130 iter->pos = *pos;
4141 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4155 if (ts >= iter->array_buffer->time_start)
4161 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4170 struct trace_iterator *iter = m->private;
4171 struct trace_array *tr = iter->tr;
4172 int cpu_file = iter->cpu_file;
4178 if (unlikely(tr->current_trace != iter->trace)) {
4179 /* Close iter->trace before switching to the new current tracer */
4180 if (iter->trace->close)
4181 iter->trace->close(iter);
4182 iter->trace = tr->current_trace;
4184 if (iter->trace->open)
4185 iter->trace->open(iter);
4190 if (iter->snapshot && iter->trace->use_max_tr)
4191 return ERR_PTR(-EBUSY);
4194 if (*pos != iter->pos) {
4195 iter->ent = NULL;
4196 iter->cpu = 0;
4197 iter->idx = -1;
4205 iter->leftover = 0;
4214 if (iter->leftover)
4217 l = *pos - 1;
4229 struct trace_iterator *iter = m->private;
4232 if (iter->snapshot && iter->trace->use_max_tr)
4236 trace_access_unlock(iter->cpu_file);
4246 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4250 * ones before the time stamp.
4252 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4253 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4258 ring_buffer_overrun_cpu(buf->buffer, cpu);
4286 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4298 get_total_entries(&tr->array_buffer, &total, &entries);
4305 seq_puts(m, "# _------=> CPU# \n"
4306 "# / _-----=> irqs-off/BH-disabled\n"
4307 "# | / _----=> need-resched \n"
4308 "# || / _---=> hardirq/softirq \n"
4309 "# ||| / _--=> preempt-depth \n"
4310 "# |||| / _-=> migrate-disable \n"
4312 "# cmd pid |||||| time | caller \n"
4322 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4334 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4347 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4348 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4349 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4350 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4351 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4353 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4361 struct array_buffer *buf = iter->array_buffer;
4362 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4363 struct tracer *type = iter->trace;
4366 const char *name = type->name;
4372 seq_puts(m, "# -----------------------------------"
4373 "---------------------------------\n");
4376 nsecs_to_usecs(data->saved_latency),
4379 buf->cpu,
4392 seq_puts(m, "# -----------------\n");
4393 seq_printf(m, "# | task: %.16s-%d "
4395 data->comm, data->pid,
4396 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4397 data->policy, data->rt_priority);
4398 seq_puts(m, "# -----------------\n");
4400 if (data->critical_start) {
4402 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4403 trace_print_seq(m, &iter->seq);
4405 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4406 trace_print_seq(m, &iter->seq);
4415 struct trace_seq *s = &iter->seq;
4416 struct trace_array *tr = iter->tr;
4418 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4421 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4424 if (cpumask_available(iter->started) &&
4425 cpumask_test_cpu(iter->cpu, iter->started))
4428 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4431 if (cpumask_available(iter->started))
4432 cpumask_set_cpu(iter->cpu, iter->started);
4435 if (iter->idx > 1)
4437 iter->cpu);
4442 struct trace_array *tr = iter->tr;
4443 struct trace_seq *s = &iter->seq;
4444 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4448 entry = iter->ent;
4452 event = ftrace_find_event(entry->type);
4454 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4455 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4465 if (tr->trace_flags & TRACE_ITER_FIELDS)
4467 return event->funcs->trace(iter, sym_flags, event);
4470 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4477 struct trace_array *tr = iter->tr;
4478 struct trace_seq *s = &iter->seq;
4482 entry = iter->ent;
4484 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4486 entry->pid, iter->cpu, iter->ts);
4491 event = ftrace_find_event(entry->type);
4493 return event->funcs->raw(iter, 0, event);
4495 trace_seq_printf(s, "%d ?\n", entry->type);
4502 struct trace_array *tr = iter->tr;
4503 struct trace_seq *s = &iter->seq;
4508 entry = iter->ent;
4510 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4511 SEQ_PUT_HEX_FIELD(s, entry->pid);
4512 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4513 SEQ_PUT_HEX_FIELD(s, iter->ts);
4518 event = ftrace_find_event(entry->type);
4520 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4532 struct trace_array *tr = iter->tr;
4533 struct trace_seq *s = &iter->seq;
4537 entry = iter->ent;
4539 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4540 SEQ_PUT_FIELD(s, entry->pid);
4541 SEQ_PUT_FIELD(s, iter->cpu);
4542 SEQ_PUT_FIELD(s, iter->ts);
4547 event = ftrace_find_event(entry->type);
4548 return event ? event->funcs->binary(iter, 0, event) :
4558 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4559 cpu = iter->cpu_file;
4565 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4577 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4588 struct trace_array *tr = iter->tr;
4589 unsigned long trace_flags = tr->trace_flags;
4592 if (iter->lost_events) {
4593 if (iter->lost_events == (unsigned long)-1)
4594 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4595 iter->cpu);
4597 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4598 iter->cpu, iter->lost_events);
4599 if (trace_seq_has_overflowed(&iter->seq))
4603 if (iter->trace && iter->trace->print_line) {
4604 ret = iter->trace->print_line(iter);
4609 if (iter->ent->type == TRACE_BPUTS &&
4614 if (iter->ent->type == TRACE_BPRINT &&
4619 if (iter->ent->type == TRACE_PRINT &&
4638 struct trace_iterator *iter = m->private;
4639 struct trace_array *tr = iter->tr;
4645 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4648 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4654 struct trace_iterator *iter = m->private;
4655 struct trace_array *tr = iter->tr;
4656 unsigned long trace_flags = tr->trace_flags;
4661 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4671 print_func_help_header_irq(iter->array_buffer,
4674 print_func_help_header(iter->array_buffer, m,
4716 if (iter->tr->allocated_snapshot)
4722 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4737 if (iter->ent == NULL) {
4738 if (iter->tr) {
4739 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4743 if (iter->snapshot && trace_empty(iter))
4745 else if (iter->trace && iter->trace->print_header)
4746 iter->trace->print_header(m);
4750 } else if (iter->leftover) {
4755 ret = trace_print_seq(m, &iter->seq);
4757 /* ret should this time be zero, but you never know */
4758 iter->leftover = ret;
4763 iter->seq.full = 0;
4764 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4766 ret = trace_print_seq(m, &iter->seq);
4772 * -1 otherwise.
4774 iter->leftover = ret;
4786 if (inode->i_cdev) /* See trace_create_cpu_file() */
4787 return (long)inode->i_cdev - 1;
4808 if (iter->fmt != static_fmt_buf)
4809 kfree(iter->fmt);
4811 kfree(iter->temp);
4812 kfree(iter->buffer_iter);
4813 mutex_destroy(&iter->mutex);
4814 free_cpumask_var(iter->started);
4820 struct trace_array *tr = inode->i_private;
4825 return ERR_PTR(-ENODEV);
4829 return ERR_PTR(-ENOMEM);
4831 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4833 if (!iter->buffer_iter)
4837 * trace_find_next_entry() may need to save off iter->ent.
4838 * It will place it into the iter->temp buffer. As most
4841 * allocate a new buffer to adjust for the bigger iter->ent.
4844 iter->temp = kmalloc(128, GFP_KERNEL);
4845 if (iter->temp)
4846 iter->temp_size = 128;
4855 iter->fmt = NULL;
4856 iter->fmt_size = 0;
4859 iter->trace = tr->current_trace;
4861 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4864 iter->tr = tr;
4868 if (tr->current_trace->print_max || snapshot)
4869 iter->array_buffer = &tr->max_buffer;
4872 iter->array_buffer = &tr->array_buffer;
4873 iter->snapshot = snapshot;
4874 iter->pos = -1;
4875 iter->cpu_file = tracing_get_cpu(inode);
4876 mutex_init(&iter->mutex);
4879 if (iter->trace->open)
4880 iter->trace->open(iter);
4883 if (ring_buffer_overruns(iter->array_buffer->buffer))
4884 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4887 if (trace_clocks[tr->clock_id].in_ns)
4888 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4891 * If pause-on-trace is enabled, then stop the trace while
4894 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4897 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4899 iter->buffer_iter[cpu] =
4900 ring_buffer_read_prepare(iter->array_buffer->buffer,
4905 ring_buffer_read_start(iter->buffer_iter[cpu]);
4909 cpu = iter->cpu_file;
4910 iter->buffer_iter[cpu] =
4911 ring_buffer_read_prepare(iter->array_buffer->buffer,
4914 ring_buffer_read_start(iter->buffer_iter[cpu]);
4927 return ERR_PTR(-ENOMEM);
4938 filp->private_data = inode->i_private;
4953 struct trace_array *tr = inode->i_private;
4960 filp->private_data = inode->i_private;
4971 struct trace_event_file *file = inode->i_private;
4974 ret = tracing_check_open_get_tr(file->tr);
4981 if (file->flags & EVENT_FILE_FL_FREED) {
4982 trace_array_put(file->tr);
4983 ret = -ENODEV;
4992 filp->private_data = inode->i_private;
4999 struct trace_event_file *file = inode->i_private;
5001 trace_array_put(file->tr);
5021 struct trace_array *tr = inode->i_private;
5022 struct seq_file *m = file->private_data;
5026 if (!(file->f_mode & FMODE_READ)) {
5032 iter = m->private;
5036 if (iter->buffer_iter[cpu])
5037 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5040 if (iter->trace && iter->trace->close)
5041 iter->trace->close(iter);
5043 if (!iter->snapshot && tr->stop_count)
5059 struct trace_array *tr = inode->i_private;
5067 struct trace_array *tr = inode->i_private;
5076 struct trace_array *tr = inode->i_private;
5085 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5087 struct array_buffer *trace_buf = &tr->array_buffer;
5090 if (tr->current_trace->print_max)
5091 trace_buf = &tr->max_buffer;
5100 if (file->f_mode & FMODE_READ) {
5104 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5105 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5122 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5130 t = t->next;
5138 struct trace_array *tr = m->private;
5144 t = get_tracer_for_array(tr, t->next);
5151 struct trace_array *tr = m->private;
5176 seq_puts(m, t->name);
5177 if (t->next)
5194 struct trace_array *tr = inode->i_private;
5208 m = file->private_data;
5209 m->private = tr;
5216 struct trace_array *tr = inode->i_private;
5233 if (file->f_mode & FMODE_READ)
5236 file->f_pos = ret = 0;
5262 struct trace_array *tr = file_inode(filp)->i_private;
5267 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5270 return -ENOMEM;
5273 cpumask_pr_args(tr->tracing_cpumask));
5275 count = -EINVAL;
5292 return -EINVAL;
5295 arch_spin_lock(&tr->max_lock);
5301 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5303 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5304 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5306 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5309 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5311 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5312 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5314 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5318 arch_spin_unlock(&tr->max_lock);
5321 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5330 struct trace_array *tr = file_inode(filp)->i_private;
5335 return -ENOMEM;
5366 struct trace_array *tr = m->private;
5371 tracer_flags = tr->current_trace->flags->val;
5372 trace_opts = tr->current_trace->flags->opts;
5375 if (tr->trace_flags & (1 << i))
5396 struct tracer *trace = tracer_flags->trace;
5399 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5404 tracer_flags->val &= ~opts->bit;
5406 tracer_flags->val |= opts->bit;
5413 struct tracer *trace = tr->current_trace;
5414 struct tracer_flags *tracer_flags = trace->flags;
5418 for (i = 0; tracer_flags->opts[i].name; i++) {
5419 opts = &tracer_flags->opts[i];
5421 if (strcmp(cmp, opts->name) == 0)
5422 return __set_tracer_option(tr, trace->flags, opts, neg);
5425 return -EINVAL;
5431 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5432 return -1;
5446 if (!!(tr->trace_flags & mask) == !!enabled)
5450 if (tr->current_trace->flag_changed)
5451 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5452 return -EINVAL;
5455 tr->trace_flags |= mask;
5457 tr->trace_flags &= ~mask;
5477 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5478 return -ENOMEM;
5491 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5493 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5524 ret = match_string(trace_options, -1, cmp);
5560 *(buf - 1) = ',';
5568 struct seq_file *m = filp->private_data;
5569 struct trace_array *tr = m->private;
5574 return -EINVAL;
5577 return -EFAULT;
5592 struct trace_array *tr = inode->i_private;
5599 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5615 "tracing mini-HOWTO:\n\n"
5617 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5619 " trace\t\t\t- The static contents of the buffer\n"
5621 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5622 " current_tracer\t- function and latency tracers\n"
5623 " available_tracers\t- list of configured tracers for current_tracer\n"
5624 " error_log\t- error log for failed commands (that support it)\n"
5625 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5626 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5627 " trace_clock\t\t- change the clock used to order events\n"
5631 " uptime: Jiffy counter from time of boot\n"
5634 " x86-tsc: TSC cycle counter\n"
5636 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5637 " delta: Delta difference against a buffer-wide timestamp\n"
5639 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5640 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5641 " tracing_cpumask\t- Limit which CPUs to trace\n"
5642 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5643 "\t\t\t Remove sub-buffer with rmdir\n"
5644 " trace_options\t\t- Set format or modify how tracing happens\n"
5647 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5649 "\n available_filter_functions - list of functions that can be filtered on\n"
5650 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5652 "\t accepts: func_full_name or glob-matching-pattern\n"
5654 "\t Format: :mod:<module-name>\n"
5671 "\t The first one will disable tracing every time do_fault is hit\n"
5673 "\t The first time do trap is hit and it disables tracing, the\n"
5681 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5687 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5689 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5693 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5694 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5695 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5698 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5703 " stack_trace\t\t- Shows the max stack trace when active\n"
5704 " stack_max_size\t- Shows current max stack size that was traced\n"
5708 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5713 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5717 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5721 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5726 "\t accepts: event-definitions (one definition per line)\n"
5732 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5738 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5739 "\t -:[<group>/][<event>]\n"
5748 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5752 "\t <argname>[->field[->field|.field...]],\n"
5759 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5761 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5762 "\t symstr, <type>\\[<array-size>\\]\n"
5769 "\t of the <attached-group>/<attached-event>.\n"
5771 " events/\t\t- Directory containing all trace event subsystems:\n"
5772 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5773 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5774 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5776 " filter\t\t- If set, only events passing filter are traced\n"
5777 " events/<system>/<event>/\t- Directory containing control files for\n"
5779 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5780 " filter\t\t- If set, only events passing filter are traced\n"
5781 " trigger\t\t- If set, a command to perform when event is hit\n"
5803 "\t The first disables tracing every time block_unplug is hit.\n"
5815 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5827 "\t common_timestamp - to record current timestamp\n"
5828 "\t common_cpu - to record the CPU the event happened on\n"
5831 "\t - a reference to a field e.g. x=current_timestamp,\n"
5832 "\t - a reference to another variable e.g. y=$x,\n"
5833 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5834 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5836 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5865 "\t .sym-offset display an address as a symbol and offset\n"
5872 "\t .graph display a bar-graph of a value\n\n"
5884 "\t already-attached hist trigger. The syntax is analogous to\n"
5890 "\t onmatch(matching.event) - invoke on addition or update\n"
5891 "\t onmax(var) - invoke if var exceeds current max\n"
5892 "\t onchange(var) - invoke action if var changes\n\n"
5894 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5895 "\t save(field,...) - save current event fields\n"
5897 "\t snapshot() - snapshot the trace buffer\n\n"
5900 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5942 int pid = entry - tgid_map;
5982 if (*pos || m->count)
5987 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5989 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
6006 v = &savedcmd->map_cmdline_to_pid[0];
6066 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6079 return -ENOMEM;
6105 return -EINVAL;
6126 if (!ptr->map.eval_string) {
6127 if (ptr->tail.next) {
6128 ptr = ptr->tail.next;
6184 ptr->map.eval_string, ptr->map.eval_value,
6185 ptr->map.system);
6219 return ptr + ptr->head.length + 1;
6252 if (!ptr->tail.next)
6254 ptr = ptr->tail.next;
6257 ptr->tail.next = map_array;
6259 map_array->head.mod = mod;
6260 map_array->head.length = len;
6264 map_array->map = **map;
6303 struct trace_array *tr = filp->private_data;
6308 r = sprintf(buf, "%s\n", tr->current_trace->name);
6316 tracing_reset_online_cpus(&tr->array_buffer);
6317 return t->init(tr);
6325 per_cpu_ptr(buf->data, cpu)->entries = val;
6331 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6333 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6346 ret = ring_buffer_resize(trace_buf->buffer,
6347 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6350 per_cpu_ptr(trace_buf->data, cpu)->entries =
6351 per_cpu_ptr(size_buf->data, cpu)->entries;
6354 ret = ring_buffer_resize(trace_buf->buffer,
6355 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6357 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6358 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6378 if (!tr->array_buffer.buffer)
6384 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6389 if (!tr->allocated_snapshot)
6392 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6394 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6395 &tr->array_buffer, cpu);
6417 update_buffer_entries(&tr->max_buffer, cpu);
6422 update_buffer_entries(&tr->array_buffer, cpu);
6438 ret = -EINVAL;
6445 ret = -ENOMEM;
6455 * tracing_update_buffers - used by tracing facility to expand ring buffers
6470 if (!tr->ring_buffer_expanded)
6489 if (tr->current_trace == &nop_trace)
6492 tr->current_trace->enabled--;
6494 if (tr->current_trace->reset)
6495 tr->current_trace->reset(tr);
6497 tr->current_trace = &nop_trace;
6505 if (!tr->dir)
6525 if (!tr->ring_buffer_expanded) {
6533 for (t = trace_types; t; t = t->next) {
6534 if (strcmp(t->name, buf) == 0)
6538 ret = -EINVAL;
6541 if (t == tr->current_trace)
6545 if (t->use_max_tr) {
6547 arch_spin_lock(&tr->max_lock);
6548 if (tr->cond_snapshot)
6549 ret = -EBUSY;
6550 arch_spin_unlock(&tr->max_lock);
6557 if (system_state < SYSTEM_RUNNING && t->noboot) {
6559 t->name);
6565 ret = -EINVAL;
6570 if (tr->trace_ref) {
6571 ret = -EBUSY;
6577 tr->current_trace->enabled--;
6579 if (tr->current_trace->reset)
6580 tr->current_trace->reset(tr);
6583 had_max_tr = tr->current_trace->use_max_tr;
6586 tr->current_trace = &nop_trace;
6588 if (had_max_tr && !t->use_max_tr) {
6600 if (t->use_max_tr && !tr->allocated_snapshot) {
6606 tr->current_trace = &nop_trace;
6609 if (t->init) {
6615 tr->current_trace = t;
6616 tr->current_trace->enabled++;
6628 struct trace_array *tr = filp->private_data;
6640 return -EFAULT;
6663 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6696 struct trace_array *tr = filp->private_data;
6704 if (tr->current_trace->update_thresh) {
6705 ret = tr->current_trace->update_thresh(tr);
6723 struct trace_array *tr = filp->private_data;
6725 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6732 struct trace_array *tr = filp->private_data;
6734 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6742 if (cpumask_empty(tr->pipe_cpumask)) {
6743 cpumask_setall(tr->pipe_cpumask);
6746 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6747 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6750 return -EBUSY;
6756 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6757 cpumask_clear(tr->pipe_cpumask);
6759 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6760 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6766 struct trace_array *tr = inode->i_private;
6784 ret = -ENOMEM;
6788 trace_seq_init(&iter->seq);
6789 iter->trace = tr->current_trace;
6791 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6792 ret = -ENOMEM;
6797 cpumask_setall(iter->started);
6799 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6800 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6803 if (trace_clocks[tr->clock_id].in_ns)
6804 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6806 iter->tr = tr;
6807 iter->array_buffer = &tr->array_buffer;
6808 iter->cpu_file = cpu;
6809 mutex_init(&iter->mutex);
6810 filp->private_data = iter;
6812 if (iter->trace->pipe_open)
6813 iter->trace->pipe_open(iter);
6817 tr->trace_ref++;
6834 struct trace_iterator *iter = file->private_data;
6835 struct trace_array *tr = inode->i_private;
6839 tr->trace_ref--;
6841 if (iter->trace->pipe_close)
6842 iter->trace->pipe_close(iter);
6843 close_pipe_on_cpu(tr, iter->cpu_file);
6857 struct trace_array *tr = iter->tr;
6860 if (trace_buffer_iter(iter, iter->cpu_file))
6863 if (tr->trace_flags & TRACE_ITER_BLOCK)
6869 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6870 filp, poll_table, iter->tr->buffer_percent);
6876 struct trace_iterator *iter = filp->private_data;
6881 /* Must be called with iter->mutex held. */
6884 struct trace_iterator *iter = filp->private_data;
6889 if ((filp->f_flags & O_NONBLOCK)) {
6890 return -EAGAIN;
6900 * iter->pos will be 0 if we haven't read anything.
6902 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6905 mutex_unlock(&iter->mutex);
6909 mutex_lock(&iter->mutex);
6925 struct trace_iterator *iter = filp->private_data;
6933 mutex_lock(&iter->mutex);
6936 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6937 if (sret != -EBUSY)
6940 trace_seq_init(&iter->seq);
6942 if (iter->trace->read) {
6943 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6960 cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6964 cpumask_clear(iter->started);
6965 trace_seq_init(&iter->seq);
6968 trace_access_lock(iter->cpu_file);
6971 int save_len = iter->seq.seq.len;
6977 * trace_seq_to_user() will return -EBUSY because save_len == 0,
6979 * this event next time, resulting in an infinite loop.
6982 iter->seq.full = 0;
6983 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6989 iter->seq.seq.len = save_len;
6995 if (trace_seq_used(&iter->seq) >= cnt)
7003 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
7004 iter->ent->type);
7006 trace_access_unlock(iter->cpu_file);
7010 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
7011 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
7012 trace_seq_init(&iter->seq);
7018 if (sret == -EBUSY)
7022 mutex_unlock(&iter->mutex);
7030 __free_page(spd->pages[idx]);
7040 /* Seq buffer is page-sized, exactly what we need. */
7042 save_len = iter->seq.seq.len;
7045 if (trace_seq_has_overflowed(&iter->seq)) {
7046 iter->seq.seq.len = save_len;
7052 * be set if the iter->seq overflowed. But check it
7056 iter->seq.seq.len = save_len;
7060 count = trace_seq_used(&iter->seq) - save_len;
7063 iter->seq.seq.len = save_len;
7069 rem -= count;
7072 iter->ent = NULL;
7088 struct trace_iterator *iter = filp->private_data;
7102 return -ENOMEM;
7104 mutex_lock(&iter->mutex);
7106 if (iter->trace->splice_read) {
7107 ret = iter->trace->splice_read(iter, filp,
7117 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7118 ret = -EFAULT;
7123 trace_access_lock(iter->cpu_file);
7134 ret = trace_seq_to_buffer(&iter->seq,
7136 trace_seq_used(&iter->seq));
7142 spd.partial[i].len = trace_seq_used(&iter->seq);
7144 trace_seq_init(&iter->seq);
7147 trace_access_unlock(iter->cpu_file);
7149 mutex_unlock(&iter->mutex);
7162 mutex_unlock(&iter->mutex);
7171 struct trace_array *tr = inode->i_private;
7189 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7190 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7197 if (!tr->ring_buffer_expanded)
7206 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7219 struct trace_array *tr = inode->i_private;
7229 return -EINVAL;
7246 struct trace_array *tr = filp->private_data;
7253 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7254 if (!tr->ring_buffer_expanded)
7257 if (tr->ring_buffer_expanded)
7283 struct trace_array *tr = inode->i_private;
7286 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7302 struct trace_array *tr = filp->private_data;
7314 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7317 return -EINVAL;
7319 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7320 return -EINVAL;
7323 return -EINVAL;
7334 size += FAULTED_SIZE - cnt;
7336 buffer = tr->array_buffer.buffer;
7347 return -EBADF;
7348 cnt = ring_buffer_max_event_size(buffer) - meta_size;
7351 return -EBADF;
7356 return -EBADF;
7360 entry->ip = _THIS_IP_;
7362 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7364 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7366 written = -EFAULT;
7370 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7372 entry->buf[cnt] = '\0';
7373 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7376 if (entry->buf[cnt - 1] != '\n') {
7377 entry->buf[cnt] = '\n';
7378 entry->buf[cnt + 1] = '\0';
7380 entry->buf[cnt] = '\0';
7387 event_triggers_post_call(tr->trace_marker_file, tt);
7396 struct trace_array *tr = filp->private_data;
7407 return -EINVAL;
7409 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7410 return -EINVAL;
7414 return -EINVAL;
7418 size += FAULT_SIZE_ID - cnt;
7420 buffer = tr->array_buffer.buffer;
7423 return -EINVAL;
7429 return -EBADF;
7433 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7435 entry->id = -1;
7436 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7437 written = -EFAULT;
7448 struct trace_array *tr = m->private;
7454 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7455 i == tr->clock_id ? "]" : "");
7470 return -EINVAL;
7474 tr->clock_id = i;
7476 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7482 tracing_reset_online_cpus(&tr->array_buffer);
7485 if (tr->max_buffer.buffer)
7486 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7487 tracing_reset_online_cpus(&tr->max_buffer);
7498 struct seq_file *m = filp->private_data;
7499 struct trace_array *tr = m->private;
7505 return -EINVAL;
7508 return -EFAULT;
7525 struct trace_array *tr = inode->i_private;
7532 ret = single_open(file, tracing_clock_show, inode->i_private);
7541 struct trace_array *tr = m->private;
7545 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7557 struct trace_array *tr = inode->i_private;
7564 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7588 if (set && tr->no_filter_buffering_ref++)
7592 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7593 ret = -EINVAL;
7597 --tr->no_filter_buffering_ref;
7616 struct trace_array *tr = inode->i_private;
7625 if (file->f_mode & FMODE_READ) {
7631 ret = -ENOMEM;
7642 iter->tr = tr;
7643 iter->array_buffer = &tr->max_buffer;
7644 iter->cpu_file = tracing_get_cpu(inode);
7645 m->private = iter;
7646 file->private_data = m;
7664 struct seq_file *m = filp->private_data;
7665 struct trace_iterator *iter = m->private;
7666 struct trace_array *tr = iter->tr;
7680 if (tr->current_trace->use_max_tr) {
7681 ret = -EBUSY;
7686 arch_spin_lock(&tr->max_lock);
7687 if (tr->cond_snapshot)
7688 ret = -EBUSY;
7689 arch_spin_unlock(&tr->max_lock);
7696 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7697 ret = -EINVAL;
7700 if (tr->allocated_snapshot)
7704 /* Only allow per-cpu swap if the ring buffer supports it */
7706 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7707 ret = -EINVAL;
7711 if (tr->allocated_snapshot)
7712 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7713 &tr->array_buffer, iter->cpu_file);
7719 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7724 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7729 if (tr->allocated_snapshot) {
7730 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7731 tracing_reset_online_cpus(&tr->max_buffer);
7733 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7749 struct seq_file *m = file->private_data;
7754 if (file->f_mode & FMODE_READ)
7759 kfree(m->private);
7782 info = filp->private_data;
7784 if (info->iter.trace->use_max_tr) {
7786 return -EBUSY;
7789 info->iter.snapshot = true;
7790 info->iter.array_buffer = &info->iter.tr->max_buffer;
7900 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7907 * The filp->private_data must point to a trace_min_max_param structure that
7908 * defines where to write the value, the min and the max acceptable values,
7914 struct trace_min_max_param *param = filp->private_data;
7919 return -EFAULT;
7925 if (param->lock)
7926 mutex_lock(param->lock);
7928 if (param->min && val < *param->min)
7929 err = -EINVAL;
7931 if (param->max && val > *param->max)
7932 err = -EINVAL;
7935 *param->val = val;
7937 if (param->lock)
7938 mutex_unlock(param->lock);
7947 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7954 * The filp->private_data must point to a trace_min_max_param struct with valid
7960 struct trace_min_max_param *param = filp->private_data;
7966 return -EFAULT;
7968 val = *param->val;
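The struct behind these handlers just bundles pointers to the value, its optional bounds, and an optional lock. A sketch of wiring one knob to a tracefs file the way the osnoise tracer does (all names hypothetical):

    static u64 my_val = 500, my_min = 1, my_max = 10000;

    static struct trace_min_max_param my_param = {
            .lock   = NULL,         /* no locking needed for this knob */
            .val    = &my_val,
            .min    = &my_min,
            .max    = &my_max,
    };

    /* trace_create_file("my_knob", 0644, parent, &my_param, &trace_min_max_fops); */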
7990 const char **errs; /* ptr to loc-specific array of err strings */
7991 u8 type; /* index into errs -> specific err string */
8011 return ERR_PTR(-ENOMEM);
8013 err->cmd = kzalloc(len, GFP_KERNEL);
8014 if (!err->cmd) {
8016 return ERR_PTR(-ENOMEM);
8024 kfree(err->cmd);
8034 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8036 if (PTR_ERR(err) != -ENOMEM)
8037 tr->n_err_log_entries++;
8043 return ERR_PTR(-ENOMEM);
8044 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8045 kfree(err->cmd);
8046 err->cmd = cmd;
8047 list_del(&err->list);
8053 * err_pos - find the position of a string within a command for error careting
8073 return found - cmd;
8079 * tracing_log_err - write an error to the tracing error log
8083 * @errs: The array of loc-specific static error strings
8100 * produce a static error string - this string is not copied and saved
8101 * when the error is logged - only a pointer to it is saved. See
8119 if (PTR_ERR(err) == -ENOMEM) {
8124 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8125 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8127 err->info.errs = errs;
8128 err->info.type = type;
8129 err->info.pos = pos;
8130 err->info.ts = local_clock();
8132 list_add_tail(&err->list, &tr->err_log);
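A sketch of how a command parser reports into err_log: the error strings live in a static, type-indexed table, and err_pos() computes the caret offset into the failed command (my_errs and the names are hypothetical):

    static const char *my_errs[] = { "Bad argument", "Duplicate name" };

    enum { MY_ERR_BAD_ARG, MY_ERR_DUP_NAME };

    static void report_bad_arg(struct trace_array *tr, char *cmd, const char *arg)
    {
            tracing_log_err(tr, "my_cmd", cmd, my_errs,
                            MY_ERR_BAD_ARG, err_pos(cmd, arg));
    }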
8141 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8142 list_del(&err->list);
8146 tr->n_err_log_entries = 0;
8152 struct trace_array *tr = m->private;
8156 return seq_list_start(&tr->err_log, *pos);
8161 struct trace_array *tr = m->private;
8163 return seq_list_next(v, &tr->err_log, pos);
8175 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8187 const char *err_text = err->info.errs[err->info.type];
8188 u64 sec = err->info.ts;
8193 err->loc, err_text);
8194 seq_printf(m, "%s", err->cmd);
8195 tracing_err_log_show_pos(m, err->info.pos);
8210 struct trace_array *tr = inode->i_private;
8218 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8221 if (file->f_mode & FMODE_READ) {
8224 struct seq_file *m = file->private_data;
8225 m->private = tr;
8242 struct trace_array *tr = inode->i_private;
8246 if (file->f_mode & FMODE_READ)
8262 struct trace_array *tr = inode->i_private;
8273 return -ENOMEM;
8278 info->iter.tr = tr;
8279 info->iter.cpu_file = tracing_get_cpu(inode);
8280 info->iter.trace = tr->current_trace;
8281 info->iter.array_buffer = &tr->array_buffer;
8282 info->spare = NULL;
8284 info->read = (unsigned int)-1;
8286 filp->private_data = info;
8288 tr->trace_ref++;
8302 struct ftrace_buffer_info *info = filp->private_data;
8303 struct trace_iterator *iter = &info->iter;
8312 struct ftrace_buffer_info *info = filp->private_data;
8313 struct trace_iterator *iter = &info->iter;
8323 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8324 return -EBUSY;
8327 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8330 if (info->spare) {
8331 if (page_size != info->spare_size) {
8332 ring_buffer_free_read_page(iter->array_buffer->buffer,
8333 info->spare_cpu, info->spare);
8334 info->spare = NULL;
8338 if (!info->spare) {
8339 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8340 iter->cpu_file);
8341 if (IS_ERR(info->spare)) {
8342 ret = PTR_ERR(info->spare);
8343 info->spare = NULL;
8345 info->spare_cpu = iter->cpu_file;
8346 info->spare_size = page_size;
8349 if (!info->spare)
8353 if (info->read < page_size)
8357 trace_access_lock(iter->cpu_file);
8358 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8359 info->spare,
8361 iter->cpu_file, 0);
8362 trace_access_unlock(iter->cpu_file);
8366 if ((filp->f_flags & O_NONBLOCK))
8367 return -EAGAIN;
8378 info->read = 0;
8380 size = page_size - info->read;
8383 trace_data = ring_buffer_read_page_data(info->spare);
8384 ret = copy_to_user(ubuf, trace_data + info->read, size);
8386 return -EFAULT;
8388 size -= ret;
8391 info->read += size;
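/*
 * [Editorial sketch -- the userspace side of the read path above. Reads
 * from trace_pipe_raw return whole ring-buffer sub-buffers, so consumers
 * read in sub-buffer-sized chunks (4096 bytes assumed here, the default
 * order-0 sub-buffer on 4K-page systems; the tracefs path is the usual
 * mount point).]
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* With O_NONBLOCK the kernel returns -EAGAIN instead of sleeping
	 * when no data is ready (the f_flags check above). */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fprintf(stderr, "read %zd raw bytes\n", n);

	close(fd);
	return 0;
}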
8398 struct ftrace_buffer_info *info = file->private_data;
8399 struct trace_iterator *iter = &info->iter;
8401 iter->wait_index++;
8405 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8412 struct ftrace_buffer_info *info = file->private_data;
8413 struct trace_iterator *iter = &info->iter;
8417 iter->tr->trace_ref--;
8419 __trace_array_put(iter->tr);
8421 if (info->spare)
8422 ring_buffer_free_read_page(iter->array_buffer->buffer,
8423 info->spare_cpu, info->spare);
8440 if (!refcount_dec_and_test(&ref->refcount))
8442 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8449 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8452 buf->private = 0;
8458 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8460 if (refcount_read(&ref->refcount) > INT_MAX/2)
8463 refcount_inc(&ref->refcount);
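/*
 * [Editorial note -- context for the helpers above. They back the
 * pipe_buf_operations used by the splice path; the table itself is
 * elided from this listing, but in recent trace.c it wires only release
 * and get (no steal support):]
 */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.release	= buffer_pipe_buf_release,
	.get		= buffer_pipe_buf_get,
};
/* buffer_pipe_buf_get() refuses once the count nears INT_MAX/2 so a
 * malicious pipe reader cannot overflow the refcount. */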
8480 (struct buffer_ref *)spd->partial[i].private;
8483 spd->partial[i].private = 0;
8491 struct ftrace_buffer_info *info = file->private_data;
8492 struct trace_iterator *iter = &info->iter;
8508 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8509 return -EBUSY;
8512 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8513 if (*ppos & (page_size - 1))
8514 return -EINVAL;
8516 if (len & (page_size - 1)) {
8518 return -EINVAL;
8519 len &= (~(page_size - 1));
8523 return -ENOMEM;
8526 trace_access_lock(iter->cpu_file);
8527 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8529 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8535 ret = -ENOMEM;
8539 refcount_set(&ref->refcount, 1);
8540 ref->buffer = iter->array_buffer->buffer;
8541 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8542 if (IS_ERR(ref->page)) {
8543 ret = PTR_ERR(ref->page);
8544 ref->page = NULL;
8548 ref->cpu = iter->cpu_file;
8550 r = ring_buffer_read_page(ref->buffer, ref->page,
8551 len, iter->cpu_file, 1);
8553 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8554 ref->page);
8559 page = virt_to_page(ring_buffer_read_page_data(ref->page));
8568 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8571 trace_access_unlock(iter->cpu_file);
8581 ret = -EAGAIN;
8582 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8585 wait_index = READ_ONCE(iter->wait_index);
8587 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8592 if (!tracer_tracing_is_on(iter->tr))
8597 if (wait_index != iter->wait_index)
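/*
 * [Editorial sketch -- zero-copy consumption of the splice path above:
 * splice() moves whole ring-buffer pages from trace_pipe_raw into a pipe
 * and on to a file without copying through userspace. The 4096-byte
 * chunk assumes the default sub-buffer size; len must be sub-buffer
 * aligned per the checks above, and the paths are illustrative.]
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	ssize_t n;
	int in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	int out = open("trace_cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	while ((n = splice(in, NULL, pfd[1], NULL, 4096,
			   SPLICE_F_NONBLOCK)) > 0)
		splice(pfd[0], NULL, out, NULL, n, 0);

	close(in);
	close(out);
	return 0;
}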
8613 struct ftrace_buffer_info *info = file->private_data;
8614 struct trace_iterator *iter = &info->iter;
8617 return -ENOIOCTLCMD;
8621 iter->wait_index++;
8625 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8647 struct trace_array *tr = inode->i_private;
8648 struct array_buffer *trace_buf = &tr->array_buffer;
8657 return -ENOMEM;
8661 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8664 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8667 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8670 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8673 if (trace_clocks[tr->clock_id].in_ns) {
8675 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8680 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8686 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8689 ring_buffer_time_stamp(trace_buf->buffer));
8692 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8695 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8699 s->buffer, trace_seq_used(s));
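/*
 * [Editorial note -- sample output of the stats read above. Reading
 * per_cpu/cpuN/stats yields one "name: value" line per counter; with a
 * nanosecond trace clock the timestamps print as seconds. Values below
 * are made up:
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 65536
 *   oldest event ts:  5234.567890
 *   now ts:  5240.123456
 *   dropped events: 0
 *   read events: 1024
 */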
8726 return -ENOMEM;
8770 (*count)--;
8807 return -ENOMEM;
8847 void *count = (void *)-1;
8852 return -ENODEV;
8856 return -EINVAL;
8905 if (WARN_ON(!tr->dir))
8906 return ERR_PTR(-ENODEV);
8909 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8913 return tr->dir;
8920 if (tr->percpu_dir)
8921 return tr->percpu_dir;
8927 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8929 MEM_FAIL(!tr->percpu_dir,
8932 return tr->percpu_dir;
8942 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8998 struct trace_option_dentry *topt = filp->private_data;
9001 if (topt->flags->val & topt->opt->bit)
9013 struct trace_option_dentry *topt = filp->private_data;
9022 return -EINVAL;
9024 if (!!(topt->flags->val & topt->opt->bit) != val) {
9026 ret = __set_tracer_option(topt->tr, topt->flags,
9027 topt->opt, !val);
9040 struct trace_option_dentry *topt = inode->i_private;
9043 ret = tracing_check_open_get_tr(topt->tr);
9047 filp->private_data = inode->i_private;
9053 struct trace_option_dentry *topt = file->private_data;
9055 trace_array_put(topt->tr);
9086 * ptr - idx == &index[0]
9096 *ptr = container_of(data - *pindex, struct trace_array,
9097 trace_flags_index);
9104 void *tr_index = filp->private_data;
9111 if (tr->trace_flags & (1 << index))
9123 void *tr_index = filp->private_data;
9136 return -EINVAL;
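/*
 * [Editorial sketch -- a self-contained userspace demo of the
 * "ptr - idx == &index[0]" trick used above: each options file stores a
 * pointer to one byte of trace_flags_index[], whose value is its own
 * array index, so subtracting that index recovers the array base and
 * container_of() recovers the enclosing trace_array. The struct here is
 * a stand-in, not the real trace_array.]
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_trace_array {
	unsigned long trace_flags;
	unsigned char trace_flags_index[32];
};

int main(void)
{
	struct fake_trace_array tr;
	struct fake_trace_array *ptr;
	unsigned int i, idx;
	void *data;

	for (i = 0; i < 32; i++)
		tr.trace_flags_index[i] = i;	/* index[i] holds i */

	data = &tr.trace_flags_index[7];	/* a file's private_data */
	idx = *(unsigned char *)data;
	ptr = container_of((char *)data - idx,
			   struct fake_trace_array, trace_flags_index);

	printf("recovered tr: %s, flag bit: %u\n",
	       ptr == &tr ? "yes" : "no", idx);
	return 0;
}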
9179 if (tr->options)
9180 return tr->options;
9186 tr->options = tracefs_create_dir("options", d_tracer);
9187 if (!tr->options) {
9192 return tr->options;
9207 topt->flags = flags;
9208 topt->opt = opt;
9209 topt->tr = tr;
9211 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9229 flags = tracer->flags;
9231 if (!flags || !flags->opts)
9241 for (i = 0; i < tr->nr_topts; i++) {
9243 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9247 opts = flags->opts;
9256 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9263 tr->topts = tr_topts;
9264 tr->topts[tr->nr_topts].tracer = tracer;
9265 tr->topts[tr->nr_topts].topts = topts;
9266 tr->nr_topts++;
9288 (void *)&tr->trace_flags_index[index],
9313 struct trace_array *tr = filp->private_data;
9327 struct trace_array *tr = filp->private_data;
9328 struct trace_buffer *buffer = tr->array_buffer.buffer;
9342 if (tr->current_trace->start)
9343 tr->current_trace->start(tr);
9346 if (tr->current_trace->stop)
9347 tr->current_trace->stop(tr);
9371 struct trace_array *tr = filp->private_data;
9375 r = tr->buffer_percent;
9385 struct trace_array *tr = filp->private_data;
9394 return -EINVAL;
9396 tr->buffer_percent = val;
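/*
 * [Editorial note -- semantics of the knob above, per the ftrace
 * documentation: buffer_percent is how full the ring buffer must be
 * before a blocked reader (e.g. on trace_pipe_raw) is woken. 0 wakes on
 * any data, 100 only when the buffer is full; values above 100 fail
 * with -EINVAL. A minimal userspace write:]
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "50", 2) != 2)	/* wake readers at half full */
		return 1;
	close(fd);
	return 0;
}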
9414 struct trace_array *tr = filp->private_data;
9420 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9432 struct trace_array *tr = filp->private_data;
9446 order = fls(pages - 1);
9450 return -EINVAL;
9455 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9459 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9465 if (!tr->allocated_snapshot)
9468 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9471 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
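/*
 * [Editorial sketch -- the rounding that "order = fls(pages - 1)" above
 * performs: the requested sub-buffer size is converted to pages, then
 * rounded up to the next power of two expressed as an order. A userspace
 * check of the same arithmetic, with a local stand-in for the kernel's
 * fls():]
 */
#include <stdio.h>

static int fls_(unsigned int x)	/* highest set bit, 1-based; fls_(0) == 0 */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int pages;

	/* pages=1 -> order 0 (one 4K page), pages=3 -> order 2 (16K),
	 * pages=4 -> order 2, pages=5 -> order 3 (32K). */
	for (pages = 1; pages <= 5; pages++)
		printf("pages=%u -> order=%d\n", pages, fls_(pages - 1));
	return 0;
}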
9518 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9520 buf->tr = tr;
9522 buf->buffer = ring_buffer_alloc(size, rb_flags);
9523 if (!buf->buffer)
9524 return -ENOMEM;
9526 buf->data = alloc_percpu(struct trace_array_cpu);
9527 if (!buf->data) {
9528 ring_buffer_free(buf->buffer);
9529 buf->buffer = NULL;
9530 return -ENOMEM;
9534 set_buffer_entries(&tr->array_buffer,
9535 ring_buffer_size(tr->array_buffer.buffer, 0));
9542 if (buf->buffer) {
9543 ring_buffer_free(buf->buffer);
9544 buf->buffer = NULL;
9545 free_percpu(buf->data);
9546 buf->data = NULL;
9554 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9559 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9562 free_trace_buffer(&tr->array_buffer);
9563 return -ENOMEM;
9565 tr->allocated_snapshot = allocate_snapshot;
9578 free_trace_buffer(&tr->array_buffer);
9581 free_trace_buffer(&tr->max_buffer);
9591 tr->trace_flags_index[i] = i;
9598 for (t = trace_types; t; t = t->next)
9616 if (tr->name && strcmp(tr->name, instance) == 0) {
9632 tr->ref++;
9642 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9643 if (!tr->dir)
9644 return -EINVAL;
9646 ret = event_trace_add_tracer(tr->dir, tr);
9648 tracefs_remove(tr->dir);
9652 init_tracer_tracefs(tr, tr->dir);
9664 ret = -ENOMEM;
9669 tr->name = kstrdup(name, GFP_KERNEL);
9670 if (!tr->name)
9673 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9676 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9680 tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9681 if (!tr->system_names)
9685 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9687 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9689 raw_spin_lock_init(&tr->start_lock);
9691 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9693 tr->current_trace = &nop_trace;
9695 INIT_LIST_HEAD(&tr->systems);
9696 INIT_LIST_HEAD(&tr->events);
9697 INIT_LIST_HEAD(&tr->hist_vars);
9698 INIT_LIST_HEAD(&tr->err_log);
9720 list_add(&tr->list, &ftrace_trace_arrays);
9722 tr->ref++;
9729 free_cpumask_var(tr->pipe_cpumask);
9730 free_cpumask_var(tr->tracing_cpumask);
9731 kfree_const(tr->system_names);
9732 kfree(tr->name);
9751 ret = -EEXIST;
9766 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9790 if (tr->name && strcmp(tr->name, name) == 0)
9800 tr->ref++;
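/*
 * [Editorial sketch -- kernel-module usage of the lookup/create API
 * documented above, modeled on samples/ftrace/sample-trace-array.c.
 * Note that newer kernels add a second "systems" argument to
 * trace_array_get_by_name(); the single-argument form is shown here and
 * the instance name is arbitrary.]
 */
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *tr;

static int __init my_mod_init(void)
{
	/* Creates the "my-instance" tracefs instance if it does not
	 * already exist, and takes a reference on it. */
	tr = trace_array_get_by_name("my-instance");
	if (!tr)
		return -ENOMEM;

	trace_array_init_printk(tr);
	trace_array_printk(tr, _THIS_IP_, "hello from my-instance\n");
	return 0;
}

static void __exit my_mod_exit(void)
{
	trace_array_put(tr);	 /* drop our reference... */
	trace_array_destroy(tr); /* ...then remove the instance */
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");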
9813 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9814 return -EBUSY;
9816 list_del(&tr->list);
9829 tracefs_remove(tr->dir);
9830 free_percpu(tr->last_func_repeats);
9834 for (i = 0; i < tr->nr_topts; i++) {
9835 kfree(tr->topts[i].topts);
9837 kfree(tr->topts);
9839 free_cpumask_var(tr->pipe_cpumask);
9840 free_cpumask_var(tr->tracing_cpumask);
9841 kfree_const(tr->system_names);
9842 kfree(tr->name);
9854 return -EINVAL;
9859 ret = -ENODEV;
9884 ret = -ENODEV;
9909 if (!tr->name)
9955 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9969 tr->buffer_percent = 50;
10023 * tracing_init_dentry - initialize top level trace array
10035 return -EPERM;
10039 if (tr->dir)
10043 return -ENODEV;
10051 tr->dir = debugfs_create_automount("tracing", NULL,
10068 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
10081 return -ENOMEM;
10104 if (!mod->num_trace_evals)
10114 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
10123 if (!mod->num_trace_evals)
10131 if (map->head.mod == mod)
10134 last = &map->tail.next;
10135 map = map->tail.next;
10140 *last = trace_eval_jmp_to_tail(map)->tail.next;
10240 .priority = INT_MAX - 1,
10245 .priority = INT_MAX - 1,
10286 if (s->seq.len >= TRACE_MAX_PRINT)
10287 s->seq.len = TRACE_MAX_PRINT;
10294 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10295 s->seq.len = s->seq.size - 1;
10298 s->buffer[s->seq.len] = 0;
10300 printk(KERN_TRACE "%s", s->buffer);
10307 iter->tr = &global_trace;
10308 iter->trace = iter->tr->current_trace;
10309 iter->cpu_file = RING_BUFFER_ALL_CPUS;
10310 iter->array_buffer = &global_trace.array_buffer;
10312 if (iter->trace && iter->trace->open)
10313 iter->trace->open(iter);
10316 if (ring_buffer_overruns(iter->array_buffer->buffer))
10317 iter->iter_flags |= TRACE_FILE_ANNOTATE;
10320 if (trace_clocks[iter->tr->clock_id].in_ns)
10321 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10324 iter->temp = static_temp_buf;
10325 iter->temp_size = STATIC_TEMP_BUF_SIZE;
10326 iter->fmt = static_fmt_buf;
10327 iter->fmt_size = STATIC_FMT_BUF_SIZE;
10340 /* Only allow one dump user at a time. */
10351 * If the user does a sysrq-z, then they can re-enable
10352 * tracing with echo 1 > tracing_on.
10362 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10365 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10368 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10402 printk(KERN_TRACE "---------------------------------\n");
10424 printk(KERN_TRACE "---------------------------------\n");
10427 tr->trace_flags |= old_userobj;
10430 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
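/*
 * [Editorial sketch -- how kernel code triggers the dump path above.
 * ftrace_dump() is the exported entry point; DUMP_ALL dumps every CPU's
 * buffer while DUMP_ORIG dumps only the CPU that hit the problem. The
 * enum lives in linux/ftrace.h; my_debug_bailout() is hypothetical.]
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>

static void my_debug_bailout(void)
{
	/* The dump turns tracing off; see the sysrq-z comment above on
	 * re-enabling it via tracing_on. */
	ftrace_dump(DUMP_ALL);
	BUG();
}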
10450 return -ENOMEM;
10453 size = count - done;
10456 size = WRITE_BUFSIZE - 1;
10459 ret = -EFAULT;
10468 size = tmp - buf + 1;
10474 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10476 WRITE_BUFSIZE - 2);
10477 ret = -EINVAL;
10553 boot_instance_info[boot_instance_index - 1] = '\0';
10580 int ret = -ENOMEM;
10585 return -EPERM;
10628 ret = -ENOMEM;
10721 if (!tr->allocated_snapshot)