Lines Matching full:user
44 * probes to print out to the user.
46 * These do not reflect the mapped bytes between the user and kernel space.
194 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
202 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
204 static int destroy_user_event(struct user_event *user);
222 static struct user_event *user_event_get(struct user_event *user) in user_event_get() argument
224 refcount_inc(&user->refcnt); in user_event_get()
226 return user; in user_event_get()
231 struct user_event *user = container_of( in delayed_destroy_user_event() local
236 if (!refcount_dec_and_test(&user->refcnt)) in delayed_destroy_user_event()
239 if (destroy_user_event(user)) { in delayed_destroy_user_event()
247 refcount_set(&user->refcnt, 1); in delayed_destroy_user_event()
253 static void user_event_put(struct user_event *user, bool locked) in user_event_put() argument
257 if (unlikely(!user)) in user_event_put()
273 delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex); in user_event_put()
276 delete = refcount_dec_and_test(&user->refcnt); in user_event_put()
289 if (user->reg_flags & USER_EVENT_REG_PERSIST) { in user_event_put()
300 INIT_WORK(&user->put_work, delayed_destroy_user_event); in user_event_put()
306 * needed because a user-process could register the same event in in user_event_put()
310 * user process would fail a register because the trace_event_call in user_event_put()
313 refcount_set(&user->refcnt, 1); in user_event_put()
315 if (WARN_ON_ONCE(!schedule_work(&user->put_work))) { in user_event_put()
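The USER_EVENT_REG_PERSIST branch above is what lets an event outlive its final reference; otherwise the last user_event_put() queues delayed_destroy_user_event(). From a registering process this is just a flag in struct user_reg. A minimal sketch, assuming the enum user_reg_flag value from the uapi <linux/user_events.h>; persistent events also need the privilege checked by user_event_capable() later in this file:

#include <linux/user_events.h>

/* Ask for a persistent event: it then survives the final user_event_put()
 * above instead of being scheduled for delayed destroy.
 * (Assumption: reg is the struct user_reg passed at registration time.) */
static void make_persistent(struct user_reg *reg)
{
	reg->flags = USER_EVENT_REG_PERSIST;
}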
398 * bad user processes to cause excessive looping. in user_event_mm_fault_in()
436 struct user_event *user = enabler->event; in user_event_enabler_fault_fixup() local
439 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user)); in user_event_enabler_fault_fixup()
445 /* User asked for enabler to be removed during fault */ in user_event_enabler_fault_fixup()
547 /* Update bit atomically, user tracers must be atomic as well */ in user_event_enabler_write()
572 static void user_event_enabler_update(struct user_event *user) in user_event_enabler_update() argument
592 mm = user_event_mm_get_all(user); in user_event_enabler_update()
599 if (enabler->event == user) { in user_event_enabler_update()
644 static struct user_event_mm *user_event_mm_get_all(struct user_event *user) in user_event_mm_get_all() argument
654 * when user based events are most wanted for diagnostics. in user_event_mm_get_all()
666 * Each user mm returned has a ref inc to handle remove RCU races. in user_event_mm_get_all()
672 if (enabler->event == user) { in user_event_mm_get_all()
862 *user_event_enabler_create(struct user_reg *reg, struct user_event *user, in user_event_enabler_create() argument
880 enabler->event = user; in user_event_enabler_create()
908 user_event_get(user); in user_event_enabler_create()
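user_event_enabler_create() above is driven entirely by values the registering process puts into struct user_reg: an address it owns, a bit number, and the size of the word at that address; the kernel then sets or clears that bit as the event is enabled or disabled. A minimal sketch of that setup, assuming the uapi layout from <linux/user_events.h>; my_event_enabled and fill_reg() are illustrative names only:

#include <string.h>
#include <linux/user_events.h>

/* Word the kernel toggles: user_event_enabler_write() sets or clears the
 * requested bit here whenever tracing of the event flips on or off. */
static __u32 my_event_enabled;

static void fill_reg(struct user_reg *reg)
{
	memset(reg, 0, sizeof(*reg));
	reg->size = sizeof(*reg);
	reg->enable_bit = 31;                        /* bit to watch */
	reg->enable_size = sizeof(my_event_enabled); /* 4 or 8 bytes */
	reg->enable_addr = (__u64)&my_event_enabled;
	reg->name_args = (__u64)"mytest u32 count";  /* name plus fields */
}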
929 bool user_event_last_ref(struct user_event *user) in user_event_last_ref() argument
933 if (user->reg_flags & USER_EVENT_REG_PERSIST) in user_event_last_ref()
936 return refcount_read(&user->refcnt) == last; in user_event_last_ref()
955 struct user_event *user = (struct user_event *)call->data; in user_event_get_fields() local
957 return &user->fields; in user_event_get_fields()
968 * NOTE: Offsets are from the user data perspective, they are not from the
970 * sizes to the offset for the user.
1024 /* long is not allowed from a user, since it's ambiguous in size */ in user_field_size()
1066 static void user_event_destroy_validators(struct user_event *user) in user_event_destroy_validators() argument
1069 struct list_head *head = &user->validators; in user_event_destroy_validators()
1077 static void user_event_destroy_fields(struct user_event *user) in user_event_destroy_fields() argument
1080 struct list_head *head = &user->fields; in user_event_destroy_fields()
1088 static int user_event_add_field(struct user_event *user, const char *type, in user_event_add_field() argument
1126 list_add_tail(&validator->user_event_link, &user->validators); in user_event_add_field()
1139 list_add(&field->link, &user->fields); in user_event_add_field()
1142 * Min size from user writes that are required, this does not include in user_event_add_field()
1145 user->min_size = (offset + size) - sizeof(struct trace_entry); in user_event_add_field()
1154 static int user_event_parse_field(char *field, struct user_event *user, in user_event_parse_field() argument
1241 return user_event_add_field(user, type, name, saved_offset, size, in user_event_parse_field()
1245 static int user_event_parse_fields(struct user_event *user, char *args) in user_event_parse_fields() argument
1255 ret = user_event_parse_field(field, user, &offset); in user_event_parse_fields()
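user_event_parse_fields() splits the argument string on ';' and hands each piece to user_event_parse_field(), which resolves the type, size, and offset. The string itself comes from the registering process (name_args on the ioctl path, the text after the "u:" command for dynamic_events). A hedged example of the expected shape, with made-up event and field names:

/* Event name followed by "type name" fields separated by ';'. */
const char *name_args = "mytest u32 count; u64 latency_ns";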
1366 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len) in user_event_set_print_fmt() argument
1369 struct list_head *head = &user->fields; in user_event_set_print_fmt()
1400 static int user_event_create_print_fmt(struct user_event *user) in user_event_create_print_fmt() argument
1405 len = user_event_set_print_fmt(user, NULL, 0); in user_event_create_print_fmt()
1412 user_event_set_print_fmt(user, print_fmt, len); in user_event_create_print_fmt()
1414 user->call.print_fmt = print_fmt; in user_event_create_print_fmt()
1430 static int user_event_set_call_visible(struct user_event *user, bool visible) in user_event_set_call_visible() argument
1455 ret = trace_add_event_call(&user->call); in user_event_set_call_visible()
1457 ret = trace_remove_event_call(&user->call); in user_event_set_call_visible()
1465 static int destroy_user_event(struct user_event *user) in destroy_user_event() argument
1472 user_event_destroy_fields(user); in destroy_user_event()
1474 ret = user_event_set_call_visible(user, false); in destroy_user_event()
1479 dyn_event_remove(&user->devent); in destroy_user_event()
1480 hash_del(&user->node); in destroy_user_event()
1482 user_event_destroy_validators(user); in destroy_user_event()
1483 kfree(user->call.print_fmt); in destroy_user_event()
1484 kfree(EVENT_NAME(user)); in destroy_user_event()
1485 kfree(user); in destroy_user_event()
1498 struct user_event *user; in find_user_event() local
1503 hash_for_each_possible(group->register_table, user, node, key) in find_user_event()
1504 if (!strcmp(EVENT_NAME(user), name)) in find_user_event()
1505 return user_event_get(user); in find_user_event()
1510 static int user_event_validate(struct user_event *user, void *data, int len) in user_event_validate() argument
1512 struct list_head *head = &user->validators; in user_event_validate()
1544 * Writes the user supplied payload out to a trace file.
1546 static void user_event_ftrace(struct user_event *user, struct iov_iter *i, in user_event_ftrace() argument
1570 if (!list_empty(&user->validators) && in user_event_ftrace()
1571 unlikely(user_event_validate(user, entry, size))) in user_event_ftrace()
1585 * Writes the user supplied payload out to perf ring buffer.
1587 static void user_event_perf(struct user_event *user, struct iov_iter *i, in user_event_perf() argument
1592 perf_head = this_cpu_ptr(user->call.perf_events); in user_event_perf()
1611 if (!list_empty(&user->validators) && in user_event_perf()
1612 unlikely(user_event_validate(user, perf_entry, size))) in user_event_perf()
1616 user->call.event.type, 1, regs, in user_event_perf()
1628 * Update the enabled bit among all user processes.
1630 static void update_enable_bit_for(struct user_event *user) in update_enable_bit_for() argument
1632 struct tracepoint *tp = &user->tracepoint; in update_enable_bit_for()
1661 user->status = status; in update_enable_bit_for()
1663 user_event_enabler_update(user); in update_enable_bit_for()
1673 struct user_event *user = (struct user_event *)call->data; in user_event_reg() local
1676 if (!user) in user_event_reg()
1719 user_event_get(user); in user_event_reg()
1720 update_enable_bit_for(user); in user_event_reg()
1723 update_enable_bit_for(user); in user_event_reg()
1724 user_event_put(user, true); in user_event_reg()
1731 struct user_event *user; in user_event_create() local
1756 ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST); in user_event_create()
1759 user_event_put(user, false); in user_event_create()
1771 struct user_event *user = container_of(ev, struct user_event, devent); in user_event_show() local
1776 seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user)); in user_event_show()
1778 head = trace_get_fields(&user->call); in user_event_show()
1801 struct user_event *user = container_of(ev, struct user_event, devent); in user_event_is_busy() local
1803 return !user_event_last_ref(user); in user_event_is_busy()
1808 struct user_event *user = container_of(ev, struct user_event, devent); in user_event_free() local
1810 if (!user_event_last_ref(user)) in user_event_free()
1813 if (!user_event_capable(user->reg_flags)) in user_event_free()
1816 return destroy_user_event(user); in user_event_free()
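Because each user_event is also registered as a dynamic event (note the USER_EVENTS_PREFIX printed by user_event_show() above), it can be created and torn down through tracefs as well as through the ioctl interface, with user_event_is_busy() and user_event_free() guarding removal. A sketch of the creation path, assuming tracefs is mounted at /sys/kernel/tracing and the "u:" command prefix handled by user_event_create(); creating a persistent event this way is subject to the user_event_capable() check:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Append a "u:" command to dynamic_events; user_event_create() parses it
 * and registers a persistent event named "mytest". */
static int create_dyn_user_event(void)
{
	const char *cmd = "u:mytest u32 count";
	int fd = open("/sys/kernel/tracing/dynamic_events",
		      O_WRONLY | O_APPEND);
	int ret;

	if (fd < 0)
		return -1;

	ret = write(fd, cmd, strlen(cmd)) < 0 ? -1 : 0;
	close(fd);

	return ret;
}

Removing it later is the usual dynamic-events form, e.g. writing "-:mytest" to the same file, which reaches user_event_free() above once the last reference is gone.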
1856 static bool user_fields_match(struct user_event *user, int argc, in user_fields_match() argument
1860 struct list_head *head = &user->fields; in user_fields_match()
1877 struct user_event *user = container_of(ev, struct user_event, devent); in user_event_match() local
1880 match = strcmp(EVENT_NAME(user), event) == 0 && in user_event_match()
1884 match = user_fields_match(user, argc, argv); in user_event_match()
1886 match = list_empty(&user->fields); in user_event_match()
1899 static int user_event_trace_register(struct user_event *user) in user_event_trace_register() argument
1903 ret = register_trace_event(&user->call.event); in user_event_trace_register()
1908 ret = user_event_set_call_visible(user, true); in user_event_trace_register()
1911 unregister_trace_event(&user->call.event); in user_event_trace_register()
1927 struct user_event *user; in user_event_parse() local
1940 user = find_user_event(group, name, &key); in user_event_parse()
1943 if (user) { in user_event_parse()
1951 ret = user_fields_match(user, argc, (const char **)argv); in user_event_parse()
1955 ret = list_empty(&user->fields); in user_event_parse()
1958 *newuser = user; in user_event_parse()
1971 user_event_put(user, false); in user_event_parse()
1975 user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT); in user_event_parse()
1977 if (!user) in user_event_parse()
1980 INIT_LIST_HEAD(&user->class.fields); in user_event_parse()
1981 INIT_LIST_HEAD(&user->fields); in user_event_parse()
1982 INIT_LIST_HEAD(&user->validators); in user_event_parse()
1984 user->group = group; in user_event_parse()
1985 user->tracepoint.name = name; in user_event_parse()
1987 ret = user_event_parse_fields(user, args); in user_event_parse()
1992 ret = user_event_create_print_fmt(user); in user_event_parse()
1997 user->call.data = user; in user_event_parse()
1998 user->call.class = &user->class; in user_event_parse()
1999 user->call.name = name; in user_event_parse()
2000 user->call.flags = TRACE_EVENT_FL_TRACEPOINT; in user_event_parse()
2001 user->call.tp = &user->tracepoint; in user_event_parse()
2002 user->call.event.funcs = &user_event_funcs; in user_event_parse()
2003 user->class.system = group->system_name; in user_event_parse()
2005 user->class.fields_array = user_event_fields_array; in user_event_parse()
2006 user->class.get_fields = user_event_get_fields; in user_event_parse()
2007 user->class.reg = user_event_reg; in user_event_parse()
2008 user->class.probe = user_event_ftrace; in user_event_parse()
2010 user->class.perf_probe = user_event_perf; in user_event_parse()
2020 ret = user_event_trace_register(user); in user_event_parse()
2025 user->reg_flags = reg_flags; in user_event_parse()
2027 if (user->reg_flags & USER_EVENT_REG_PERSIST) { in user_event_parse()
2029 refcount_set(&user->refcnt, 2); in user_event_parse()
2032 refcount_set(&user->refcnt, 1); in user_event_parse()
2035 dyn_event_init(&user->devent, &user_event_dops); in user_event_parse()
2036 dyn_event_add(&user->devent, &user->call); in user_event_parse()
2037 hash_add(group->register_table, &user->node, key); in user_event_parse()
2042 *newuser = user; in user_event_parse()
2047 user_event_destroy_fields(user); in user_event_parse()
2048 user_event_destroy_validators(user); in user_event_parse()
2049 kfree(user->call.print_fmt); in user_event_parse()
2050 kfree(user); in user_event_parse()
2060 struct user_event *user = find_user_event(group, name, &key); in delete_user_event() local
2062 if (!user) in delete_user_event()
2065 user_event_put(user, true); in delete_user_event()
2067 if (!user_event_last_ref(user)) in delete_user_event()
2070 if (!user_event_capable(user->reg_flags)) in delete_user_event()
2073 return destroy_user_event(user); in delete_user_event()
2077 * Validates the user payload and writes via iterator.
2083 struct user_event *user = NULL; in user_events_write_core() local
2100 * added. But the user retrieved from indexing into the events array in user_events_write_core()
2104 user = refs->events[idx]; in user_events_write_core()
2108 if (unlikely(user == NULL)) in user_events_write_core()
2111 if (unlikely(i->count < user->min_size)) in user_events_write_core()
2114 tp = &user->tracepoint; in user_events_write_core()
2141 probe_func(user, ©, tpdata, &faulted); in user_events_write_core()
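From user space, the write that lands in user_events_write_core() is an ordinary write()/writev() to the user_events_data file: the first four bytes are the write_index handed back at registration (used above to index into the events array), followed by the payload laid out exactly as the fields were registered. A minimal sketch, reusing my_event_enabled from the enabler sketch earlier and assuming the single "u32 count" payload:

#include <sys/uio.h>
#include <linux/types.h>

/* Emit one event: iovec 0 carries the write index, iovec 1 the payload. */
static int emit_event(int data_fd, __u32 write_index, __u32 count)
{
	struct iovec io[2];

	/* Cheap test first: only pay for the syscall while the kernel has
	 * the enable bit set in our word. */
	if (!(my_event_enabled & (1U << 31)))
		return 0;

	io[0].iov_base = &write_index;
	io[0].iov_len = sizeof(write_index);
	io[1].iov_base = &count;
	io[1].iov_len = sizeof(count);

	return writev(data_fd, io, 2) < 0 ? -1 : 0;
}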
2197 struct user_event *user) in user_events_ref_add() argument
2210 if (refs->events[i] == user) in user_events_ref_add()
2226 new_refs->events[i] = user_event_get(user); in user_events_ref_add()
2294 * Registers a user_event on behalf of a user process.
2301 struct user_event *user; in user_events_ioctl_reg() local
2315 * for user processes that is far easier to debug if this is explicitly in user_events_ioctl_reg()
2330 ret = user_event_parse_cmd(info->group, name, &user, reg.flags); in user_events_ioctl_reg()
2337 ret = user_events_ref_add(info, user); in user_events_ioctl_reg()
2340 user_event_put(user, false); in user_events_ioctl_reg()
2358 enabler = user_event_enabler_create(®, user, &write_result); in user_events_ioctl_reg()
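Tying the registration path together from the caller's side: DIAG_IOCSREG takes the struct user_reg sketched earlier, creates the enabler via user_event_enabler_create() as shown above, and returns write_index for later writes. A hedged sketch based on Documentation/trace/user_events.rst; fill_reg() is the illustrative helper from the enabler sketch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/user_events.h>

/* Returns the open user_events_data fd on success, -1 on error; the
 * write index for subsequent writes is stored in *write_index. */
static int register_my_event(__u32 *write_index)
{
	struct user_reg reg;
	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

	if (data_fd < 0)
		return -1;

	fill_reg(&reg);

	if (ioctl(data_fd, DIAG_IOCSREG, &reg) < 0) {
		close(data_fd);
		return -1;
	}

	*write_index = reg.write_index;

	return data_fd;
}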
2373 * Deletes a user_event on behalf of a user process.
2455 * Unregisters an enablement address/bit within a task/user mm.
2504 /* Ensure bit is now cleared for user, regardless of event status */ in user_events_ioctl_unreg()
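The reverse operation from user space is DIAG_IOCSUNREG with a struct user_unreg naming the same address and bit; the fragment above is the kernel ensuring that bit is cleared even though the event registration itself may remain (that is dropped separately, on close of the fd or via DIAG_IOCSDEL with the event name). A sketch, assuming the uapi struct user_unreg layout (size, disable_bit, disable_addr):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/user_events.h>

/* Stop the kernel from updating our enable word. */
static int unregister_enabler(int data_fd)
{
	struct user_unreg unreg;

	memset(&unreg, 0, sizeof(unreg));
	unreg.size = sizeof(unreg);
	unreg.disable_bit = 31;                       /* same bit as registered */
	unreg.disable_addr = (__u64)&my_event_enabled;

	return ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
}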
2513 * Handles the ioctl from user mode to register or alter operations.
2546 * Handles the final close of the file from user mode.
2619 struct user_event *user; in user_seq_show() local
2628 hash_for_each(group->register_table, i, user, node) { in user_seq_show()
2629 status = user->status; in user_seq_show()
2631 seq_printf(m, "%s", EVENT_NAME(user)); in user_seq_show()
2697 * Creates a set of tracefs files to allow user mode interactions.