Lines Matching +full:reg +full:- +full:names

1 // SPDX-License-Identifier: GPL-2.0-only
29 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
37 #define EVENT_NAME(user_event) ((user_event)->reg_name)
38 #define EVENT_TP_NAME(user_event) ((user_event)->tracepoint.name)
63 /* ID that moves forward within the group for multi-event names */
67 /* Group for init_user_ns mapping, top-most group */
77 * Stores per-event properties, as users register events
101 * Stores per-mm/event properties that enable an address to be
114 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
123 /* Bit 8 is for marking 32-bit on 64-bit */
131 #define ENABLE_BITOPS(e) (&(e)->values)
133 #define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
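
The bit-layout comments above describe how an enabler packs everything into a single values word: the low six bits select which bit to flip in the user-space word, and higher bits carry status flags. A minimal standalone sketch of that packing; the mask values here are assumptions reconstructed from the comments, not taken from this listing:

	#include <stdio.h>

	#define ENABLE_VAL_BIT_MASK	0x3F		/* assumed: bits 0-5, bit to update (0-63) */
	#define ENABLE_VAL_32_ON_64	(1UL << 8)	/* assumed: bit 8, 32-bit word on 64-bit kernel */

	int main(void)
	{
		unsigned long values = 31 | ENABLE_VAL_32_ON_64;

		printf("enable bit %d, 32-on-64 %d\n",
		       (int)(values & ENABLE_VAL_BIT_MASK),
		       (values & ENABLE_VAL_32_ON_64) != 0);
		return 0;
	}
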
152 * Stores per-file events references, as users register events
233 refcount_inc(&user->refcnt); in user_event_get()
245 if (!refcount_dec_and_test(&user->refcnt)) in delayed_destroy_user_event()
256 refcount_set(&user->refcnt, 1); in delayed_destroy_user_event()
270 * When the event is not enabled for auto-delete, there will always in user_event_put()
282 delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex); in user_event_put()
285 delete = refcount_dec_and_test(&user->refcnt); in user_event_put()
298 if (user->reg_flags & USER_EVENT_REG_PERSIST) { in user_event_put()
300 pr_alert("BUG: Auto-delete engaged on persistent event\n"); in user_event_put()
307 * being removed within the class->reg() operation for unregister. in user_event_put()
309 INIT_WORK(&user->put_work, delayed_destroy_user_event); in user_event_put()
312 * Since the event is still in the hashtable, we have to re-inc in user_event_put()
315 * needed because a user-process could register the same event in in user_event_put()
322 refcount_set(&user->refcnt, 1); in user_event_put()
324 if (WARN_ON_ONCE(!schedule_work(&user->put_work))) { in user_event_put()
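
The put path above follows a common kernel pattern: refcount_dec_and_test() lets exactly one caller win the final drop, and since the event may still be reachable through the hashtable, destruction is deferred to a work item with the count re-set to 1 so the worker holds the sole reference. A userspace analogue of the last-put-wins contract (a sketch, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct obj { atomic_int refcnt; };

	/* True only for the caller that dropped the final reference, which
	 * mirrors refcount_dec_and_test(); that caller then destroys, or, as
	 * above, re-sets the count to 1 and queues deferred destruction. */
	static bool obj_put(struct obj *o)
	{
		return atomic_fetch_sub(&o->refcnt, 1) == 1;
	}

	int main(void)
	{
		struct obj o = { .refcnt = 2 };

		printf("first put destroys: %d\n", obj_put(&o));  /* 0 */
		printf("second put destroys: %d\n", obj_put(&o)); /* 1 */
		return 0;
	}
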
339 kfree(group->system_name); in user_event_group_destroy()
340 kfree(group->system_multi_name); in user_event_group_destroy()
378 group->system_name = user_event_group_system_name(); in user_event_group_create()
380 if (!group->system_name) in user_event_group_create()
383 group->system_multi_name = user_event_group_system_multi_name(); in user_event_group_create()
385 if (!group->system_multi_name) in user_event_group_create()
388 mutex_init(&group->reg_mutex); in user_event_group_create()
389 hash_init(group->register_table); in user_event_group_create()
402 list_del_rcu(&enabler->mm_enablers_link); in user_event_enabler_destroy()
405 user_event_put(enabler->event, locked); in user_event_enabler_destroy()
421 return -EFAULT; in user_event_mm_fault_in()
423 mmap_read_lock(mm->mm); in user_event_mm_fault_in()
426 if (refcount_read(&mm->tasks) == 0) { in user_event_mm_fault_in()
427 ret = -ENOENT; in user_event_mm_fault_in()
431 ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, in user_event_mm_fault_in()
434 mmap_read_unlock(mm->mm); in user_event_mm_fault_in()
447 struct user_event_enabler *enabler = fault->enabler; in user_event_enabler_fault_fixup()
448 struct user_event_mm *mm = fault->mm; in user_event_enabler_fault_fixup()
449 unsigned long uaddr = enabler->addr; in user_event_enabler_fault_fixup()
450 int attempt = fault->attempt; in user_event_enabler_fault_fixup()
455 if (ret && ret != -ENOENT) { in user_event_enabler_fault_fixup()
456 struct user_event *user = enabler->event; in user_event_enabler_fault_fixup()
459 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user)); in user_event_enabler_fault_fixup()
472 * If we managed to get the page, re-issue the write. We do not in user_event_enabler_fault_fixup()
481 mmap_read_lock(mm->mm); in user_event_enabler_fault_fixup()
483 mmap_read_unlock(mm->mm); in user_event_enabler_fault_fixup()
504 INIT_WORK(&fault->work, user_event_enabler_fault_fixup); in user_event_enabler_queue_fault()
505 fault->mm = user_event_mm_get(mm); in user_event_enabler_queue_fault()
506 fault->enabler = enabler; in user_event_enabler_queue_fault()
507 fault->attempt = attempt; in user_event_enabler_queue_fault()
512 if (!schedule_work(&fault->work)) { in user_event_enabler_queue_fault()
529 unsigned long uaddr = enabler->addr; in user_event_enabler_write()
537 mmap_assert_locked(mm->mm); in user_event_enabler_write()
542 if (refcount_read(&mm->tasks) == 0) in user_event_enabler_write()
543 return -ENOENT; in user_event_enabler_write()
547 return -EBUSY; in user_event_enabler_write()
551 ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT, in user_event_enabler_write()
556 return -EFAULT; in user_event_enabler_write()
561 return -EFAULT; in user_event_enabler_write()
568 if (enabler->event && enabler->event->status) in user_event_enabler_write()
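
Once pin_user_pages_remote() succeeds, the remaining lines of this function (not matched by this search) presumably map the pinned page and set or clear the enable bit according to the status branch above, so the traced process observes the change immediately. The flip itself is an ordinary atomic read-modify-write, sketched here in userspace terms:

	#include <stdatomic.h>
	#include <stdint.h>

	/* Set or clear one bit of the tracee-visible enable word, much as
	 * the kernel would on the pinned page once the write goes through. */
	static void write_enable_bit(_Atomic uint64_t *word, int bit, int on)
	{
		if (on)
			atomic_fetch_or(word, UINT64_C(1) << bit);
		else
			atomic_fetch_and(word, ~(UINT64_C(1) << bit));
	}
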
584 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { in user_event_enabler_exists()
585 if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit) in user_event_enabler_exists()
602 * We need to build a one-shot list of all the mms that have an in user_event_enabler_update()
615 next = mm->next; in user_event_enabler_update()
616 mmap_read_lock(mm->mm); in user_event_enabler_update()
618 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { in user_event_enabler_update()
619 if (enabler->event == user) { in user_event_enabler_update()
625 mmap_read_unlock(mm->mm); in user_event_enabler_update()
645 enabler->event = user_event_get(orig->event); in user_event_enabler_dup()
646 enabler->addr = orig->addr; in user_event_enabler_dup()
649 enabler->values = orig->values & ENABLE_VAL_DUP_MASK; in user_event_enabler_dup()
652 list_add(&enabler->mm_enablers_link, &mm->enablers); in user_event_enabler_dup()
659 refcount_inc(&mm->refcnt); in user_event_mm_get()
671 * We use the mm->next field to build a one-shot list from the global in user_event_mm_get_all()
691 list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) { in user_event_mm_get_all()
692 if (enabler->event == user) { in user_event_mm_get_all()
693 mm->next = found; in user_event_mm_get_all()
714 user_mm->mm = t->mm; in user_event_mm_alloc()
715 INIT_LIST_HEAD(&user_mm->enablers); in user_event_mm_alloc()
716 refcount_set(&user_mm->refcnt, 1); in user_event_mm_alloc()
717 refcount_set(&user_mm->tasks, 1); in user_event_mm_alloc()
727 mmgrab(user_mm->mm); in user_event_mm_alloc()
737 list_add_rcu(&user_mm->mms_link, &user_event_mms); in user_event_mm_attach()
740 t->user_event_mm = user_mm; in user_event_mm_attach()
745 struct user_event_mm *user_mm = current->user_event_mm; in current_user_event_mm()
757 refcount_inc(&user_mm->refcnt); in current_user_event_mm()
766 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) in user_event_mm_destroy()
769 mmdrop(mm->mm); in user_event_mm_destroy()
775 if (mm && refcount_dec_and_test(&mm->refcnt)) in user_event_mm_put()
794 mm = t->user_event_mm; in user_event_mm_remove()
795 t->user_event_mm = NULL; in user_event_mm_remove()
798 if (!refcount_dec_and_test(&mm->tasks)) in user_event_mm_remove()
803 list_del_rcu(&mm->mms_link); in user_event_mm_remove()
813 * lock to ensure in-progress faults have completed. Faults that in user_event_mm_remove()
817 mmap_write_lock(mm->mm); in user_event_mm_remove()
818 mmap_write_unlock(mm->mm); in user_event_mm_remove()
826 * --------------------------------------------------------------- in user_event_mm_remove()
837 INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put); in user_event_mm_remove()
838 queue_rcu_work(system_wq, &mm->put_rwork); in user_event_mm_remove()
851 list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) { in user_event_mm_dup()
882 *user_event_enabler_create(struct user_reg *reg, struct user_event *user, in user_event_enabler_create() argument
887 unsigned long uaddr = (unsigned long)reg->enable_addr; in user_event_enabler_create()
900 enabler->event = user; in user_event_enabler_create()
901 enabler->addr = uaddr; in user_event_enabler_create()
902 enabler->values = reg->enable_bit; in user_event_enabler_create()
905 if (reg->enable_size == 4) in user_event_enabler_create()
914 mmap_read_lock(user_mm->mm); in user_event_enabler_create()
917 mmap_read_unlock(user_mm->mm); in user_event_enabler_create()
929 list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers); in user_event_enabler_create()
935 /* Attempt to fault-in and retry if it worked */ in user_event_enabler_create()
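
For context on what these enablers service: at register time user space supplies the address of a word and a bit number, and the kernel keeps that bit in sync with the event's enabled state. A hedged sketch of the consuming side, following the usage pattern in Documentation/trace/user_events.rst (names are illustrative):

	#include <stdint.h>

	/* Word registered via enable_addr/enable_bit; the kernel flips bit 0
	 * here whenever the event is enabled or disabled. */
	static uint64_t event_enabled;

	static void maybe_trace(void)
	{
		if (event_enabled & (1ULL << 0)) {
			/* Enabled: build the payload and write it out. */
		}
	}
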
953 if (user->reg_flags & USER_EVENT_REG_PERSIST) in user_event_last_ref()
956 return refcount_read(&user->refcnt) == last; in user_event_last_ref()
975 struct user_event *user = (struct user_event *)call->data; in user_event_get_fields()
977 return &user->fields; in user_event_get_fields()
1021 return -EINVAL; in user_field_array_size()
1024 return -EINVAL; in user_field_array_size()
1029 return -EINVAL; in user_field_array_size()
1034 return -EINVAL; in user_field_array_size()
1037 return -EINVAL; in user_field_array_size()
1083 return -EINVAL; in user_field_size()
1089 struct list_head *head = &user->validators; in user_event_destroy_validators()
1092 list_del(&validator->user_event_link); in user_event_destroy_validators()
1100 struct list_head *head = &user->fields; in user_event_destroy_fields()
1103 list_del(&field->link); in user_event_destroy_fields()
1119 return -ENOMEM; in user_event_add_field()
1139 return -ENOMEM; in user_event_add_field()
1142 validator->flags = validator_flags; in user_event_add_field()
1143 validator->offset = offset; in user_event_add_field()
1146 list_add_tail(&validator->user_event_link, &user->validators); in user_event_add_field()
1149 field->type = type; in user_event_add_field()
1150 field->name = name; in user_event_add_field()
1151 field->offset = offset; in user_event_add_field()
1152 field->size = size; in user_event_add_field()
1153 field->is_signed = is_signed; in user_event_add_field()
1154 field->filter_type = filter_type; in user_event_add_field()
1157 field->filter_type = filter_assign_type(type); in user_event_add_field()
1159 list_add(&field->link, &user->fields); in user_event_add_field()
1165 user->min_size = (offset + size) - sizeof(struct trace_entry); in user_event_add_field()
1179 int len, size = -EINVAL; in user_event_parse_field()
1220 return -EINVAL; in user_event_parse_field()
1237 return -EINVAL; in user_event_parse_field()
1240 return -EINVAL; in user_event_parse_field()
1243 return -EINVAL; in user_event_parse_field()
1248 return -EINVAL; in user_event_parse_field()
1254 return -EINVAL; in user_event_parse_field()
1269 int ret = -EINVAL; in user_event_parse_fields()
1319 /* Unknown, likely struct; allow treating as 64-bit */ in user_field_format()
1340 #define LEN_OR_ZERO (len ? len - pos : 0)
1373 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type); in user_field_set_string()
1375 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name); in user_field_set_string()
1377 if (str_has_prefix(field->type, "struct ")) in user_field_set_string()
1378 pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size); in user_field_set_string()
1389 struct list_head *head = &user->fields; in user_event_set_print_fmt()
1400 field->name, user_field_format(field->type)); in user_event_set_print_fmt()
1408 if (user_field_is_dyn_string(field->type, &str_func)) in user_event_set_print_fmt()
1410 ", %s(%s)", str_func, field->name); in user_event_set_print_fmt()
1413 ", REC->%s", field->name); in user_event_set_print_fmt()
1430 return -ENOMEM; in user_event_create_print_fmt()
1434 user->call.print_fmt = print_fmt; in user_event_create_print_fmt()
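
To make the REC-> pattern concrete: an event registered as "test u32 count" should end up with a print format along the lines of

	print fmt: "count=%u", REC->count

while dynamic string fields are wrapped in the helper chosen by user_field_is_dyn_string() instead of a bare REC-> reference. (Reconstructed from the format-building lines above; the exact output is not part of this listing.)
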
1459 return -ENOMEM; in user_event_set_call_visible()
1470 cred->fsuid = GLOBAL_ROOT_UID; in user_event_set_call_visible()
1475 ret = trace_add_event_call(&user->call); in user_event_set_call_visible()
1477 ret = trace_remove_event_call(&user->call); in user_event_set_call_visible()
1499 dyn_event_remove(&user->devent); in destroy_user_event()
1500 hash_del(&user->node); in destroy_user_event()
1504 /* If we have different names, both must be freed */ in destroy_user_event()
1508 kfree(user->call.print_fmt); in destroy_user_event()
1513 current_user_events--; in destroy_user_event()
1529 hash_for_each_possible(group->register_table, user, node, key) { in find_user_event()
1531 * Single-format events shouldn't return multi-format in find_user_event()
1533 * the name exactly in these cases. Only check like-formats. in find_user_event()
1535 if (EVENT_MULTI_FORMAT(flags) != EVENT_MULTI_FORMAT(user->reg_flags)) in find_user_event()
1544 /* Scan others if this is a multi-format event */ in find_user_event()
1548 return ERR_PTR(-EADDRINUSE); in find_user_event()
1556 struct list_head *head = &user->validators; in user_event_validate()
1562 pos = data + validator->offset; in user_event_validate()
1569 if (likely(validator->flags & VALIDATOR_REL)) in user_event_validate()
1577 return -EFAULT; in user_event_validate()
1579 if (likely(validator->flags & VALIDATOR_ENSURE_NULL)) in user_event_validate()
1580 if (unlikely(*(char *)(pos - 1) != '\0')) in user_event_validate()
1581 return -EFAULT; in user_event_validate()
1596 size_t size = sizeof(*entry) + i->count; in user_event_ftrace()
1601 !(file->flags & EVENT_FILE_FL_ENABLED) || in user_event_ftrace()
1611 if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i))) in user_event_ftrace()
1614 if (!list_empty(&user->validators) && in user_event_ftrace()
1636 perf_head = this_cpu_ptr(user->call.perf_events); in user_event_perf()
1641 size_t size = sizeof(*perf_entry) + i->count; in user_event_perf()
1652 if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i))) in user_event_perf()
1655 if (!list_empty(&user->validators) && in user_event_perf()
1660 user->call.event.type, 1, regs, in user_event_perf()
1676 struct tracepoint *tp = &user->tracepoint; in update_enable_bit_for()
1679 if (static_key_enabled(&tp->key)) { in update_enable_bit_for()
1685 probe_func_ptr = rcu_dereference_sched(tp->funcs); in update_enable_bit_for()
1689 probe_func = probe_func_ptr->func; in update_enable_bit_for()
1699 } while ((++probe_func_ptr)->func); in update_enable_bit_for()
1705 user->status = status; in update_enable_bit_for()
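
This function and the write path further down walk the tracepoint's probe list the same way: an array of func/data pairs terminated by a NULL func, each probe invoked with its own data. A standalone analogue of that do/while walk:

	#include <stdio.h>

	struct probe { void (*func)(const char *); const char *data; };

	static void emit(const char *data) { printf("probe: %s\n", data); }

	int main(void)
	{
		/* A NULL func terminates the array, as with tracepoint funcs. */
		struct probe probes[] = { { emit, "ftrace" }, { emit, "perf" }, { 0, 0 } };
		struct probe *p = probes;

		if (p->func) {
			do {
				p->func(p->data);
			} while ((++p)->func);
		}
		return 0;
	}
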
1711 * Register callback for our events from tracing sub-systems.
1717 struct user_event *user = (struct user_event *)call->data; in user_event_reg()
1721 return -ENOENT; in user_event_reg()
1725 ret = tracepoint_probe_register(call->tp, in user_event_reg()
1726 call->class->probe, in user_event_reg()
1733 tracepoint_probe_unregister(call->tp, in user_event_reg()
1734 call->class->probe, in user_event_reg()
1740 ret = tracepoint_probe_register(call->tp, in user_event_reg()
1741 call->class->perf_probe, in user_event_reg()
1748 tracepoint_probe_unregister(call->tp, in user_event_reg()
1749 call->class->perf_probe, in user_event_reg()
1780 return -ECANCELED; in user_event_create()
1788 return -ENOMEM; in user_event_create()
1794 return -ENOENT; in user_event_create()
1797 mutex_lock(&group->reg_mutex); in user_event_create()
1805 mutex_unlock(&group->reg_mutex); in user_event_create()
1822 head = trace_get_fields(&user->call); in user_event_show()
1830 seq_printf(m, "%s %s", field->type, field->name); in user_event_show()
1832 if (str_has_prefix(field->type, "struct ")) in user_event_show()
1833 seq_printf(m, " %d", field->size); in user_event_show()
1855 return -EBUSY; in user_event_free()
1857 if (!user_event_capable(user->reg_flags)) in user_event_free()
1858 return -EPERM; in user_event_free()
1904 struct list_head *head = &user->fields; in user_fields_match()
1930 match = strcmp(system, user->group->system_name) == 0 || in user_event_match()
1931 strcmp(system, user->group->system_multi_name) == 0; in user_event_match()
1952 ret = register_trace_event(&user->call.event); in user_event_trace_register()
1955 return -ENODEV; in user_event_trace_register()
1960 unregister_trace_event(&user->call.event); in user_event_trace_register()
1967 lockdep_assert_held(&user->group->reg_mutex); in user_event_set_tp_name()
1969 if (EVENT_MULTI_FORMAT(user->reg_flags)) { in user_event_set_tp_name()
1973 user->reg_name, user->group->multi_id); in user_event_set_tp_name()
1976 return -ENOMEM; in user_event_set_tp_name()
1978 user->call.name = multi_name; in user_event_set_tp_name()
1979 user->tracepoint.name = multi_name; in user_event_set_tp_name()
1981 /* Inc to ensure unique multi-event name next time */ in user_event_set_tp_name()
1982 user->group->multi_id++; in user_event_set_tp_name()
1984 /* Non multi-format uses the register name */ in user_event_set_tp_name()
1985 user->call.name = user->reg_name; in user_event_set_tp_name()
1986 user->tracepoint.name = user->reg_name; in user_event_set_tp_name()
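
The effect of this branch: a multi-format event keeps reg_name for matching at register time, but its tracepoint gets a unique name built from reg_name plus the group's monotonically increasing multi_id, so two incompatible registrations of test could surface as, say, test.0 and test.1 under the multi-format system. (The exact suffix format is not visible in this listing.)
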
2083 return -EINVAL; in user_event_parse()
2086 return -EPERM; in user_event_parse()
2092 return -ENOMEM; in user_event_parse()
2121 return -ENOMEM; in user_event_parse()
2123 INIT_LIST_HEAD(&user->class.fields); in user_event_parse()
2124 INIT_LIST_HEAD(&user->fields); in user_event_parse()
2125 INIT_LIST_HEAD(&user->validators); in user_event_parse()
2127 user->group = group; in user_event_parse()
2128 user->reg_name = name; in user_event_parse()
2129 user->reg_flags = reg_flags; in user_event_parse()
2146 user->call.data = user; in user_event_parse()
2147 user->call.class = &user->class; in user_event_parse()
2148 user->call.flags = TRACE_EVENT_FL_TRACEPOINT; in user_event_parse()
2149 user->call.tp = &user->tracepoint; in user_event_parse()
2150 user->call.event.funcs = &user_event_funcs; in user_event_parse()
2152 if (EVENT_MULTI_FORMAT(user->reg_flags)) in user_event_parse()
2153 user->class.system = group->system_multi_name; in user_event_parse()
2155 user->class.system = group->system_name; in user_event_parse()
2157 user->class.fields_array = user_event_fields_array; in user_event_parse()
2158 user->class.get_fields = user_event_get_fields; in user_event_parse()
2159 user->class.reg = user_event_reg; in user_event_parse()
2160 user->class.probe = user_event_ftrace; in user_event_parse()
2162 user->class.perf_probe = user_event_perf; in user_event_parse()
2168 ret = -EMFILE; in user_event_parse()
2177 if (user->reg_flags & USER_EVENT_REG_PERSIST) { in user_event_parse()
2179 refcount_set(&user->refcnt, 2); in user_event_parse()
2182 refcount_set(&user->refcnt, 1); in user_event_parse()
2185 dyn_event_init(&user->devent, &user_event_dops); in user_event_parse()
2186 dyn_event_add(&user->devent, &user->call); in user_event_parse()
2187 hash_add(group->register_table, &user->node, key); in user_event_parse()
2199 kfree(user->call.print_fmt); in user_event_parse()
2201 /* Caller frees reg_name on error, but not multi-name */ in user_event_parse()
2217 int ret = -ENOENT; in delete_user_event()
2220 hash_for_each_possible_safe(group->register_table, user, tmp, node, key) { in delete_user_event()
2225 return -EBUSY; in delete_user_event()
2227 if (!user_event_capable(user->reg_flags)) in delete_user_event()
2228 return -EPERM; in delete_user_event()
2244 struct user_event_file_info *info = file->private_data; in user_events_write_core()
2248 ssize_t ret = i->count; in user_events_write_core()
2252 return -EFAULT; in user_events_write_core()
2255 return -EINVAL; in user_events_write_core()
2259 refs = rcu_dereference_sched(info->refs); in user_events_write_core()
2262 * The refs->events array is protected by RCU, and new items may be in user_events_write_core()
2266 if (likely(refs && idx < refs->count)) in user_events_write_core()
2267 user = refs->events[idx]; in user_events_write_core()
2272 return -ENOENT; in user_events_write_core()
2274 if (unlikely(i->count < user->min_size)) in user_events_write_core()
2275 return -EINVAL; in user_events_write_core()
2277 tp = &user->tracepoint; in user_events_write_core()
2283 if (likely(static_key_enabled(&tp->key))) { in user_events_write_core()
2290 if (unlikely(fault_in_iov_iter_readable(i, i->count))) in user_events_write_core()
2291 return -EFAULT; in user_events_write_core()
2297 probe_func_ptr = rcu_dereference_sched(tp->funcs); in user_events_write_core()
2302 probe_func = probe_func_ptr->func; in user_events_write_core()
2303 tpdata = probe_func_ptr->data; in user_events_write_core()
2305 } while ((++probe_func_ptr)->func); in user_events_write_core()
2311 return -EFAULT; in user_events_write_core()
2313 return -EBADF; in user_events_write_core()
2326 return -ENOENT; in user_events_open()
2331 return -ENOMEM; in user_events_open()
2333 info->group = group; in user_events_open()
2335 file->private_data = info; in user_events_open()
2346 return -EFAULT; in user_events_write()
2349 return -EFAULT; in user_events_write()
2356 return user_events_write_core(kp->ki_filp, i); in user_events_write_iter()
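
The core write path resolves the target event from an index at the front of the payload (the idx lookup into refs->events above), which is why documented usage prefixes every write with the write_index returned at registration. A sketch of the user side, assuming an event registered as "u32 count":

	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	/* write_index came back from the DIAG_IOCSREG ioctl at register time. */
	static ssize_t emit_count(int data_fd, uint32_t write_index, uint32_t count)
	{
		struct iovec io[2] = {
			{ &write_index, sizeof(write_index) }, /* selects the event */
			{ &count,       sizeof(count)       }, /* the payload fields */
		};

		return writev(data_fd, io, 2);
	}
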
2362 struct user_event_group *group = info->group; in user_events_ref_add()
2366 refs = rcu_dereference_protected(info->refs, in user_events_ref_add()
2367 lockdep_is_held(&group->reg_mutex)); in user_events_ref_add()
2370 count = refs->count; in user_events_ref_add()
2373 if (refs->events[i] == user) in user_events_ref_add()
2382 return -ENOMEM; in user_events_ref_add()
2384 new_refs->count = count + 1; in user_events_ref_add()
2387 new_refs->events[i] = refs->events[i]; in user_events_ref_add()
2389 new_refs->events[i] = user_event_get(user); in user_events_ref_add()
2391 rcu_assign_pointer(info->refs, new_refs); in user_events_ref_add()
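
Note that the array is never grown in place: a copy with one extra slot is built and then published with rcu_assign_pointer(), so the lockless readers in the write path always see a whole array. A userspace analogue of the copy-and-publish step, using a C11 release store where the kernel uses RCU (and omitting the deferred free of the old array):

	#include <stdatomic.h>
	#include <stdlib.h>
	#include <string.h>

	struct refs {
		int count;
		void *events[];
	};

	static _Atomic(struct refs *) current_refs;

	/* Copy, append, publish: readers loading current_refs see either the
	 * old array or the new one, never a half-updated mix. */
	static int ref_add(void *event)
	{
		struct refs *old = atomic_load(&current_refs);
		int count = old ? old->count : 0;
		struct refs *next = malloc(sizeof(*next) + (count + 1) * sizeof(void *));

		if (!next)
			return -1;
		if (count)
			memcpy(next->events, old->events, count * sizeof(void *));
		next->events[count] = event;
		next->count = count + 1;
		atomic_store_explicit(&current_refs, next, memory_order_release);
		return count; /* the slot index doubles as the write index */
	}
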
2404 ret = get_user(size, &ureg->size); in user_reg_get()
2410 return -E2BIG; in user_reg_get()
2413 return -EINVAL; in user_reg_get()
2421 if (kreg->flags & ~(USER_EVENT_REG_MAX-1)) in user_reg_get()
2422 return -EINVAL; in user_reg_get()
2425 switch (kreg->enable_size) { in user_reg_get()
2427 /* 32-bit */ in user_reg_get()
2431 /* 64-bit */ in user_reg_get()
2435 return -EINVAL; in user_reg_get()
2439 if (kreg->enable_addr % kreg->enable_size) in user_reg_get()
2440 return -EINVAL; in user_reg_get()
2443 if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1) in user_reg_get()
2444 return -EINVAL; in user_reg_get()
2447 if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr, in user_reg_get()
2448 kreg->enable_size)) in user_reg_get()
2449 return -EFAULT; in user_reg_get()
2451 kreg->size = size; in user_reg_get()
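
Assembling the checks above from the caller's perspective: size must match the struct, flags must be known, enable_size must be 4 or 8, enable_addr must be naturally aligned, and enable_bit must fit within the word. A registration sketch against the uapi in include/uapi/linux/user_events.h; the path and command string are illustrative and error handling is trimmed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/user_events.h>

	static uint64_t enabled; /* kernel keeps bit 0 of this word in sync */

	static int register_event(void)
	{
		struct user_reg reg = { 0 };
		int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

		if (fd < 0)
			return -1;

		reg.size = sizeof(reg);            /* checked against get_user(size) */
		reg.enable_bit = 0;                /* must be < enable_size * 8 */
		reg.enable_size = sizeof(enabled); /* only 4 or 8 accepted */
		reg.enable_addr = (uint64_t)(uintptr_t)&enabled; /* must be aligned */
		reg.name_args = (uint64_t)(uintptr_t)"test u32 count";

		if (ioctl(fd, DIAG_IOCSREG, &reg) < 0)
			return -1;

		/* Keep fd open for the event's lifetime; reg.write_index now
		 * prefixes every write of this event's payload. */
		return reg.write_index;
	}
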
2463 struct user_reg reg; in user_events_ioctl_reg() local
2470 ret = user_reg_get(ureg, &reg); in user_events_ioctl_reg()
2481 if (current_user_event_enabler_exists((unsigned long)reg.enable_addr, in user_events_ioctl_reg()
2482 reg.enable_bit)) in user_events_ioctl_reg()
2483 return -EADDRINUSE; in user_events_ioctl_reg()
2485 name = strndup_user((const char __user *)(uintptr_t)reg.name_args, in user_events_ioctl_reg()
2493 ret = user_event_parse_cmd(info->group, name, &user, reg.flags); in user_events_ioctl_reg()
2521 enabler = user_event_enabler_create(&reg, user, &write_result); in user_events_ioctl_reg()
2524 return -ENOMEM; in user_events_ioctl_reg()
2530 put_user((u32)ret, &ureg->write_index); in user_events_ioctl_reg()
2552 ret = delete_user_event(info->group, name); in user_events_ioctl_del()
2566 ret = get_user(size, &ureg->size); in user_unreg_get()
2572 return -E2BIG; in user_unreg_get()
2575 return -EINVAL; in user_unreg_get()
2580 if (kreg->__reserved || kreg->__reserved2) in user_unreg_get()
2581 return -EINVAL; in user_unreg_get()
2602 mmap_read_lock(user_mm->mm); in user_event_mm_clear_bit()
2604 mmap_read_unlock(user_mm->mm); in user_event_mm_clear_bit()
2609 /* Attempt to fault-in and retry if it worked */ in user_event_mm_clear_bit()
2623 struct user_event_mm *mm = current->user_event_mm; in user_events_ioctl_unreg()
2625 struct user_unreg reg; in user_events_ioctl_unreg() local
2629 ret = user_unreg_get(ureg, &reg); in user_events_ioctl_unreg()
2635 return -ENOENT; in user_events_ioctl_unreg()
2638 ret = -ENOENT; in user_events_ioctl_unreg()
2642 * use at all. When faulting is set, a page-fault is occurring asynchronously. in user_events_ioctl_unreg()
2649 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) { in user_events_ioctl_unreg()
2650 if (enabler->addr == reg.disable_addr && in user_events_ioctl_unreg()
2651 ENABLE_BIT(enabler) == reg.disable_bit) { in user_events_ioctl_unreg()
2655 flags |= enabler->values & ENABLE_VAL_COMPAT_MASK; in user_events_ioctl_unreg()
2669 ret = user_event_mm_clear_bit(mm, reg.disable_addr, in user_events_ioctl_unreg()
2670 reg.disable_bit, flags); in user_events_ioctl_unreg()
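
The inverse operation matches disable_addr and disable_bit against the caller's enablers and, per the clear-bit path above, also zeroes the bit in the user word so no stale enable state remains. A sketch of the user side, under the same assumptions as the registration sketch:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/user_events.h>

	/* Drop the enabler for addr/bit; the kernel clears the bit as well. */
	static int unregister_event(int data_fd, void *addr, uint8_t bit)
	{
		struct user_unreg unreg = { 0 };

		unreg.size = sizeof(unreg);
		unreg.disable_bit = bit;
		unreg.disable_addr = (uint64_t)(uintptr_t)addr;

		return ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
	}
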
2681 struct user_event_file_info *info = file->private_data; in user_events_ioctl()
2682 struct user_event_group *group = info->group; in user_events_ioctl()
2683 long ret = -ENOTTY; in user_events_ioctl()
2687 mutex_lock(&group->reg_mutex); in user_events_ioctl()
2689 mutex_unlock(&group->reg_mutex); in user_events_ioctl()
2693 mutex_lock(&group->reg_mutex); in user_events_ioctl()
2695 mutex_unlock(&group->reg_mutex); in user_events_ioctl()
2699 mutex_lock(&group->reg_mutex); in user_events_ioctl()
2701 mutex_unlock(&group->reg_mutex); in user_events_ioctl()
2713 struct user_event_file_info *info = file->private_data; in user_events_release()
2719 return -EINVAL; in user_events_release()
2721 group = info->group; in user_events_release()
2727 mutex_lock(&group->reg_mutex); in user_events_release()
2729 refs = info->refs; in user_events_release()
2739 for (i = 0; i < refs->count; ++i) in user_events_release()
2740 user_event_put(refs->events[i], false); in user_events_release()
2743 file->private_data = NULL; in user_events_release()
2745 mutex_unlock(&group->reg_mutex); in user_events_release()
2781 struct user_event_group *group = m->private; in user_seq_show()
2787 return -EINVAL; in user_seq_show()
2789 mutex_lock(&group->reg_mutex); in user_seq_show()
2791 hash_for_each(group->register_table, i, user, node) { in user_seq_show()
2792 status = user->status; in user_seq_show()
2811 mutex_unlock(&group->reg_mutex); in user_seq_show()
2835 return -ENOENT; in user_status_open()
2841 struct seq_file *m = file->private_data; in user_status_open()
2843 m->private = group; in user_status_open()
2882 return -ENODEV; in create_user_tracefs()
2916 return -ENOMEM; in trace_events_user_init()
2922 return -ENOMEM; in trace_events_user_init()