/linux/tools/testing/selftests/bpf/progs/
rbtree_fail.c
    16   private(A) struct bpf_spin_lock glock;    [variable]
    55   bpf_spin_lock(&glock);      in rbtree_api_nolock_remove()
    57   bpf_spin_unlock(&glock);    in rbtree_api_nolock_remove()
    88   bpf_spin_lock(&glock);      in rbtree_api_remove_unadded_node()
    94   bpf_spin_unlock(&glock);    in rbtree_api_remove_unadded_node()
    115  bpf_spin_lock(&glock);      in rbtree_api_remove_no_drop()
    126  bpf_spin_unlock(&glock);    in rbtree_api_remove_no_drop()
    132  bpf_spin_unlock(&glock);    in rbtree_api_remove_no_drop()
    146  bpf_spin_lock(&glock);      in rbtree_api_add_to_multiple_trees()
    151  bpf_spin_unlock(&glock);    in rbtree_api_add_to_multiple_trees()
    [all ...]
linked_list_peek.c
    15   private(A) struct bpf_spin_lock glock;    [variable]
    31   bpf_spin_lock(&glock);      in list_peek()
    33   bpf_spin_unlock(&glock);    in list_peek()
    37   bpf_spin_lock(&glock);      in list_peek()
    39   bpf_spin_unlock(&glock);    in list_peek()
    48   bpf_spin_lock(&glock);      in list_peek()
    50   bpf_spin_unlock(&glock);    in list_peek()
    53   bpf_spin_lock(&glock);      in list_peek()
    80   bpf_spin_unlock(&glock);    in list_peek()
    93   bpf_spin_lock(&glock); \
    [all ...]
rbtree.c
    17   struct bpf_spin_lock glock;               [member]
    30   private(A) struct bpf_spin_lock glock;    [variable]
    83   return __add_three(&groot, &glock);    in rbtree_add_nodes()
    89   return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);    in rbtree_add_nodes_nested()
    108  bpf_spin_lock(&glock);      in rbtree_add_and_remove()
    112  bpf_spin_unlock(&glock);    in rbtree_add_and_remove()
    148  bpf_spin_lock(&glock);      in rbtree_add_and_remove_array()
    157  bpf_spin_unlock(&glock);    in rbtree_add_and_remove_array()
    213  bpf_spin_lock(&glock);      in rbtree_first_and_remove()
    220  bpf_spin_unlock(&glock);    in rbtree_first_and_remove()
    [all ...]
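The rbtree selftests above all follow one pattern: a global bpf_spin_lock (glock) and a bpf_rb_root are placed in the same private section, and every rbtree kfunc call sits inside the lock/unlock pair, because the verifier ties the root to its lock. Below is a minimal sketch of that pattern, modeled on rbtree.c; the node struct, section name and program section chosen here are illustrative rather than copied from any one test. The same glock declaration also shows up in the Documentation/bpf/graph_ds_impl.rst hit at the end of this listing.

    /* Minimal sketch of the locking pattern the rbtree selftests above
     * exercise.  Modeled on rbtree.c; names here are illustrative.
     */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct node_data {
            long key;
            struct bpf_rb_node node;
    };

    /* In the selftests this macro comes from their shared headers: it puts
     * the lock and the root into the same named .data section. */
    #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_rb_root groot __contains(node_data, node);

    /* Comparator passed to bpf_rbtree_add(); orders nodes by key. */
    static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
    {
            struct node_data *na, *nb;

            na = container_of(a, struct node_data, node);
            nb = container_of(b, struct node_data, node);
            return na->key < nb->key;
    }

    SEC("tc")
    long rbtree_add_one(void *ctx)
    {
            struct node_data *n;

            n = bpf_obj_new(typeof(*n));
            if (!n)
                    return 1;
            n->key = 42;

            /* Every rbtree kfunc call must sit between the lock/unlock pair
             * for the lock associated with this root, which is exactly what
             * the paired hits above show. */
            bpf_spin_lock(&glock);
            bpf_rbtree_add(&groot, &n->node, less);
            bpf_spin_unlock(&glock);
            return 0;
    }

    char _license[] SEC("license") = "GPL";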
linked_list_fail.c
    112  CHECK(global_kptr, op, &glock, &f1->head); \
    113  CHECK(global_map, op, &glock, &v->head); \
    114  CHECK(global_inner_map, op, &glock, &iv->head); \
    149  CHECK(global_kptr, op, &glock, &f1->head, &b->node); \
    150  CHECK(global_map, op, &glock, &v->head, &f->node2); \
    151  CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \
    364  bpf_spin_lock(&glock);      in use_after_unlock()
    370  bpf_spin_unlock(&glock);    in use_after_unlock()
    395  bpf_spin_lock(&glock);      in list_double_add()
    403  bpf_spin_unlock(&glock);    in list_double_add()
    [all ...]
refcounted_kptr_fail.c
    20   private(A) struct bpf_spin_lock glock;    [variable]
    44   bpf_spin_lock(&glock);      in rbtree_refcounted_node_ref_escapes()
    48   bpf_spin_unlock(&glock);    in rbtree_refcounted_node_ref_escapes()
    89   bpf_spin_lock(&glock);      in rbtree_refcounted_node_ref_escapes_owning_input()
    91   bpf_spin_unlock(&glock);    in rbtree_refcounted_node_ref_escapes_owning_input()
    110  bpf_spin_lock(&glock);      in BPF_PROG()
    115  bpf_spin_unlock(&glock);    in BPF_PROG()
rbtree_btf_fail__wrong_node_type.c
    20   private(A) struct bpf_spin_lock glock;    [variable]
    32   bpf_spin_lock(&glock);      in rbtree_api_add__wrong_node_type()
    34   bpf_spin_unlock(&glock);    in rbtree_api_add__wrong_node_type()
rbtree_btf_fail__add_wrong_type.c
    34   private(A) struct bpf_spin_lock glock;    [variable]
    46   bpf_spin_lock(&glock);      in rbtree_api_add__add_wrong_type()
    48   bpf_spin_unlock(&glock);    in rbtree_api_add__add_wrong_type()
linked_list.c
    322  return test_list_push_pop(&glock, &ghead);              in global_list_push_pop()
    382  ret = list_push_pop_multiple(&glock, &ghead, false);    in global_list_push_pop_multiple()
    385  return list_push_pop_multiple(&glock, &ghead, true);    in global_list_push_pop_multiple()
    417  return test_list_in_list(&glock, &ghead);               in global_list_in_list()
linked_list.h
    52   private(A) struct bpf_spin_lock glock;    [variable]
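linked_list.h declares the shared glock (line 52 above) that linked_list.c then passes, together with a global bpf_list_head, into its push/pop helpers. Here is a minimal sketch of that shared-lock list usage; the element type and head name are assumptions, and only the lock/unlock discipline mirrors the tests.

    /* Minimal sketch of the shared-lock list usage behind linked_list.h /
     * linked_list.c above; element type and head name are assumptions.
     */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct elem {
            int data;
            struct bpf_list_node node;
    };

    #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_list_head ghead __contains(elem, node);

    SEC("tc")
    long list_push_then_pop(void *ctx)
    {
            struct bpf_list_node *n;
            struct elem *e;

            e = bpf_obj_new(typeof(*e));
            if (!e)
                    return 1;
            e->data = 1;

            /* Both the push and the pop happen under glock, mirroring the
             * lock/unlock pairs in the hits above. */
            bpf_spin_lock(&glock);
            bpf_list_push_front(&ghead, &e->node);
            n = bpf_list_pop_back(&ghead);
            bpf_spin_unlock(&glock);

            if (n) {
                    /* A popped node is owned by the program again and must
                     * be released explicitly. */
                    e = container_of(n, struct elem, node);
                    bpf_obj_drop(e);
            }
            return 0;
    }

    char _license[] SEC("license") = "GPL";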
/linux/net/9p/
client.c
    2262  int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)    in p9_client_getlock_dotl()  [argument]
    2271  fid->fid, glock->type, glock->start, glock->length,    in p9_client_getlock_dotl()
    2272  glock->proc_id, glock->client_id);                     in p9_client_getlock_dotl()
    2275  glock->type, glock->start, glock->length,              in p9_client_getlock_dotl()
    2276  glock                                                  in p9_client_getlock_dotl()
    [all ...]
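The client.c hits show p9_client_getlock_dotl() taking a struct p9_getlock whose type, start, length, proc_id and client_id fields it logs around the 9P2000.L TGETLOCK request. Below is a hedged sketch of a caller filling in such a request; everything beyond those field names (the P9_LOCK_TYPE_UNLCK query type, the helper and its parameters) is an assumption about typical F_GETLK-style use, not a copy of the in-tree caller.

    /* Hedged sketch of a p9_client_getlock_dotl() caller.  Only the
     * struct p9_getlock fields visible above come from the source; the
     * rest is assumed. */
    #include <linux/string.h>
    #include <linux/types.h>
    #include <net/9p/9p.h>
    #include <net/9p/client.h>

    static int example_getlock(struct p9_fid *fid, u64 start, u64 length,
                               u32 pid, char *client_id)
    {
            struct p9_getlock glock;
            int err;

            memset(&glock, 0, sizeof(glock));
            glock.type = P9_LOCK_TYPE_UNLCK;  /* "is this range currently free?" */
            glock.start = start;
            glock.length = length;            /* 0 conventionally means "to EOF" */
            glock.proc_id = pid;
            glock.client_id = client_id;      /* how this client names itself */

            err = p9_client_getlock_dotl(fid, &glock);
            if (err < 0)
                    return err;

            /* On success the server has rewritten glock to describe the first
             * conflicting lock, or left type == P9_LOCK_TYPE_UNLCK if none. */
            return glock.type;
    }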
/linux/fs/gfs2/
Makefile
    4    gfs2-y := acl.o bmap.o dir.o xattr.o glock.o \
glock.h
    73   * the glock is held in EX mode according to DLM, but local holders on the
    145  struct gfs2_glock glock;    [member]
    154  /* Look in glock's list of holders for one with current task as owner */    in gfs2_glock_is_locked_by_me()
    174  container_of(gl, struct gfs2_glock_aspace, glock);    in gfs2_glock2aspace()
    231  * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
    232  * @gl: the glock
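The glock.h hits include the kerneldoc for gfs2_glock_nq_init(), which initializes a holder and enqueues it on a glock; the usual GFS2 idiom is to bracket an operation with that enqueue and a matching dequeue. A hedged sketch of the idiom follows; the ip->i_gl field, the LM_ST_SHARED state and the zero flags are assumptions about a typical read-side user rather than a copy of a specific call site.

    /* Hedged sketch of the gfs2_glock_nq_init() / gfs2_glock_dq_uninit()
     * pairing described by the glock.h kerneldoc above. */
    #include "incore.h"
    #include "glock.h"

    static int example_read_under_glock(struct gfs2_inode *ip)
    {
            struct gfs2_holder gh;
            int error;

            /* Initialize the holder and enqueue it on the inode's glock;
             * returns once the glock is held in the requested state. */
            error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
            if (error)
                    return error;

            /* ... inode state may be read here: other nodes can also hold
             * the glock shared, but none can hold it exclusively ... */

            /* Dequeue the holder and uninitialize it, dropping our hold. */
            gfs2_glock_dq_uninit(&gh);
            return 0;
    }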
meta_io.h
    46   return gla->glock.gl_name.ln_sbd;    in gfs2_mapping2sbd()
main.c
    26   #include "glock.h"
    65   gfs2_init_glock_once(&gla->glock);    in gfs2_init_gl_aspace_once()
glock.c
    42   #include "glock.h"
    57   struct gfs2_glock *gl; /* current glock struct */
    116  * wake_up_glock - Wake up waiters on a glock
    117  * @gl: the glock
    134  container_of(gl, struct gfs2_glock_aspace, glock);    in gfs2_glock_dealloc()
    141  * glock_blocked_by_withdraw - determine if we can still use a glock
    142  * @gl: the glock
    147  * the iopen or freeze glock may be safely used because none of their
    207  * gfs2_glock_hold() - increment reference count on glock
    208  * @gl: The glock t
    [all ...]
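Several of the gfs2 hits (glock.h:145/174, meta_io.h:46, main.c:65, glock.c:134) revolve around struct gfs2_glock_aspace, which embeds a glock next to its page-cache address_space so that container_of() can convert between the two. The sketch below approximates gfs2_glock2aspace() and gfs2_mapping2sbd(); treat the GLOF_ASPACE test and the member names as assumptions rather than a verbatim copy.

    /* Hedged sketch of the embedding the hits above rely on: glock and
     * mapping live side by side in struct gfs2_glock_aspace. */
    #include "incore.h"
    #include "glock.h"

    static struct address_space *example_glock2aspace(struct gfs2_glock *gl)
    {
            struct gfs2_glock_aspace *gla;

            /* Only glock types flagged as owning an address space embed one. */
            if (!(gl->gl_ops->go_flags & GLOF_ASPACE))
                    return NULL;

            gla = container_of(gl, struct gfs2_glock_aspace, glock);
            return &gla->mapping;
    }

    static struct gfs2_sbd *example_mapping2sbd(struct address_space *mapping)
    {
            struct gfs2_glock_aspace *gla =
                    container_of(mapping, struct gfs2_glock_aspace, mapping);

            /* From the mapping, walk back to the embedded glock and from its
             * lock name to the superblock it belongs to. */
            return gla->glock.gl_name.ln_sbd;
    }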
/linux/Documentation/bpf/
graph_ds_impl.rst
    70   struct bpf_spin_lock glock;