Lines Matching +full:foo +full:- +full:queue
1 // SPDX-License-Identifier: GPL-2.0-or-later
13 * - object w/ a bit
14 * - free list
18 * - explicit stack instead of recursion
19 * - tail recurse on first born instead of immediate push/pop
20 * - we gather the stuff that should not be killed into tree
25 * - don't just push entire root set; process in place
32 * of foo to bar and vice versa. Current code chokes on that.
38 * upon the beginning and unmark non-junk ones.
56 * parents (->gc_tree).
58 * Damn. Added missing check for ->dead in listen queues scanning.
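The changelog fragments above describe the core problem this file solves: two sockets can each hold the other's descriptor in their receive queues, forming a reference cycle that close() alone can never break. A minimal userspace sketch of such a cycle follows; it is illustrative only and not from the kernel tree (pass_fd() and the fd names are hypothetical):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Send the fd 'to_pass' over the AF_UNIX socket 'via' as SCM_RIGHTS. */
static void pass_fd(int via, int to_pass)
{
        char data = 'x';
        struct iovec iov = { .iov_base = &data, .iov_len = 1 };
        char ctrl[CMSG_SPACE(sizeof(int))] = { 0 };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &to_pass, sizeof(int));
        sendmsg(via, &msg, 0);
}

int main(void)
{
        int foo[2], bar[2];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, foo);
        socketpair(AF_UNIX, SOCK_DGRAM, 0, bar);

        pass_fd(foo[0], bar[1]);  /* a ref to bar now sits in foo's queue */
        pass_fd(bar[0], foo[1]);  /* a ref to foo now sits in bar's queue */

        close(foo[0]); close(foo[1]);
        close(bar[0]); close(bar[1]);
        /* Each socket's file is kept alive only by the in-flight reference
         * queued on the other; only the GC can reclaim them now. */
        return 0;
}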
98 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { in unix_get_socket()
101 struct sock *sk = sock->sk; in unix_get_socket()
103 ops = READ_ONCE(sock->ops); in unix_get_socket()
106 if (sk && ops && ops->family == PF_UNIX) in unix_get_socket()
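These matched lines are the heart of unix_get_socket(), which maps a struct file to its AF_UNIX socket while skipping O_PATH descriptors (FMODE_PATH), which carry no I/O rights. Filled in from context, the whole predicate plausibly reads as follows; the unmatched lines are reconstructions, not verbatim source:

struct unix_sock *unix_get_socket(struct file *filp)
{
        struct inode *inode = file_inode(filp);

        /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                const struct proto_ops *ops;
                struct sock *sk = sock->sk;

                ops = READ_ONCE(sock->ops);

                /* PF_UNIX ? */
                if (sk && ops && ops->family == PF_UNIX)
                        return unix_sk(sk);
        }

        /* Not an AF_UNIX socket: it cannot extend the in-flight graph. */
        return NULL;
}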
118 if (edge->successor->listener) in unix_edge_successor()
119 return unix_sk(edge->successor->listener)->vertex; in unix_edge_successor()
121 return edge->successor->vertex; in unix_edge_successor()
151 struct unix_vertex *vertex = edge->predecessor->vertex; in unix_add_edge()
154 vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); in unix_add_edge()
155 vertex->index = unix_vertex_unvisited_index; in unix_add_edge()
156 vertex->out_degree = 0; in unix_add_edge()
157 INIT_LIST_HEAD(&vertex->edges); in unix_add_edge()
158 INIT_LIST_HEAD(&vertex->scc_entry); in unix_add_edge()
160 list_move_tail(&vertex->entry, &unix_unvisited_vertices); in unix_add_edge()
161 edge->predecessor->vertex = vertex; in unix_add_edge()
164 vertex->out_degree++; in unix_add_edge()
165 list_add_tail(&edge->vertex_entry, &vertex->edges); in unix_add_edge()
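unix_add_edge() lazily assigns a preallocated vertex to the sending socket (taking it off fpl->vertices), initializes it, and links the new edge onto the vertex's adjacency list. The field accesses in these lines imply roughly the following object layout; this is a sketch inferred from those accesses, with the authoritative definitions in include/net/af_unix.h:

struct unix_vertex {
        struct list_head edges;      /* outgoing edges (unix_edge::vertex_entry) */
        struct list_head entry;      /* unix_unvisited_vertices / fpl->vertices */
        struct list_head scc_entry;  /* Tarjan stack and SCC grouping */
        unsigned long out_degree;    /* number of outgoing edges */
        unsigned long index;         /* Tarjan visit index */
        unsigned long scc_index;     /* lowlink-style SCC identifier */
};

struct unix_edge {
        struct unix_sock *predecessor;  /* socket whose fd is in flight */
        struct unix_sock *successor;    /* socket the fd was sent to */
        struct list_head vertex_entry;  /* link on predecessor's vertex */
        struct list_head stack_entry;   /* DFS edge stack in __unix_walk_scc() */
};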
172 struct unix_vertex *vertex = edge->predecessor->vertex; in unix_del_edge()
174 if (!fpl->dead) in unix_del_edge()
177 list_del(&edge->vertex_entry); in unix_del_edge()
178 vertex->out_degree--; in unix_del_edge()
180 if (!vertex->out_degree) { in unix_del_edge()
181 edge->predecessor->vertex = NULL; in unix_del_edge()
182 list_move_tail(&vertex->entry, &fpl->vertices); in unix_del_edge()
190 list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) { in unix_free_vertices()
191 list_del(&vertex->entry); in unix_free_vertices()
205 if (!fpl->count_unix) in unix_add_edges()
209 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges()
215 edge = fpl->edges + i++; in unix_add_edges()
216 edge->predecessor = inflight; in unix_add_edges()
217 edge->successor = receiver; in unix_add_edges()
220 } while (i < fpl->count_unix); in unix_add_edges()
222 receiver->scm_stat.nr_unix_fds += fpl->count_unix; in unix_add_edges()
223 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix); in unix_add_edges()
225 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count); in unix_add_edges()
229 fpl->inflight = true; in unix_add_edges()
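unix_add_edges() walks every fd in the SCM_RIGHTS batch (j indexes all of fpl->fp, while i only counts AF_UNIX sockets), wires one edge per in-flight unix socket, then bumps the receiver's and the global in-flight accounting. Reassembled from these lines, the body plausibly reads as follows; the unmatched lines are reconstructions, not verbatim source:

void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
        int i = 0, j = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
                struct unix_edge *edge;

                if (!inflight)
                        continue;       /* not an AF_UNIX socket */

                edge = fpl->edges + i++;
                edge->predecessor = inflight;
                edge->successor = receiver;

                unix_add_edge(fpl, edge);
        } while (i < fpl->count_unix);

        receiver->scm_stat.nr_unix_fds += fpl->count_unix;
        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);

        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
out:
        fpl->inflight = true;

        spin_unlock(&unix_gc_lock);
}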
241 if (!fpl->count_unix) in unix_del_edges()
245 struct unix_edge *edge = fpl->edges + i++; in unix_del_edges()
248 } while (i < fpl->count_unix); in unix_del_edges()
250 if (!fpl->dead) { in unix_del_edges()
251 receiver = fpl->edges[0].successor; in unix_del_edges()
252 receiver->scm_stat.nr_unix_fds -= fpl->count_unix; in unix_del_edges()
254 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix); in unix_del_edges()
256 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count); in unix_del_edges()
260 fpl->inflight = false; in unix_del_edges()
269 if (!receiver->scm_stat.nr_unix_fds) { in unix_update_edges()
270 receiver->listener = NULL; in unix_update_edges()
273 unix_update_graph(unix_sk(receiver->listener)->vertex); in unix_update_edges()
274 receiver->listener = NULL; in unix_update_edges()
284 if (!fpl->count_unix) in unix_prepare_fpl()
287 for (i = 0; i < fpl->count_unix; i++) { in unix_prepare_fpl()
292 list_add(&vertex->entry, &fpl->vertices); in unix_prepare_fpl()
295 fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges), in unix_prepare_fpl()
297 if (!fpl->edges) in unix_prepare_fpl()
304 return -ENOMEM; in unix_prepare_fpl()
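unix_prepare_fpl() preallocates one vertex per AF_UNIX fd plus the whole edge array before unix_gc_lock is ever taken, so unix_add_edges() never has to allocate under the spinlock; on any failure it frees what it gathered and reports -ENOMEM. Filled in around the matched lines (a reconstruction, not verbatim source):

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex;
        int i;

        if (!fpl->count_unix)
                return 0;

        for (i = 0; i < fpl->count_unix; i++) {
                vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
                if (!vertex)
                        goto err;

                list_add(&vertex->entry, &fpl->vertices);
        }

        fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
                                    GFP_KERNEL_ACCOUNT);
        if (!fpl->edges)
                goto err;

        return 0;

err:
        unix_free_vertices(fpl);
        return -ENOMEM;
}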
309 if (fpl->inflight) in unix_destroy_fpl()
312 kvfree(fpl->edges); in unix_destroy_fpl()
322 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in unix_vertex_dead()
325 /* The vertex's fd can be received by a non-inflight socket. */ in unix_vertex_dead()
332 if (next_vertex->scc_index != vertex->scc_index) in unix_vertex_dead()
338 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); in unix_vertex_dead()
339 u = edge->predecessor; in unix_vertex_dead()
340 total_ref = file_count(u->sk.sk_socket->file); in unix_vertex_dead()
343 if (total_ref != vertex->out_degree) in unix_vertex_dead()
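unix_vertex_dead() encodes the garbage criterion: a vertex is dead only if every receiver of its fd is itself in flight and inside the same SCC, and the socket's file reference count equals its out-degree, i.e. every remaining reference is an in-flight one, so no user can ever touch the socket again. Reassembled around the matched lines (a reconstruction, not verbatim source):

static bool unix_vertex_dead(struct unix_vertex *vertex)
{
        struct unix_edge *edge;
        struct unix_sock *u;
        long total_ref;

        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                /* The vertex's fd can be received by a non-inflight socket. */
                if (!next_vertex)
                        return false;

                /* ... or by an inflight socket in another SCC. */
                if (next_vertex->scc_index != vertex->scc_index)
                        return false;
        }

        /* No receiver exists outside the same SCC. */

        edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
        u = edge->predecessor;
        total_ref = file_count(u->sk.sk_socket->file);

        /* If not close()d, total_ref > out_degree. */
        if (total_ref != vertex->out_degree)
                return false;

        return true;
}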
354 struct sk_buff_head *queue; in unix_collect_skb() local
358 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); in unix_collect_skb()
359 u = edge->predecessor; in unix_collect_skb()
360 queue = &u->sk.sk_receive_queue; in unix_collect_skb()
362 spin_lock(&queue->lock); in unix_collect_skb()
364 if (u->sk.sk_state == TCP_LISTEN) { in unix_collect_skb()
367 skb_queue_walk(queue, skb) { in unix_collect_skb()
368 struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; in unix_collect_skb()
370 spin_lock(&embryo_queue->lock); in unix_collect_skb()
372 spin_unlock(&embryo_queue->lock); in unix_collect_skb()
375 skb_queue_splice_init(queue, hitlist); in unix_collect_skb()
378 spin_unlock(&queue->lock); in unix_collect_skb()
393 /* Self-reference or an embryo-listener circle ? */ in unix_scc_cyclic()
394 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in unix_scc_cyclic()
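unix_scc_cyclic() decides whether the graph remains cyclic after an update: a multi-vertex SCC is trivially a cycle, and a singleton still counts if one of its edges loops back to itself, either directly or through an embryo whose listener is the same vertex. Reconstructed around the matched lines (not verbatim source):

static bool unix_scc_cyclic(struct list_head *scc)
{
        struct unix_vertex *vertex;
        struct unix_edge *edge;

        /* SCC containing multiple vertices ? */
        if (!list_is_singular(scc))
                return true;

        vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

        /* Self-reference or an embryo-listener circle ? */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                if (unix_edge_successor(edge) == vertex)
                        return true;
        }

        return false;
}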
413 /* Push vertex to vertex_stack and mark it as on-stack in __unix_walk_scc()
417 list_add(&vertex->scc_entry, &vertex_stack); in __unix_walk_scc()
419 vertex->index = *last_index; in __unix_walk_scc()
420 vertex->scc_index = *last_index; in __unix_walk_scc()
424 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in __unix_walk_scc()
430 if (next_vertex->index == unix_vertex_unvisited_index) { in __unix_walk_scc()
436 list_add(&edge->stack_entry, &edge_stack); in __unix_walk_scc()
446 list_del_init(&edge->stack_entry); in __unix_walk_scc()
449 vertex = edge->predecessor->vertex; in __unix_walk_scc()
455 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); in __unix_walk_scc()
456 } else if (next_vertex->index != unix_vertex_grouped_index) { in __unix_walk_scc()
463 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); in __unix_walk_scc()
469 if (vertex->index == vertex->scc_index) { in __unix_walk_scc()
479 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); in __unix_walk_scc()
483 list_move_tail(&v->entry, &unix_visited_vertices); in __unix_walk_scc()
485 /* Mark vertex as off-stack. */ in __unix_walk_scc()
486 v->index = unix_vertex_grouped_index; in __unix_walk_scc()
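The fragments above form an iterative Tarjan SCC walk: vertex->index is the visit order, vertex->scc_index plays the lowlink role, the explicit edge_stack replaces the recursion the header comment promised to avoid, and unix_vertex_grouped_index marks vertices already popped into a finished SCC. For comparison, here is a minimal recursive reference version in plain C (a userspace sketch; all names in it are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define NV 4

static int adj[NV][NV];                     /* adjacency matrix */
static int idx[NV], low[NV];                /* visit index (0 = unvisited), lowlink */
static bool on_stack[NV];
static int stack[NV], sp, next_index = 1;

static void strongconnect(int v)
{
        idx[v] = low[v] = next_index++;
        stack[sp++] = v;
        on_stack[v] = true;

        for (int w = 0; w < NV; w++) {
                if (!adj[v][w])
                        continue;
                if (!idx[w]) {                  /* tree edge: recurse */
                        strongconnect(w);
                        if (low[w] < low[v])
                                low[v] = low[w];
                } else if (on_stack[w] && idx[w] < low[v]) {
                        low[v] = idx[w];        /* back edge into the stack */
                }
        }

        if (low[v] == idx[v]) {                 /* v is the root of an SCC */
                int w;

                printf("SCC:");
                do {
                        w = stack[--sp];
                        on_stack[w] = false;
                        printf(" %d", w);
                } while (w != v);
                printf("\n");
        }
}

int main(void)
{
        adj[0][1] = adj[1][0] = 1;      /* 0 <-> 1: one two-vertex SCC */
        adj[2][3] = 1;                  /* 2 -> 3: two singleton SCCs */

        for (int v = 0; v < NV; v++)
                if (!idx[v])
                        strongconnect(v);
        return 0;
}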
537 list_add(&scc, &vertex->scc_entry); in unix_walk_scc_fast()
540 list_move_tail(&vertex->entry, &unix_visited_vertices); in unix_walk_scc_fast()
582 UNIXCB(skb).fp->dead = true; in __unix_gc()
616 if (!fpl || !fpl->count_unix || in wait_for_unix_gc()
617 READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) in wait_for_unix_gc()
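wait_for_unix_gc() throttles senders rather than collecting synchronously: once a single user has enough fds in flight to cross the UNIX_INFLIGHT_SANE_USER threshold matched above, that sender must wait for the running GC before queueing more. Filled in around the matched lines (a reconstruction; the first guard and the constants are assumptions based on the same file):

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
        /* If the number of inflight sockets is insane, kick a collection. */
        if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
            !READ_ONCE(gc_in_progress))
                unix_gc();

        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
        if (!fpl || !fpl->count_unix ||
            READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

        if (READ_ONCE(gc_in_progress))
                flush_work(&unix_gc_work);
}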