Lines matching +full:mux +full:-locked (net/kcm/kcmsock.c)

1 // SPDX-License-Identifier: GPL-2.0-only
46 return (struct kcm_tx_msg *)skb->cb; in kcm_tx_msg()
51 csk->sk_err = EPIPE; in report_csk_error()
58 struct sock *csk = psock->sk; in kcm_abort_tx_psock()
59 struct kcm_mux *mux = psock->mux; in kcm_abort_tx_psock() local
63 spin_lock_bh(&mux->lock); in kcm_abort_tx_psock()
65 if (psock->tx_stopped) { in kcm_abort_tx_psock()
66 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
70 psock->tx_stopped = 1; in kcm_abort_tx_psock()
71 KCM_STATS_INCR(psock->stats.tx_aborts); in kcm_abort_tx_psock()
73 if (!psock->tx_kcm) { in kcm_abort_tx_psock()
75 list_del(&psock->psock_avail_list); in kcm_abort_tx_psock()
84 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_abort_tx_psock()
87 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
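
kcm_abort_tx_psock() is deliberately idempotent: tx_stopped is tested and set under mux->lock, so a second abort of the same psock becomes a no-op. The same test-and-set-under-lock guard as a generic sketch, with hypothetical names:

/* Only the first caller performs the teardown; later callers see the
 * flag set under the same lock and bail out early. */
static void abort_once(struct ctx *c)
{
        spin_lock_bh(&c->lock);
        if (c->stopped) {               /* already aborted */
                spin_unlock_bh(&c->lock);
                return;
        }
        c->stopped = 1;
        /* ... teardown while still holding the lock ... */
        spin_unlock_bh(&c->lock);
}
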
93 /* RX mux lock held. */
94 static void kcm_update_rx_mux_stats(struct kcm_mux *mux, in kcm_update_rx_mux_stats() argument
97 STRP_STATS_ADD(mux->stats.rx_bytes, in kcm_update_rx_mux_stats()
98 psock->strp.stats.bytes - in kcm_update_rx_mux_stats()
99 psock->saved_rx_bytes); in kcm_update_rx_mux_stats()
100 mux->stats.rx_msgs += in kcm_update_rx_mux_stats()
101 psock->strp.stats.msgs - psock->saved_rx_msgs; in kcm_update_rx_mux_stats()
102 psock->saved_rx_msgs = psock->strp.stats.msgs; in kcm_update_rx_mux_stats()
103 psock->saved_rx_bytes = psock->strp.stats.bytes; in kcm_update_rx_mux_stats()
106 static void kcm_update_tx_mux_stats(struct kcm_mux *mux, in kcm_update_tx_mux_stats() argument
109 KCM_STATS_ADD(mux->stats.tx_bytes, in kcm_update_tx_mux_stats()
110 psock->stats.tx_bytes - psock->saved_tx_bytes); in kcm_update_tx_mux_stats()
111 mux->stats.tx_msgs += in kcm_update_tx_mux_stats()
112 psock->stats.tx_msgs - psock->saved_tx_msgs; in kcm_update_tx_mux_stats()
113 psock->saved_tx_msgs = psock->stats.tx_msgs; in kcm_update_tx_mux_stats()
114 psock->saved_tx_bytes = psock->stats.tx_bytes; in kcm_update_tx_mux_stats()
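
Both stats helpers use the same snapshot idiom: the psock keeps running counters plus a saved_* copy of whatever was last folded into the mux, so each fold adds only the delta and no counter is ever reset. A minimal sketch with hypothetical types:

/* Delta-fold idiom: add what accrued since the last fold, then snapshot. */
struct psock_ctrs { unsigned long bytes, saved_bytes; };
struct mux_ctrs   { unsigned long bytes; };

static void fold_tx_bytes(struct mux_ctrs *m, struct psock_ctrs *p)
{
        m->bytes += p->bytes - p->saved_bytes;  /* delta since last fold */
        p->saved_bytes = p->bytes;              /* snapshot for next time */
}
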
119 /* KCM is ready to receive messages on its queue-- either the KCM is new or
121 * pending ready messages on a psock. RX mux lock held.
125 struct kcm_mux *mux = kcm->mux; in kcm_rcv_ready() local
129 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled)) in kcm_rcv_ready()
132 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) { in kcm_rcv_ready()
133 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_ready()
135 skb_queue_head(&mux->rx_hold_queue, skb); in kcm_rcv_ready()
136 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
141 while (!list_empty(&mux->psocks_ready)) { in kcm_rcv_ready()
142 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock, in kcm_rcv_ready()
145 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) { in kcm_rcv_ready()
147 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
154 list_del(&psock->psock_ready_list); in kcm_rcv_ready()
155 psock->ready_rx_msg = NULL; in kcm_rcv_ready()
159 strp_unpause(&psock->strp); in kcm_rcv_ready()
160 strp_check_rcv(&psock->strp); in kcm_rcv_ready()
164 list_add_tail(&kcm->wait_rx_list, in kcm_rcv_ready()
165 &kcm->mux->kcm_rx_waiters); in kcm_rcv_ready()
167 WRITE_ONCE(kcm->rx_wait, true); in kcm_rcv_ready()
172 struct sock *sk = skb->sk; in kcm_rfree()
174 struct kcm_mux *mux = kcm->mux; in kcm_rfree() local
175 unsigned int len = skb->truesize; in kcm_rfree()
178 atomic_sub(len, &sk->sk_rmem_alloc); in kcm_rfree()
183 if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) && in kcm_rfree()
184 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) { in kcm_rfree()
185 spin_lock_bh(&mux->rx_lock); in kcm_rfree()
187 spin_unlock_bh(&mux->rx_lock); in kcm_rfree()
193 struct sk_buff_head *list = &sk->sk_receive_queue; in kcm_queue_rcv_skb()
195 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in kcm_queue_rcv_skb()
196 return -ENOMEM; in kcm_queue_rcv_skb()
198 if (!sk_rmem_schedule(sk, skb, skb->truesize)) in kcm_queue_rcv_skb()
199 return -ENOBUFS; in kcm_queue_rcv_skb()
201 skb->dev = NULL; in kcm_queue_rcv_skb()
204 skb->sk = sk; in kcm_queue_rcv_skb()
205 skb->destructor = kcm_rfree; in kcm_queue_rcv_skb()
206 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in kcm_queue_rcv_skb()
207 sk_mem_charge(sk, skb->truesize); in kcm_queue_rcv_skb()
212 sk->sk_data_ready(sk); in kcm_queue_rcv_skb()
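
The charging sequence above has the same shape as skb_set_owner_r(), but installs kcm_rfree instead of sock_rfree so a KCM socket whose receive memory drains below sk_rcvlowat can be re-armed on the mux. A hedged sketch of the idiom:

/* Hand an skb to a socket's receive accounting with a custom destructor
 * (my_rfree is a hypothetical name; the listing uses kcm_rfree). */
skb_orphan(skb);                        /* detach any previous owner */
skb->sk = sk;
skb->destructor = my_rfree;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
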
219 * RX mux lock held.
221 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) in requeue_rx_msgs() argument
228 skb->destructor = sock_rfree; in requeue_rx_msgs()
231 if (list_empty(&mux->kcm_rx_waiters)) { in requeue_rx_msgs()
232 skb_queue_tail(&mux->rx_hold_queue, skb); in requeue_rx_msgs()
236 kcm = list_first_entry(&mux->kcm_rx_waiters, in requeue_rx_msgs()
239 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in requeue_rx_msgs()
241 list_del(&kcm->wait_rx_list); in requeue_rx_msgs()
243 WRITE_ONCE(kcm->rx_wait, false); in requeue_rx_msgs()
257 struct kcm_mux *mux = psock->mux; in reserve_rx_kcm() local
260 WARN_ON(psock->ready_rx_msg); in reserve_rx_kcm()
262 if (psock->rx_kcm) in reserve_rx_kcm()
263 return psock->rx_kcm; in reserve_rx_kcm()
265 spin_lock_bh(&mux->rx_lock); in reserve_rx_kcm()
267 if (psock->rx_kcm) { in reserve_rx_kcm()
268 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
269 return psock->rx_kcm; in reserve_rx_kcm()
272 kcm_update_rx_mux_stats(mux, psock); in reserve_rx_kcm()
274 if (list_empty(&mux->kcm_rx_waiters)) { in reserve_rx_kcm()
275 psock->ready_rx_msg = head; in reserve_rx_kcm()
276 strp_pause(&psock->strp); in reserve_rx_kcm()
277 list_add_tail(&psock->psock_ready_list, in reserve_rx_kcm()
278 &mux->psocks_ready); in reserve_rx_kcm()
279 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
283 kcm = list_first_entry(&mux->kcm_rx_waiters, in reserve_rx_kcm()
285 list_del(&kcm->wait_rx_list); in reserve_rx_kcm()
287 WRITE_ONCE(kcm->rx_wait, false); in reserve_rx_kcm()
289 psock->rx_kcm = kcm; in reserve_rx_kcm()
291 WRITE_ONCE(kcm->rx_psock, psock); in reserve_rx_kcm()
293 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
309 struct kcm_sock *kcm = psock->rx_kcm; in unreserve_rx_kcm()
310 struct kcm_mux *mux = psock->mux; in unreserve_rx_kcm() local
315 spin_lock_bh(&mux->rx_lock); in unreserve_rx_kcm()
317 psock->rx_kcm = NULL; in unreserve_rx_kcm()
319 WRITE_ONCE(kcm->rx_psock, NULL); in unreserve_rx_kcm()
321 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with in unreserve_rx_kcm()
326 if (unlikely(kcm->done)) { in unreserve_rx_kcm()
327 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
332 INIT_WORK(&kcm->done_work, kcm_done_work); in unreserve_rx_kcm()
333 schedule_work(&kcm->done_work); in unreserve_rx_kcm()
337 if (unlikely(kcm->rx_disabled)) { in unreserve_rx_kcm()
338 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in unreserve_rx_kcm()
339 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) { in unreserve_rx_kcm()
345 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
355 read_lock_bh(&sk->sk_callback_lock); in psock_data_ready()
357 psock = (struct kcm_psock *)sk->sk_user_data; in psock_data_ready()
359 strp_data_ready(&psock->strp); in psock_data_ready()
361 read_unlock_bh(&sk->sk_callback_lock); in psock_data_ready()
379 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_strparser()
389 struct bpf_prog *prog = psock->bpf_prog; in kcm_parse_func_strparser()
418 struct kcm_mux *mux; in psock_write_space() local
421 read_lock_bh(&sk->sk_callback_lock); in psock_write_space()
423 psock = (struct kcm_psock *)sk->sk_user_data; in psock_write_space()
426 mux = psock->mux; in psock_write_space()
428 spin_lock_bh(&mux->lock); in psock_write_space()
431 kcm = psock->tx_kcm; in psock_write_space()
432 if (kcm && !unlikely(kcm->tx_stopped)) in psock_write_space()
433 queue_work(kcm_wq, &kcm->tx_work); in psock_write_space()
435 spin_unlock_bh(&mux->lock); in psock_write_space()
437 read_unlock_bh(&sk->sk_callback_lock); in psock_write_space()
442 /* kcm sock is locked. */
445 struct kcm_mux *mux = kcm->mux; in reserve_psock() local
448 psock = kcm->tx_psock; in reserve_psock()
453 WARN_ON(kcm->tx_wait); in reserve_psock()
454 if (unlikely(psock->tx_stopped)) in reserve_psock()
457 return kcm->tx_psock; in reserve_psock()
460 spin_lock_bh(&mux->lock); in reserve_psock()
465 psock = kcm->tx_psock; in reserve_psock()
467 WARN_ON(kcm->tx_wait); in reserve_psock()
468 spin_unlock_bh(&mux->lock); in reserve_psock()
469 return kcm->tx_psock; in reserve_psock()
472 if (!list_empty(&mux->psocks_avail)) { in reserve_psock()
473 psock = list_first_entry(&mux->psocks_avail, in reserve_psock()
476 list_del(&psock->psock_avail_list); in reserve_psock()
477 if (kcm->tx_wait) { in reserve_psock()
478 list_del(&kcm->wait_psock_list); in reserve_psock()
479 kcm->tx_wait = false; in reserve_psock()
481 kcm->tx_psock = psock; in reserve_psock()
482 psock->tx_kcm = kcm; in reserve_psock()
483 KCM_STATS_INCR(psock->stats.reserved); in reserve_psock()
484 } else if (!kcm->tx_wait) { in reserve_psock()
485 list_add_tail(&kcm->wait_psock_list, in reserve_psock()
486 &mux->kcm_tx_waiters); in reserve_psock()
487 kcm->tx_wait = true; in reserve_psock()
490 spin_unlock_bh(&mux->lock); in reserve_psock()
495 /* mux lock held */
498 struct kcm_mux *mux = psock->mux; in psock_now_avail() local
501 if (list_empty(&mux->kcm_tx_waiters)) { in psock_now_avail()
502 list_add_tail(&psock->psock_avail_list, in psock_now_avail()
503 &mux->psocks_avail); in psock_now_avail()
505 kcm = list_first_entry(&mux->kcm_tx_waiters, in psock_now_avail()
508 list_del(&kcm->wait_psock_list); in psock_now_avail()
509 kcm->tx_wait = false; in psock_now_avail()
510 psock->tx_kcm = kcm; in psock_now_avail()
517 kcm->tx_psock = psock; in psock_now_avail()
518 KCM_STATS_INCR(psock->stats.reserved); in psock_now_avail()
519 queue_work(kcm_wq, &kcm->tx_work); in psock_now_avail()
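
reserve_psock() and psock_now_avail() are two halves of a resource/waiter handoff: a sender either takes a psock off psocks_avail or queues itself on kcm_tx_waiters, and a psock that frees up is either parked on the avail list or handed directly to the first waiter, all under mux->lock. The same shape generically, with hypothetical names:

/* A freed resource goes straight to the first waiter if any, otherwise
 * onto the free list; callers hold p->lock. */
static void resource_now_avail(struct pool *p, struct res *r)
{
        struct waiter *w;

        if (list_empty(&p->waiters)) {
                list_add_tail(&r->free_list, &p->free);
                return;
        }
        w = list_first_entry(&p->waiters, struct waiter, wait_list);
        list_del(&w->wait_list);
        w->res = r;             /* direct handoff, skipping the free list */
        wake_waiter(w);         /* hypothetical wakeup, e.g. queue_work() */
}
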
523 /* kcm sock is locked. */
527 struct kcm_mux *mux = kcm->mux; in unreserve_psock() local
529 spin_lock_bh(&mux->lock); in unreserve_psock()
531 psock = kcm->tx_psock; in unreserve_psock()
534 spin_unlock_bh(&mux->lock); in unreserve_psock()
540 kcm_update_tx_mux_stats(mux, psock); in unreserve_psock()
542 WARN_ON(kcm->tx_wait); in unreserve_psock()
544 kcm->tx_psock = NULL; in unreserve_psock()
545 psock->tx_kcm = NULL; in unreserve_psock()
546 KCM_STATS_INCR(psock->stats.unreserved); in unreserve_psock()
548 if (unlikely(psock->tx_stopped)) { in unreserve_psock()
549 if (psock->done) { in unreserve_psock()
551 list_del(&psock->psock_list); in unreserve_psock()
552 mux->psocks_cnt--; in unreserve_psock()
553 sock_put(psock->sk); in unreserve_psock()
554 fput(psock->sk->sk_socket->file); in unreserve_psock()
560 spin_unlock_bh(&mux->lock); in unreserve_psock()
567 spin_unlock_bh(&mux->lock); in unreserve_psock()
572 struct kcm_mux *mux = kcm->mux; in kcm_report_tx_retry() local
574 spin_lock_bh(&mux->lock); in kcm_report_tx_retry()
575 KCM_STATS_INCR(mux->stats.tx_retries); in kcm_report_tx_retry()
576 spin_unlock_bh(&mux->lock); in kcm_report_tx_retry()
585 struct sock *sk = &kcm->sk; in kcm_write_msgs()
590 kcm->tx_wait_more = false; in kcm_write_msgs()
591 psock = kcm->tx_psock; in kcm_write_msgs()
592 if (unlikely(psock && psock->tx_stopped)) { in kcm_write_msgs()
598 if (skb_queue_empty(&sk->sk_write_queue)) in kcm_write_msgs()
601 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false; in kcm_write_msgs()
605 while ((head = skb_peek(&sk->sk_write_queue))) { in kcm_write_msgs()
614 if (!txm->started_tx) { in kcm_write_msgs()
619 txm->frag_offset = 0; in kcm_write_msgs()
620 txm->sent = 0; in kcm_write_msgs()
621 txm->started_tx = true; in kcm_write_msgs()
624 ret = -EINVAL; in kcm_write_msgs()
627 skb = txm->frag_skb; in kcm_write_msgs()
630 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) { in kcm_write_msgs()
631 ret = -EINVAL; in kcm_write_msgs()
636 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in kcm_write_msgs()
637 msize += skb_frag_size(&skb_shinfo(skb)->frags[i]); in kcm_write_msgs()
640 skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags, in kcm_write_msgs()
642 iov_iter_advance(&msg.msg_iter, txm->frag_offset); in kcm_write_msgs()
645 ret = sock_sendmsg(psock->sk->sk_socket, &msg); in kcm_write_msgs()
647 if (ret == -EAGAIN) { in kcm_write_msgs()
651 txm->frag_skb = skb; in kcm_write_msgs()
661 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE, in kcm_write_msgs()
666 txm->started_tx = false; in kcm_write_msgs()
672 txm->sent += ret; in kcm_write_msgs()
673 txm->frag_offset += ret; in kcm_write_msgs()
674 KCM_STATS_ADD(psock->stats.tx_bytes, ret); in kcm_write_msgs()
679 txm->frag_skb = skb_shinfo(skb)->frag_list; in kcm_write_msgs()
680 txm->frag_offset = 0; in kcm_write_msgs()
683 } else if (skb->next) { in kcm_write_msgs()
684 txm->frag_skb = skb->next; in kcm_write_msgs()
685 txm->frag_offset = 0; in kcm_write_msgs()
690 sk->sk_wmem_queued -= txm->sent; in kcm_write_msgs()
691 total_sent += txm->sent; in kcm_write_msgs()
692 skb_dequeue(&sk->sk_write_queue); in kcm_write_msgs()
694 KCM_STATS_INCR(psock->stats.tx_msgs); in kcm_write_msgs()
699 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); in kcm_write_msgs()
705 sk->sk_write_space(sk); in kcm_write_msgs()
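
The transmit loop above pushes each message's page frags straight into the underlying TCP socket. The core send step, lightly reconstructed from the surrounding lines (a sketch, not the verbatim source): skb_frag_t is a struct bio_vec, so the frag array can back a bvec iterator and be spliced down with MSG_SPLICE_PAGES.

struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };

/* msize is the summed frag size computed in the loop above. */
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, skb_shinfo(skb)->frags,
              skb_shinfo(skb)->nr_frags, msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);  /* skip what's sent */
ret = sock_sendmsg(psock->sk->sk_socket, &msg);
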
713 struct sock *sk = &kcm->sk; in kcm_tx_work()
725 report_csk_error(&kcm->sk, -err); in kcm_tx_work()
730 if (likely(sk->sk_socket) && in kcm_tx_work()
731 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in kcm_tx_work()
732 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_tx_work()
733 sk->sk_write_space(sk); in kcm_tx_work()
742 if (kcm->tx_wait_more) in kcm_push()
748 struct sock *sk = sock->sk; in kcm_sendmsg()
752 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); in kcm_sendmsg()
753 int eor = (sock->type == SOCK_DGRAM) ? in kcm_sendmsg()
754 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR); in kcm_sendmsg()
755 int err = -EPIPE; in kcm_sendmsg()
762 if (sk->sk_err) in kcm_sendmsg()
765 if (kcm->seq_skb) { in kcm_sendmsg()
767 head = kcm->seq_skb; in kcm_sendmsg()
768 skb = kcm_tx_msg(head)->last_skb; in kcm_sendmsg()
775 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_sendmsg()
783 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
790 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
798 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
804 int i = skb_shinfo(skb)->nr_frags; in kcm_sendmsg()
810 if (!skb_can_coalesce(skb, i, pfrag->page, in kcm_sendmsg()
811 pfrag->offset)) { in kcm_sendmsg()
815 tskb = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
820 skb_shinfo(head)->frag_list = tskb; in kcm_sendmsg()
822 skb->next = tskb; in kcm_sendmsg()
825 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
831 if (msg->msg_flags & MSG_SPLICE_PAGES) { in kcm_sendmsg()
836 err = skb_splice_from_iter(skb, &msg->msg_iter, copy, in kcm_sendmsg()
837 sk->sk_allocation); in kcm_sendmsg()
839 if (err == -EMSGSIZE) in kcm_sendmsg()
845 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in kcm_sendmsg()
850 head->truesize += copy; in kcm_sendmsg()
853 pfrag->size - pfrag->offset); in kcm_sendmsg()
857 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in kcm_sendmsg()
858 pfrag->page, in kcm_sendmsg()
859 pfrag->offset, in kcm_sendmsg()
867 &skb_shinfo(skb)->frags[i - 1], copy); in kcm_sendmsg()
869 skb_fill_page_desc(skb, i, pfrag->page, in kcm_sendmsg()
870 pfrag->offset, copy); in kcm_sendmsg()
871 get_page(pfrag->page); in kcm_sendmsg()
874 pfrag->offset += copy; in kcm_sendmsg()
879 head->len += copy; in kcm_sendmsg()
880 head->data_len += copy; in kcm_sendmsg()
893 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendmsg()
897 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendmsg()
898 kcm->seq_skb = NULL; in kcm_sendmsg()
899 KCM_STATS_INCR(kcm->stats.tx_msgs); in kcm_sendmsg()
902 if (msg->msg_flags & MSG_BATCH) { in kcm_sendmsg()
903 kcm->tx_wait_more = true; in kcm_sendmsg()
904 } else if (kcm->tx_wait_more || not_busy) { in kcm_sendmsg()
913 report_csk_error(&kcm->sk, -err); in kcm_sendmsg()
920 kcm->seq_skb = head; in kcm_sendmsg()
921 kcm_tx_msg(head)->last_skb = skb; in kcm_sendmsg()
925 KCM_STATS_ADD(kcm->stats.tx_bytes, copied); in kcm_sendmsg()
933 if (sock->type == SOCK_SEQPACKET) { in kcm_sendmsg()
939 if (head != kcm->seq_skb) in kcm_sendmsg()
943 kcm->seq_skb = NULL; in kcm_sendmsg()
946 err = sk_stream_error(sk, msg->msg_flags, err); in kcm_sendmsg()
949 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendmsg()
950 sk->sk_write_space(sk); in kcm_sendmsg()
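
Per the socket-type test near the top of kcm_sendmsg(), message boundaries on send depend on the type: SOCK_SEQPACKET ends a message only on MSG_EOR, while SOCK_DGRAM ends one whenever MSG_MORE is absent; MSG_BATCH additionally holds back the flush until a later send. A userspace sketch that builds one message from two writes (kcmfd is assumed to be an already-attached KCM socket):

#include <string.h>
#include <sys/socket.h>

/* Works for both KCM socket types: MSG_MORE keeps the message open,
 * the final send without MSG_MORE and with MSG_EOR completes it. */
static int send_in_two_parts(int kcmfd, const char *a, const char *b)
{
        if (send(kcmfd, a, strlen(a), MSG_MORE) < 0)
                return -1;
        return send(kcmfd, b, strlen(b), MSG_EOR);
}
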
958 struct sock *sk = sock->sk; in kcm_splice_eof()
961 if (skb_queue_empty_lockless(&sk->sk_write_queue)) in kcm_splice_eof()
972 struct sock *sk = sock->sk; in kcm_recvmsg()
987 if (len > stm->full_len) in kcm_recvmsg()
988 len = stm->full_len; in kcm_recvmsg()
990 err = skb_copy_datagram_msg(skb, stm->offset, msg, len); in kcm_recvmsg()
996 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_recvmsg()
997 if (copied < stm->full_len) { in kcm_recvmsg()
998 if (sock->type == SOCK_DGRAM) { in kcm_recvmsg()
1000 msg->msg_flags |= MSG_TRUNC; in kcm_recvmsg()
1003 stm->offset += copied; in kcm_recvmsg()
1004 stm->full_len -= copied; in kcm_recvmsg()
1008 msg->msg_flags |= MSG_EOR; in kcm_recvmsg()
1009 KCM_STATS_INCR(kcm->stats.rx_msgs); in kcm_recvmsg()
1022 struct sock *sk = sock->sk; in kcm_splice_read()
1039 if (len > stm->full_len) in kcm_splice_read()
1040 len = stm->full_len; in kcm_splice_read()
1042 copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags); in kcm_splice_read()
1048 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_splice_read()
1050 stm->offset += copied; in kcm_splice_read()
1051 stm->full_len -= copied; in kcm_splice_read()
1070 struct kcm_mux *mux = kcm->mux; in kcm_recv_disable() local
1072 if (kcm->rx_disabled) in kcm_recv_disable()
1075 spin_lock_bh(&mux->rx_lock); in kcm_recv_disable()
1077 kcm->rx_disabled = 1; in kcm_recv_disable()
1080 if (!kcm->rx_psock) { in kcm_recv_disable()
1081 if (kcm->rx_wait) { in kcm_recv_disable()
1082 list_del(&kcm->wait_rx_list); in kcm_recv_disable()
1084 WRITE_ONCE(kcm->rx_wait, false); in kcm_recv_disable()
1087 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in kcm_recv_disable()
1090 spin_unlock_bh(&mux->rx_lock); in kcm_recv_disable()
1096 struct kcm_mux *mux = kcm->mux; in kcm_recv_enable() local
1098 if (!kcm->rx_disabled) in kcm_recv_enable()
1101 spin_lock_bh(&mux->rx_lock); in kcm_recv_enable()
1103 kcm->rx_disabled = 0; in kcm_recv_enable()
1106 spin_unlock_bh(&mux->rx_lock); in kcm_recv_enable()
1112 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_setsockopt()
1117 return -ENOPROTOOPT; in kcm_setsockopt()
1120 return -EINVAL; in kcm_setsockopt()
1123 return -EFAULT; in kcm_setsockopt()
1129 lock_sock(&kcm->sk); in kcm_setsockopt()
1134 release_sock(&kcm->sk); in kcm_setsockopt()
1137 err = -ENOPROTOOPT; in kcm_setsockopt()
1146 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_getsockopt()
1150 return -ENOPROTOOPT; in kcm_getsockopt()
1153 return -EFAULT; in kcm_getsockopt()
1157 return -EINVAL; in kcm_getsockopt()
1161 val = kcm->rx_disabled; in kcm_getsockopt()
1164 return -ENOPROTOOPT; in kcm_getsockopt()
1168 return -EFAULT; in kcm_getsockopt()
1170 return -EFAULT; in kcm_getsockopt()
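
KCM_RECV_DISABLE is the only SOL_KCM option the handlers above accept: a nonzero value parks the socket (its pending messages are requeued to other sockets on the mux), zero re-enables delivery. A userspace sketch; the SOL_KCM fallback value matches the kernel's socket.h and is only needed if libc lacks the define:

#include <sys/socket.h>
#include <linux/kcm.h>

#ifndef SOL_KCM
#define SOL_KCM 281
#endif

static int kcm_recv_disable(int kcmfd, int on)
{
        return setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE,
                          &on, sizeof(on));
}
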
1174 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) in init_kcm_sock() argument
1184 kcm->sk.sk_state = TCP_ESTABLISHED; in init_kcm_sock()
1186 /* Add to mux's kcm sockets list */ in init_kcm_sock()
1187 kcm->mux = mux; in init_kcm_sock()
1188 spin_lock_bh(&mux->lock); in init_kcm_sock()
1190 head = &mux->kcm_socks; in init_kcm_sock()
1191 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) { in init_kcm_sock()
1192 if (tkcm->index != index) in init_kcm_sock()
1194 head = &tkcm->kcm_sock_list; in init_kcm_sock()
1198 list_add(&kcm->kcm_sock_list, head); in init_kcm_sock()
1199 kcm->index = index; in init_kcm_sock()
1201 mux->kcm_socks_cnt++; in init_kcm_sock()
1202 spin_unlock_bh(&mux->lock); in init_kcm_sock()
1204 INIT_WORK(&kcm->tx_work, kcm_tx_work); in init_kcm_sock()
1206 spin_lock_bh(&mux->rx_lock); in init_kcm_sock()
1208 spin_unlock_bh(&mux->rx_lock); in init_kcm_sock()
1214 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_attach()
1215 struct kcm_mux *mux = kcm->mux; in kcm_attach() local
1227 csk = csock->sk; in kcm_attach()
1229 return -EINVAL; in kcm_attach()
1234 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || in kcm_attach()
1235 csk->sk_protocol != IPPROTO_TCP) { in kcm_attach()
1236 err = -EOPNOTSUPP; in kcm_attach()
1241 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) { in kcm_attach()
1242 err = -EOPNOTSUPP; in kcm_attach()
1248 err = -ENOMEM; in kcm_attach()
1252 psock->mux = mux; in kcm_attach()
1253 psock->sk = csk; in kcm_attach()
1254 psock->bpf_prog = prog; in kcm_attach()
1256 write_lock_bh(&csk->sk_callback_lock); in kcm_attach()
1261 if (csk->sk_user_data) { in kcm_attach()
1262 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1264 err = -EALREADY; in kcm_attach()
1268 err = strp_init(&psock->strp, csk, &cb); in kcm_attach()
1270 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1275 psock->save_data_ready = csk->sk_data_ready; in kcm_attach()
1276 psock->save_write_space = csk->sk_write_space; in kcm_attach()
1277 psock->save_state_change = csk->sk_state_change; in kcm_attach()
1278 csk->sk_user_data = psock; in kcm_attach()
1279 csk->sk_data_ready = psock_data_ready; in kcm_attach()
1280 csk->sk_write_space = psock_write_space; in kcm_attach()
1281 csk->sk_state_change = psock_state_change; in kcm_attach()
1283 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1287 /* Finished initialization, now add the psock to the MUX. */ in kcm_attach()
1288 spin_lock_bh(&mux->lock); in kcm_attach()
1289 head = &mux->psocks; in kcm_attach()
1290 list_for_each_entry(tpsock, &mux->psocks, psock_list) { in kcm_attach()
1291 if (tpsock->index != index) in kcm_attach()
1293 head = &tpsock->psock_list; in kcm_attach()
1297 list_add(&psock->psock_list, head); in kcm_attach()
1298 psock->index = index; in kcm_attach()
1300 KCM_STATS_INCR(mux->stats.psock_attach); in kcm_attach()
1301 mux->psocks_cnt++; in kcm_attach()
1303 spin_unlock_bh(&mux->lock); in kcm_attach()
1306 strp_check_rcv(&psock->strp); in kcm_attach()
1320 csock = sockfd_lookup(info->fd, &err); in kcm_attach_ioctl()
1322 return -ENOENT; in kcm_attach_ioctl()
1324 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER); in kcm_attach_ioctl()
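
From userspace, the attach path above is driven by the SIOCKCMATTACH ioctl: the caller supplies a connected TCP socket fd and the fd of a BPF_PROG_TYPE_SOCKET_FILTER program whose return value gives each message's length. A sketch (both fds are assumed to exist already):

#include <sys/ioctl.h>
#include <linux/kcm.h>
#include <linux/sockios.h>

static int kcm_attach_tcp(int kcmfd, int tcpfd, int parser_bpf_fd)
{
        struct kcm_attach req = {
                .fd     = tcpfd,         /* transport to put under the mux */
                .bpf_fd = parser_bpf_fd, /* message-boundary parser */
        };

        return ioctl(kcmfd, SIOCKCMATTACH, &req);
}
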
1346 struct sock *csk = psock->sk; in kcm_unattach()
1347 struct kcm_mux *mux = psock->mux; in kcm_unattach() local
1354 write_lock_bh(&csk->sk_callback_lock); in kcm_unattach()
1355 csk->sk_user_data = NULL; in kcm_unattach()
1356 csk->sk_data_ready = psock->save_data_ready; in kcm_unattach()
1357 csk->sk_write_space = psock->save_write_space; in kcm_unattach()
1358 csk->sk_state_change = psock->save_state_change; in kcm_unattach()
1359 strp_stop(&psock->strp); in kcm_unattach()
1361 if (WARN_ON(psock->rx_kcm)) { in kcm_unattach()
1362 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1367 spin_lock_bh(&mux->rx_lock); in kcm_unattach()
1372 if (psock->ready_rx_msg) { in kcm_unattach()
1373 list_del(&psock->psock_ready_list); in kcm_unattach()
1374 kfree_skb(psock->ready_rx_msg); in kcm_unattach()
1375 psock->ready_rx_msg = NULL; in kcm_unattach()
1376 KCM_STATS_INCR(mux->stats.rx_ready_drops); in kcm_unattach()
1379 spin_unlock_bh(&mux->rx_lock); in kcm_unattach()
1381 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1385 strp_done(&psock->strp); in kcm_unattach()
1388 bpf_prog_put(psock->bpf_prog); in kcm_unattach()
1390 spin_lock_bh(&mux->lock); in kcm_unattach()
1392 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats); in kcm_unattach()
1393 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats); in kcm_unattach()
1395 KCM_STATS_INCR(mux->stats.psock_unattach); in kcm_unattach()
1397 if (psock->tx_kcm) { in kcm_unattach()
1402 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd); in kcm_unattach()
1403 spin_unlock_bh(&mux->lock); in kcm_unattach()
1407 * to do this without the mux lock. in kcm_unattach()
1411 spin_lock_bh(&mux->lock); in kcm_unattach()
1412 if (!psock->tx_kcm) { in kcm_unattach()
1413 /* psock now unreserved in window mux was unlocked */ in kcm_unattach()
1416 psock->done = 1; in kcm_unattach()
1421 /* Queue tx work to make sure psock->done is handled */ in kcm_unattach()
1422 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_unattach()
1423 spin_unlock_bh(&mux->lock); in kcm_unattach()
1426 if (!psock->tx_stopped) in kcm_unattach()
1427 list_del(&psock->psock_avail_list); in kcm_unattach()
1428 list_del(&psock->psock_list); in kcm_unattach()
1429 mux->psocks_cnt--; in kcm_unattach()
1430 spin_unlock_bh(&mux->lock); in kcm_unattach()
1433 fput(csk->sk_socket->file); in kcm_unattach()
1442 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_unattach_ioctl()
1443 struct kcm_mux *mux = kcm->mux; in kcm_unattach_ioctl() local
1449 csock = sockfd_lookup(info->fd, &err); in kcm_unattach_ioctl()
1451 return -ENOENT; in kcm_unattach_ioctl()
1453 csk = csock->sk; in kcm_unattach_ioctl()
1455 err = -EINVAL; in kcm_unattach_ioctl()
1459 err = -ENOENT; in kcm_unattach_ioctl()
1461 spin_lock_bh(&mux->lock); in kcm_unattach_ioctl()
1463 list_for_each_entry(psock, &mux->psocks, psock_list) { in kcm_unattach_ioctl()
1464 if (psock->sk != csk) in kcm_unattach_ioctl()
1469 if (psock->unattaching || WARN_ON(psock->done)) { in kcm_unattach_ioctl()
1470 err = -EALREADY; in kcm_unattach_ioctl()
1474 psock->unattaching = 1; in kcm_unattach_ioctl()
1476 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
1485 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
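
The matching userspace call for the unattach path above, as a sketch:

#include <sys/ioctl.h>
#include <linux/kcm.h>
#include <linux/sockios.h>

/* Detach a previously attached TCP socket from the KCM mux. */
static int kcm_detach_tcp(int kcmfd, int tcpfd)
{
        struct kcm_unattach req = { .fd = tcpfd };

        return ioctl(kcmfd, SIOCKCMUNATTACH, &req);
}
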
1506 return ERR_PTR(-ENFILE); in kcm_clone()
1508 newsock->type = osock->type; in kcm_clone()
1509 newsock->ops = osock->ops; in kcm_clone()
1511 __module_get(newsock->ops->owner); in kcm_clone()
1513 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, in kcm_clone()
1517 return ERR_PTR(-ENOMEM); in kcm_clone()
1520 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); in kcm_clone()
1522 return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); in kcm_clone()
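
kcm_clone() backs the SIOCKCMCLONE ioctl, which mints another KCM socket on the same mux. A userspace sketch; the kernel fills in the new fd:

#include <sys/ioctl.h>
#include <linux/kcm.h>
#include <linux/sockios.h>

static int kcm_clone_fd(int kcmfd)
{
        struct kcm_clone req = { 0 };

        if (ioctl(kcmfd, SIOCKCMCLONE, &req) < 0)
                return -1;
        return req.fd;          /* second socket sharing kcmfd's mux */
}
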
1534 return -EFAULT; in kcm_ioctl()
1544 return -EFAULT; in kcm_ioctl()
1567 return -EFAULT; in kcm_ioctl()
1574 err = -ENOIOCTLCMD; in kcm_ioctl()
1583 struct kcm_mux *mux = container_of(rcu, in free_mux() local
1586 kmem_cache_free(kcm_muxp, mux); in free_mux()
1589 static void release_mux(struct kcm_mux *mux) in release_mux() argument
1591 struct kcm_net *knet = mux->knet; in release_mux()
1596 &mux->psocks, psock_list) { in release_mux()
1597 if (!WARN_ON(psock->unattaching)) in release_mux()
1601 if (WARN_ON(mux->psocks_cnt)) in release_mux()
1604 __skb_queue_purge(&mux->rx_hold_queue); in release_mux()
1606 mutex_lock(&knet->mutex); in release_mux()
1607 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats); in release_mux()
1608 aggregate_psock_stats(&mux->aggregate_psock_stats, in release_mux()
1609 &knet->aggregate_psock_stats); in release_mux()
1610 aggregate_strp_stats(&mux->aggregate_strp_stats, in release_mux()
1611 &knet->aggregate_strp_stats); in release_mux()
1612 list_del_rcu(&mux->kcm_mux_list); in release_mux()
1613 knet->count--; in release_mux()
1614 mutex_unlock(&knet->mutex); in release_mux()
1616 call_rcu(&mux->rcu, free_mux); in release_mux()
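
release_mux() unlinks the mux with list_del_rcu() and defers the actual free through call_rcu(), so readers traversing knet->mux_list under rcu_read_lock() never touch freed memory. The idiom generically, with hypothetical names:

static void free_obj_rcu(struct rcu_head *rcu)
{
        struct obj *o = container_of(rcu, struct obj, rcu);

        kfree(o);       /* runs after a full RCU grace period */
}

/* writer, holding the list lock: */
list_del_rcu(&o->list);
call_rcu(&o->rcu, free_obj_rcu);
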
1621 struct kcm_mux *mux = kcm->mux; in kcm_done() local
1622 struct sock *sk = &kcm->sk; in kcm_done()
1625 spin_lock_bh(&mux->rx_lock); in kcm_done()
1626 if (kcm->rx_psock) { in kcm_done()
1628 WARN_ON(kcm->done); in kcm_done()
1629 kcm->rx_disabled = 1; in kcm_done()
1630 kcm->done = 1; in kcm_done()
1631 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1635 if (kcm->rx_wait) { in kcm_done()
1636 list_del(&kcm->wait_rx_list); in kcm_done()
1638 WRITE_ONCE(kcm->rx_wait, false); in kcm_done()
1641 requeue_rx_msgs(mux, &sk->sk_receive_queue); in kcm_done()
1643 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1648 /* Detach from MUX */ in kcm_done()
1649 spin_lock_bh(&mux->lock); in kcm_done()
1651 list_del(&kcm->kcm_sock_list); in kcm_done()
1652 mux->kcm_socks_cnt--; in kcm_done()
1653 socks_cnt = mux->kcm_socks_cnt; in kcm_done()
1655 spin_unlock_bh(&mux->lock); in kcm_done()
1658 /* We are done with the mux now. */ in kcm_done()
1659 release_mux(mux); in kcm_done()
1662 WARN_ON(kcm->rx_wait); in kcm_done()
1664 sock_put(&kcm->sk); in kcm_done()
1668 * If this is the last KCM socket on the MUX, destroy the MUX.
1672 struct sock *sk = sock->sk; in kcm_release()
1674 struct kcm_mux *mux; in kcm_release() local
1681 mux = kcm->mux; in kcm_release()
1685 kfree_skb(kcm->seq_skb); in kcm_release()
1691 __skb_queue_purge(&sk->sk_write_queue); in kcm_release()
1697 kcm->tx_stopped = 1; in kcm_release()
1701 spin_lock_bh(&mux->lock); in kcm_release()
1702 if (kcm->tx_wait) { in kcm_release()
1706 list_del(&kcm->wait_psock_list); in kcm_release()
1707 kcm->tx_wait = false; in kcm_release()
1709 spin_unlock_bh(&mux->lock); in kcm_release()
1714 cancel_work_sync(&kcm->tx_work); in kcm_release()
1717 psock = kcm->tx_psock; in kcm_release()
1728 WARN_ON(kcm->tx_wait); in kcm_release()
1729 WARN_ON(kcm->tx_psock); in kcm_release()
1731 sock->sk = NULL; in kcm_release()
1787 struct kcm_mux *mux; in kcm_create() local
1789 switch (sock->type) { in kcm_create()
1791 sock->ops = &kcm_dgram_ops; in kcm_create()
1794 sock->ops = &kcm_seqpacket_ops; in kcm_create()
1797 return -ESOCKTNOSUPPORT; in kcm_create()
1801 return -EPROTONOSUPPORT; in kcm_create()
1805 return -ENOMEM; in kcm_create()
1807 /* Allocate a kcm mux, shared between KCM sockets */ in kcm_create()
1808 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL); in kcm_create()
1809 if (!mux) { in kcm_create()
1811 return -ENOMEM; in kcm_create()
1814 spin_lock_init(&mux->lock); in kcm_create()
1815 spin_lock_init(&mux->rx_lock); in kcm_create()
1816 INIT_LIST_HEAD(&mux->kcm_socks); in kcm_create()
1817 INIT_LIST_HEAD(&mux->kcm_rx_waiters); in kcm_create()
1818 INIT_LIST_HEAD(&mux->kcm_tx_waiters); in kcm_create()
1820 INIT_LIST_HEAD(&mux->psocks); in kcm_create()
1821 INIT_LIST_HEAD(&mux->psocks_ready); in kcm_create()
1822 INIT_LIST_HEAD(&mux->psocks_avail); in kcm_create()
1824 mux->knet = knet; in kcm_create()
1826 /* Add new MUX to list */ in kcm_create()
1827 mutex_lock(&knet->mutex); in kcm_create()
1828 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list); in kcm_create()
1829 knet->count++; in kcm_create()
1830 mutex_unlock(&knet->mutex); in kcm_create()
1832 skb_queue_head_init(&mux->rx_hold_queue); in kcm_create()
1836 init_kcm_sock(kcm_sk(sk), mux); in kcm_create()
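
Each userspace socket() call lands in kcm_create() above and allocates a fresh mux; additional sockets join an existing mux via SIOCKCMCLONE rather than socket(). A sketch; the AF_KCM fallback matches the kernel's socket.h and is needed only if libc lacks it:

#include <sys/socket.h>
#include <linux/kcm.h>

#ifndef AF_KCM
#define AF_KCM 41
#endif

int kcmfd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
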
1851 INIT_LIST_HEAD_RCU(&knet->mux_list); in kcm_init_net()
1852 mutex_init(&knet->mutex); in kcm_init_net()
1864 WARN_ON(!list_empty(&knet->mux_list)); in kcm_exit_net()
1866 mutex_destroy(&knet->mutex); in kcm_exit_net()
1878 int err = -ENOMEM; in kcm_init()