Lines Matching defs:msk
107 static bool mptcp_pm_is_init_remote_addr(struct mptcp_sock *msk,
112 mptcp_remote_address((struct sock_common *)msk, &mpc_remote);
135 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
140 lockdep_assert_held(&msk->pm.lock);
142 list_for_each_entry(entry, &msk->pm.anno_list, list) {
150 bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
156 entry = mptcp_pm_del_add_timer(msk, addr, false);
163 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
171 spin_lock_bh(&msk->pm.lock);
172 list_for_each_entry(entry, &msk->pm.anno_list, list) {
180 spin_unlock_bh(&msk->pm.lock);
184 static void __mptcp_pm_send_ack(struct mptcp_sock *msk,
193 (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
205 void mptcp_pm_send_ack(struct mptcp_sock *msk,
209 spin_unlock_bh(&msk->pm.lock);
210 __mptcp_pm_send_ack(msk, subflow, prio, backup);
211 spin_lock_bh(&msk->pm.lock);
214 void mptcp_pm_addr_send_ack(struct mptcp_sock *msk)
218 msk_owned_by_me(msk);
219 lockdep_assert_held(&msk->pm.lock);
221 if (!mptcp_pm_should_add_signal(msk) &&
222 !mptcp_pm_should_rm_signal(msk))
225 mptcp_for_each_subflow(msk, subflow) {
228 mptcp_pm_send_ack(msk, subflow, false, false);
238 mptcp_pm_send_ack(msk, alt, false, false);
241 int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk,
250 mptcp_for_each_subflow(msk, subflow) {
264 __mptcp_pm_send_ack(msk, subflow, true, bkup);
275 struct mptcp_sock *msk = entry->sock;
276 struct sock *sk = (struct sock *)msk;
278 pr_debug("msk=%p\n", msk);
280 if (!msk)
289 if (mptcp_pm_should_add_signal_addr(msk)) {
294 spin_lock_bh(&msk->pm.lock);
296 if (!mptcp_pm_should_add_signal_addr(msk)) {
298 mptcp_pm_announce_addr(msk, &entry->addr, false);
299 mptcp_pm_add_addr_send_ack(msk);
307 spin_unlock_bh(&msk->pm.lock);
310 mptcp_pm_subflow_established(msk);
317 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
321 struct sock *sk = (struct sock *)msk;
324 spin_lock_bh(&msk->pm.lock);
325 entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
332 spin_unlock_bh(&msk->pm.lock);
341 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
345 struct sock *sk = (struct sock *)msk;
348 lockdep_assert_held(&msk->pm.lock);
350 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
353 if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
365 list_add(&add_entry->list, &msk->pm.anno_list);
368 add_entry->sock = msk;
378 static void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
381 struct sock *sk = (struct sock *)msk;
384 pr_debug("msk=%p\n", msk);
386 spin_lock_bh(&msk->pm.lock);
387 list_splice_init(&msk->pm.anno_list, &free_list);
388 spin_unlock_bh(&msk->pm.lock);
398 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
402 u8 add_addr = READ_ONCE(msk->pm.addr_signal);
404 pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
406 lockdep_assert_held(&msk->pm.lock);
410 MPTCP_INC_STATS(sock_net((struct sock *)msk),
416 msk->pm.remote = *addr;
419 msk->pm.local = *addr;
422 WRITE_ONCE(msk->pm.addr_signal, add_addr);
426 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
428 u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
430 pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
433 MPTCP_ADD_STATS(sock_net((struct sock *)msk),
438 msk->pm.rm_list_tx = *rm_list;
440 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
441 mptcp_pm_addr_send_ack(msk);
447 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
449 struct mptcp_pm_data *pm = &msk->pm;
451 pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);
454 mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
457 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
459 struct mptcp_pm_data *pm = &msk->pm;
463 if (mptcp_pm_is_userspace(msk)) {
464 if (mptcp_userspace_pm_active(msk)) {
473 subflows_max = mptcp_pm_get_subflows_max(msk);
475 pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
496 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
499 pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
501 if (msk->pm.status & BIT(new_status))
504 msk->pm.status |= BIT(new_status);
505 mptcp_schedule_work((struct sock *)msk);
509 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
511 struct mptcp_pm_data *pm = &msk->pm;
514 pr_debug("msk=%p\n", msk);
524 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
533 mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
536 void mptcp_pm_connection_closed(struct mptcp_sock *msk)
538 pr_debug("msk=%p\n", msk);
540 if (msk->token)
541 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
544 void mptcp_pm_subflow_established(struct mptcp_sock *msk)
546 struct mptcp_pm_data *pm = &msk->pm;
548 pr_debug("msk=%p\n", msk);
556 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
561 void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
564 struct mptcp_pm_data *pm = &msk->pm;
568 if (mptcp_pm_is_userspace(msk)) {
582 __mptcp_pm_close_subflow(msk);
587 if (mptcp_pm_nl_check_work_pending(msk))
588 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
597 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
598 struct mptcp_pm_data *pm = &msk->pm;
600 pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
607 if (mptcp_pm_is_userspace(msk)) {
608 if (mptcp_userspace_pm_active(msk)) {
609 mptcp_pm_announce_addr(msk, addr, true);
610 mptcp_pm_add_addr_send_ack(msk);
612 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
615 } else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) ||
617 mptcp_pm_announce_addr(msk, addr, true);
618 mptcp_pm_add_addr_send_ack(msk);
619 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
622 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
628 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
631 struct mptcp_pm_data *pm = &msk->pm;
633 pr_debug("msk=%p\n", msk);
640 if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
641 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
646 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
648 if (!mptcp_pm_should_add_signal(msk))
651 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
654 static void mptcp_pm_rm_addr_or_subflow(struct mptcp_sock *msk,
659 struct sock *sk = (struct sock *)msk;
665 msk_owned_by_me(msk);
673 if (list_empty(&msk->conn_list))
680 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
696 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
697 spin_unlock_bh(&msk->pm.lock);
703 spin_lock_bh(&msk->pm.lock);
711 if (removed && mptcp_pm_is_kernel(msk))
712 mptcp_pm_nl_rm_addr(msk, rm_id);
717 static void mptcp_pm_rm_addr_recv(struct mptcp_sock *msk)
719 mptcp_pm_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
722 void mptcp_pm_rm_subflow(struct mptcp_sock *msk,
725 mptcp_pm_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
728 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
731 struct mptcp_pm_data *pm = &msk->pm;
734 pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
737 mptcp_event_addr_removed(msk, rm_list->ids[i]);
740 if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
743 __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
751 struct mptcp_sock *msk;
754 msk = mptcp_sk(sk);
758 mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
764 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
769 spin_lock_bh(&msk->fallback_lock);
770 if (!msk->allow_infinite_fallback) {
771 spin_unlock_bh(&msk->fallback_lock);
774 msk->allow_subflows = false;
775 spin_unlock_bh(&msk->fallback_lock);
789 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
799 spin_lock_bh(&msk->pm.lock);
802 if (!mptcp_pm_should_add_signal(msk))
814 *echo = mptcp_pm_should_add_signal_echo(msk);
815 port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);
817 family = *echo ? msk->pm.remote.family : msk->pm.local.family;
822 *addr = msk->pm.remote;
823 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
825 *addr = msk->pm.local;
826 add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
828 WRITE_ONCE(msk->pm.addr_signal, add_addr);
832 spin_unlock_bh(&msk->pm.lock);
836 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
842 spin_lock_bh(&msk->pm.lock);
845 if (!mptcp_pm_should_rm_signal(msk))
848 rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
849 len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
851 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
857 *rm_list = msk->pm.rm_list_tx;
858 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
862 spin_unlock_bh(&msk->pm.lock);
866 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
871 if (WARN_ON_ONCE(!msk))
874 /* The 0 ID mapping is defined by the first subflow, copied into the msk
877 mptcp_local_address((struct sock_common *)msk, &msk_local);
885 if (mptcp_pm_is_userspace(msk))
886 return mptcp_userspace_pm_get_local_id(msk, &skc_local);
887 return mptcp_pm_nl_get_local_id(msk, &skc_local);
890 bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
896 if (mptcp_pm_is_userspace(msk))
897 return mptcp_userspace_pm_is_backup(msk, &skc_local);
899 return mptcp_pm_nl_is_backup(msk, &skc_local);
902 static void mptcp_pm_subflows_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
905 struct sock *sk = (struct sock *)msk;
917 mptcp_for_each_subflow(msk, iter) {
931 * is cheap under the msk socket lock
939 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
951 mptcp_pm_subflows_chk_stale(msk, ssk);
958 void mptcp_pm_worker(struct mptcp_sock *msk)
960 struct mptcp_pm_data *pm = &msk->pm;
962 msk_owned_by_me(msk);
967 spin_lock_bh(&msk->pm.lock);
969 pr_debug("msk=%p status=%x\n", msk, pm->status);
972 mptcp_pm_addr_send_ack(msk);
976 mptcp_pm_rm_addr_recv(msk);
978 __mptcp_pm_kernel_worker(msk);
980 spin_unlock_bh(&msk->pm.lock);
983 void mptcp_pm_destroy(struct mptcp_sock *msk)
985 mptcp_pm_free_anno_list(msk);
987 if (mptcp_pm_is_userspace(msk))
988 mptcp_userspace_pm_free_local_addr_list(msk);
991 void mptcp_pm_data_reset(struct mptcp_sock *msk)
993 u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
994 struct mptcp_pm_data *pm = &msk->pm;
1002 bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);
1008 (!!mptcp_pm_get_local_addr_max(msk) &&
1010 !!mptcp_pm_get_add_addr_signal_max(msk));
1012 !!mptcp_pm_get_add_addr_accept_max(msk) &&
1020 void mptcp_pm_data_init(struct mptcp_sock *msk)
1022 spin_lock_init(&msk->pm.lock);
1023 INIT_LIST_HEAD(&msk->pm.anno_list);
1024 INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
1025 mptcp_pm_data_reset(msk);
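
A pattern recurs throughout the lines above: path-manager state hanging off msk->pm (the anno_list, the addr_signal bits, the pending-work status bits) is only touched under msk->pm.lock, and anything that cannot be done inline is deferred by setting a status bit via mptcp_pm_schedule_work() and letting mptcp_pm_worker() consume it later under the same lock. The snippet below is a minimal userspace sketch of that lock-plus-status-bit-plus-deferred-worker pattern, not kernel code; the names pm_state, pm_schedule and pm_worker are invented for illustration, with a pthread mutex standing in for the pm spinlock.

    /* Userspace analogue of the msk->pm.lock / status-bit / worker pattern
     * seen in mptcp_pm_schedule_work() and mptcp_pm_worker(). All names are
     * illustrative; this is not the kernel API.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { PM_ADD_ADDR_SEND_ACK = 0, PM_RM_ADDR_RECEIVED = 1 };

    struct pm_state {
            pthread_mutex_t lock;   /* stands in for msk->pm.lock */
            unsigned int status;    /* stands in for the msk->pm.status bits */
    };

    /* Like mptcp_pm_schedule_work(): record pending work under the lock and
     * report whether it was newly scheduled (false if already pending). */
    static bool pm_schedule(struct pm_state *pm, int new_status)
    {
            bool scheduled = false;

            pthread_mutex_lock(&pm->lock);
            if (!(pm->status & (1u << new_status))) {
                    pm->status |= 1u << new_status;
                    scheduled = true;  /* the kernel would also wake the msk worker here */
            }
            pthread_mutex_unlock(&pm->lock);
            return scheduled;
    }

    /* Like mptcp_pm_worker(): consume and clear the pending bits under the lock. */
    static void pm_worker(struct pm_state *pm)
    {
            pthread_mutex_lock(&pm->lock);
            if (pm->status & (1u << PM_ADD_ADDR_SEND_ACK)) {
                    pm->status &= ~(1u << PM_ADD_ADDR_SEND_ACK);
                    printf("send ADD_ADDR ack\n");
            }
            if (pm->status & (1u << PM_RM_ADDR_RECEIVED)) {
                    pm->status &= ~(1u << PM_RM_ADDR_RECEIVED);
                    printf("handle RM_ADDR\n");
            }
            pthread_mutex_unlock(&pm->lock);
    }

    int main(void)
    {
            struct pm_state pm = { .lock = PTHREAD_MUTEX_INITIALIZER, .status = 0 };

            pm_schedule(&pm, PM_ADD_ADDR_SEND_ACK);
            pm_schedule(&pm, PM_ADD_ADDR_SEND_ACK); /* duplicate: already pending, not rescheduled */
            pm_worker(&pm);
            return 0;
    }

The duplicate pm_schedule() call mirrors the early return in mptcp_pm_schedule_work() when the bit is already set, which is why callers in the listing can invoke it unconditionally without piling up redundant work.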