Lines Matching defs:msk

39 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
41 return pm_nl_get_pernet(sock_net((struct sock *)msk));
49 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
51 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
57 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
59 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
65 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
67 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
73 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
75 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
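
All four limit getters above funnel through the same accessor: cast the msk back to a plain sock, take its net namespace via sock_net(), and look up the per-netns path-manager state. A minimal userspace sketch of that pattern, with hypothetical simplified net/pernet stand-ins rather than the kernel structs:

    #include <stdio.h>

    /* hypothetical stand-ins for struct net and the pernet PM state */
    struct pm_nl_pernet {
        unsigned int add_addr_signal_max;
        unsigned int subflows_max;
    };

    struct net {
        struct pm_nl_pernet pm;   /* the kernel hangs this off the netns */
    };

    struct mptcp_sock {
        struct net *net;          /* sock_net() analogue */
    };

    static struct pm_nl_pernet *pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
    {
        return &msk->net->pm;     /* kernel: pm_nl_get_pernet(sock_net(sk)) */
    }

    static unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
    {
        return pm_nl_get_pernet_from_msk(msk)->subflows_max;
    }

    int main(void)
    {
        struct net init_net = { .pm = { .subflows_max = 8 } };
        struct mptcp_sock msk = { .net = &init_net };

        printf("subflows_max=%u\n", mptcp_pm_get_subflows_max(&msk));
        return 0;
    }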
104 const struct mptcp_sock *msk,
110 msk_owned_by_me(msk);
117 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
132 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
141 * Note: removal from the local address list during the msk life-cycle
145 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
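
Both selectors skip any endpoint whose id bit has already been cleared in msk->pm.id_avail_bitmap, so each address is handed out at most once per msk. A rough model of that reservation check, using a plain unsigned long as the bitmap (an assumption, not the kernel bitmap API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool test_bit(unsigned int id, const unsigned long *map)
    {
        return *map & (1UL << id);
    }

    static void clear_bit(unsigned int id, unsigned long *map)
    {
        *map &= ~(1UL << id);
    }

    /* pick the first still-available endpoint id, mirroring how
     * select_local_address()/select_signal_address() skip used ids */
    static int select_addr_id(const unsigned int *ids, int n, unsigned long *avail)
    {
        for (int i = 0; i < n; i++) {
            if (!test_bit(ids[i], avail))
                continue;             /* already used on this msk */
            clear_bit(ids[i], avail); /* reserve it */
            return (int)ids[i];
        }
        return -1;
    }

    int main(void)
    {
        unsigned long avail = ~0UL;
        unsigned int endpoints[] = { 1, 2, 3 };

        printf("first pick: %d\n", select_addr_id(endpoints, 3, &avail));
        printf("same pick again: %d\n", select_addr_id(endpoints, 3, &avail));
        return 0;
    }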
165 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
170 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
171 struct sock *sk = (struct sock *)msk, *ssk;
177 subflows_max = mptcp_pm_get_subflows_max(msk);
190 msk->pm.subflows++;
199 mptcp_for_each_subflow(msk, subflow)
203 mptcp_for_each_subflow(msk, subflow) {
216 if (msk->pm.subflows < subflows_max) {
221 msk->pm.subflows++;
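
fill_remote_addresses_vec() (like its fill_local_addresses_vec() counterpart further down) fills a fixed-size vector of peer addresses for a new subflow: just the initial remote in the plain case, every known peer address in fullmesh mode, charging msk->pm.subflows for each slot. A simplified model of that capped fill loop, with hypothetical types:

    #include <stdio.h>

    #define MPTCP_PM_ADDR_MAX 8

    struct pm_state {
        unsigned int subflows;
        unsigned int subflows_max;
    };

    /* fill up to subflows_max remote slots, charging pm->subflows for
     * each, as fill_remote_addresses_vec() does in fullmesh mode */
    static unsigned int fill_remotes(struct pm_state *pm,
                                     const int *peers, unsigned int n_peers,
                                     int *vec)
    {
        unsigned int nr = 0;

        for (unsigned int i = 0; i < n_peers && nr < MPTCP_PM_ADDR_MAX; i++) {
            if (pm->subflows >= pm->subflows_max)
                break;
            pm->subflows++;
            vec[nr++] = peers[i];
        }
        return nr;
    }

    int main(void)
    {
        struct pm_state pm = { .subflows = 1, .subflows_max = 3 };
        int peers[] = { 101, 102, 103, 104 };
        int vec[MPTCP_PM_ADDR_MAX];

        printf("filled %u remote slots\n", fill_remotes(&pm, peers, 4, vec));
        return 0; /* 2 slots: capped by subflows_max */
    }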
256 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
258 struct sock *sk = (struct sock *)msk;
268 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
269 local_addr_max = mptcp_pm_get_local_addr_max(msk);
270 subflows_max = mptcp_pm_get_subflows_max(msk);
273 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
274 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
279 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
283 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
284 msk->mpc_endpoint_id = entry->addr.id;
290 mptcp_pm_send_ack(msk, subflow, true, backup);
292 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
296 msk->pm.local_addr_used, local_addr_max,
297 msk->pm.add_addr_signaled, add_addr_signal_max,
298 msk->pm.subflows, subflows_max);
301 if (msk->pm.add_addr_signaled < add_addr_signal_max) {
309 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
312 if (!select_signal_address(pernet, msk, &local))
318 if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
321 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
322 msk->pm.add_addr_signaled++;
325 if (local.addr.id == msk->mpc_endpoint_id)
328 mptcp_pm_announce_addr(msk, &local.addr, false);
329 mptcp_pm_addr_send_ack(msk);
337 while (msk->pm.local_addr_used < local_addr_max &&
338 msk->pm.subflows < subflows_max) {
345 else if (!select_local_address(pernet, msk, &local))
350 __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
353 if (local.addr.id == msk->mpc_endpoint_id)
356 msk->pm.local_addr_used++;
358 nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
362 spin_unlock_bh(&msk->pm.lock);
365 spin_lock_bh(&msk->pm.lock);
367 mptcp_pm_nl_check_work_pending(msk);
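
The core worker first spends the ADD_ADDR signal budget, then opens subflows while both local_addr_used and subflows stay under their limits; note how the pm lock is dropped around the actual connect (lines 362-365), since establishing a subflow can sleep. A toy model of that drop-the-lock-around-connect shape, with a pthread mutex standing in for msk->pm.lock (an assumption, not the kernel locking):

    #include <pthread.h>
    #include <stdio.h>

    struct pm {
        pthread_mutex_t lock;
        unsigned int local_addr_used, local_addr_max;
        unsigned int subflows, subflows_max;
    };

    static void subflow_connect(int addr_id)
    {
        /* stands in for __mptcp_subflow_connect(), which may sleep,
         * hence it must run without the pm lock held */
        printf("connecting from local id %d\n", addr_id);
    }

    static void create_subflows(struct pm *pm)
    {
        pthread_mutex_lock(&pm->lock);
        while (pm->local_addr_used < pm->local_addr_max &&
               pm->subflows < pm->subflows_max) {
            int id = (int)++pm->local_addr_used; /* "select" an address */

            pm->subflows++;
            pthread_mutex_unlock(&pm->lock);     /* drop around connect */
            subflow_connect(id);
            pthread_mutex_lock(&pm->lock);
        }
        pthread_mutex_unlock(&pm->lock);
    }

    int main(void)
    {
        struct pm pm = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .local_addr_max = 2, .subflows = 1, .subflows_max = 4,
        };

        create_subflows(&pm);
        return 0;
    }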
370 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
372 mptcp_pm_create_subflow_or_signal_addr(msk);
375 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
377 mptcp_pm_create_subflow_or_signal_addr(msk);
383 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
387 struct sock *sk = (struct sock *)msk;
394 pernet = pm_nl_get_pernet_from_msk(msk);
395 subflows_max = mptcp_pm_get_subflows_max(msk);
397 mptcp_local_address((struct sock_common *)msk, &mpc_addr);
407 if (msk->pm.subflows < subflows_max) {
416 msk->pm.subflows++;
437 msk->pm.subflows++;
444 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
447 struct sock *sk = (struct sock *)msk;
454 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
455 subflows_max = mptcp_pm_get_subflows_max(msk);
458 msk->pm.add_addr_accepted, add_addr_accept_max,
459 msk->pm.remote.family);
461 remote = msk->pm.remote;
462 mptcp_pm_announce_addr(msk, &remote, true);
463 mptcp_pm_addr_send_ack(msk);
465 if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
475 nr = fill_local_addresses_vec(msk, &remote, locals);
479 spin_unlock_bh(&msk->pm.lock);
483 spin_lock_bh(&msk->pm.lock);
488 msk->pm.add_addr_accepted++;
489 if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
490 msk->pm.subflows >= subflows_max)
491 WRITE_ONCE(msk->pm.accept_addr, false);
495 void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
497 if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
501 if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
502 WRITE_ONCE(msk->pm.accept_addr, true);
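
When a peer's ADD_ADDR arrives, the PM echoes it, connects, and only then charges the acceptance: once add_addr_accepted or subflows reaches its limit, accept_addr flips to false so later announcements are ignored, and mptcp_pm_nl_rm_addr() refunds the budget when the peer removes an address. A small model of that gate and its refund (hypothetical struct, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct pm {
        unsigned int add_addr_accepted, add_addr_accept_max;
        unsigned int subflows, subflows_max;
        bool accept_addr;
    };

    /* accounting at the end of mptcp_pm_nl_add_addr_received() */
    static void addr_accepted(struct pm *pm)
    {
        pm->add_addr_accepted++;
        if (pm->add_addr_accepted >= pm->add_addr_accept_max ||
            pm->subflows >= pm->subflows_max)
            pm->accept_addr = false;
    }

    /* the inverse in mptcp_pm_nl_rm_addr(): removal refunds the budget */
    static void addr_removed(struct pm *pm)
    {
        if (pm->add_addr_accepted &&
            --pm->add_addr_accepted < pm->add_addr_accept_max)
            pm->accept_addr = true;
    }

    int main(void)
    {
        struct pm pm = { .add_addr_accept_max = 1, .subflows_max = 8,
                         .accept_addr = true };

        addr_accepted(&pm);
        printf("after accept: accept_addr=%d\n", pm.accept_addr); /* 0 */
        addr_removed(&pm);
        printf("after remove: accept_addr=%d\n", pm.accept_addr); /* 1 */
        return 0;
    }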
649 /* The subflow socket lock is acquired in a way nested to the msk one
650 * in several places, even by the TCP stack, and this msk is a kernel
652 * modifiers in several places, re-init the lock class for the msk
682 * under the msk socket lock. For the moment, that will not bring
696 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk,
703 pernet = pm_nl_get_pernet_from_msk(msk);
725 bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
727 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
742 struct mptcp_sock *msk;
745 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
746 struct sock *sk = (struct sock *)msk;
749 if (!READ_ONCE(msk->fully_established) ||
750 mptcp_pm_is_userspace(msk))
754 mptcp_local_address((struct sock_common *)msk, &mpc_addr);
757 spin_lock_bh(&msk->pm.lock);
759 msk->mpc_endpoint_id = addr->id;
760 mptcp_pm_create_subflow_or_signal_addr(msk);
761 spin_unlock_bh(&msk->pm.lock);
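
Several entry points in this file share one shape: walk every msk in the netns with mptcp_token_iter_next(), skip userspace-managed or not-yet-established sockets, take the socket and pm locks, do the work, release. A schematic of that walk, with a hypothetical iterator standing in for the token machinery:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct mptcp_sock_sketch {
        bool userspace_pm;
        int id;
    };

    /* hypothetical stand-in for mptcp_token_iter_next(net, &slot, &num) */
    static struct mptcp_sock_sketch *iter_next(struct mptcp_sock_sketch *all,
                                               size_t n, size_t *pos)
    {
        return *pos < n ? &all[(*pos)++] : NULL;
    }

    static void for_each_msk(struct mptcp_sock_sketch *all, size_t n)
    {
        struct mptcp_sock_sketch *msk;
        size_t pos = 0;

        while ((msk = iter_next(all, n, &pos)) != NULL) {
            if (msk->userspace_pm)
                continue;   /* kernel PM only, as in the source */
            /* lock_sock(sk); spin_lock_bh(&msk->pm.lock); ...work... */
            printf("visit msk %d\n", msk->id);
        }
    }

    int main(void)
    {
        struct mptcp_sock_sketch socks[] = {
            { .id = 1 }, { .userspace_pm = true, .id = 2 }, { .id = 3 },
        };

        for_each_msk(socks, 3);
        return 0;
    }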
848 static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
851 return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
854 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
861 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
863 ret = mptcp_remove_anno_list_by_saddr(msk, addr);
865 spin_lock_bh(&msk->pm.lock);
867 __set_bit(addr->id, msk->pm.id_avail_bitmap);
868 msk->pm.add_addr_signaled--;
870 mptcp_pm_remove_addr(msk, &list);
871 spin_unlock_bh(&msk->pm.lock);
876 static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
879 if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
880 id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
881 msk->pm.local_addr_used--;
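
mptcp_endp_get_local_id() reports the MPC endpoint as id 0 on the wire, and __mark_subflow_endp_available() undoes that aliasing with id ?: msk->mpc_endpoint_id before setting the availability bit back, decrementing local_addr_used only for non-zero ids. A compact model of both directions, using a plain unsigned long bitmap (hypothetical):

    #include <stdio.h>

    struct pm {
        unsigned long id_avail;       /* bit per endpoint id */
        unsigned int local_addr_used;
        unsigned char mpc_endpoint_id;
    };

    /* wire id: the MPC endpoint is always reported as 0 */
    static unsigned char endp_get_local_id(const struct pm *pm, unsigned char id)
    {
        return pm->mpc_endpoint_id == id ? 0 : id;
    }

    /* reverse mapping: 0 means "the MPC endpoint id" */
    static void mark_endp_available(struct pm *pm, unsigned char id)
    {
        unsigned char real = id ? id : pm->mpc_endpoint_id;

        if (!(pm->id_avail & (1UL << real))) {
            pm->id_avail |= 1UL << real;
            if (id && pm->local_addr_used)
                pm->local_addr_used--;
        }
    }

    int main(void)
    {
        struct pm pm = { .mpc_endpoint_id = 3, .local_addr_used = 1 };

        printf("wire id for 3: %u\n", endp_get_local_id(&pm, 3)); /* 0 */
        mark_endp_available(&pm, 0);  /* restores bit 3, keeps counter */
        printf("used=%u avail bit3=%lu\n", pm.local_addr_used,
               (pm.id_avail >> 3) & 1);
        return 0;
    }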
890 struct mptcp_sock *msk;
894 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
895 struct sock *sk = (struct sock *)msk;
898 if (mptcp_pm_is_userspace(msk))
902 remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr);
903 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
906 list.ids[0] = mptcp_endp_get_local_id(msk, addr);
908 spin_lock_bh(&msk->pm.lock);
909 mptcp_pm_rm_subflow(msk, &list);
910 spin_unlock_bh(&msk->pm.lock);
914 spin_lock_bh(&msk->pm.lock);
915 __mark_subflow_endp_available(msk, list.ids[0]);
916 spin_unlock_bh(&msk->pm.lock);
919 if (msk->mpc_endpoint_id == entry->addr.id)
920 msk->mpc_endpoint_id = 0;
936 struct mptcp_sock *msk;
940 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
941 struct sock *sk = (struct sock *)msk;
944 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
947 mptcp_local_address((struct sock_common *)msk, &msk_local);
952 spin_lock_bh(&msk->pm.lock);
953 mptcp_pm_remove_addr(msk, &list);
954 mptcp_pm_rm_subflow(msk, &list);
955 __mark_subflow_endp_available(msk, 0);
956 spin_unlock_bh(&msk->pm.lock);
984 /* the zero id address is special: the first address used by the msk
1020 static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk,
1028 mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
1029 slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
1032 mptcp_remove_anno_list_by_saddr(msk, &entry->addr))
1033 alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
1036 spin_lock_bh(&msk->pm.lock);
1038 msk->pm.add_addr_signaled -= alist.nr;
1039 mptcp_pm_remove_addr(msk, &alist);
1042 mptcp_pm_rm_subflow(msk, &slist);
1044 bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
1045 msk->pm.local_addr_used = 0;
1046 spin_unlock_bh(&msk->pm.lock);
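
On flush, announced ids and subflow ids are gathered into two rm_lists; then, under a single pm lock section, both lists are removed, the whole id_avail_bitmap is refilled and local_addr_used is reset, returning the msk to clean accounting. A sketch of that final reset step, over hypothetical simplified state:

    #include <stdio.h>

    struct pm {
        unsigned long id_avail;
        unsigned int add_addr_signaled;
        unsigned int local_addr_used;
    };

    /* the tail of mptcp_pm_flush_addrs_and_subflows(): give every id
     * back and zero the usage counters once the rm_lists are sent */
    static void flush_accounting(struct pm *pm, unsigned int announced)
    {
        pm->add_addr_signaled -= announced; /* alist.nr in the kernel */
        pm->id_avail = ~0UL;                /* bitmap_fill() analogue */
        pm->local_addr_used = 0;
    }

    int main(void)
    {
        struct pm pm = {
            .id_avail = 0x0fUL,
            .add_addr_signaled = 2,
            .local_addr_used = 3,
        };

        flush_accounting(&pm, 2);
        printf("signaled=%u used=%u avail=%#lx\n",
               pm.add_addr_signaled, pm.local_addr_used, pm.id_avail);
        return 0;
    }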
1053 struct mptcp_sock *msk;
1058 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1059 struct sock *sk = (struct sock *)msk;
1061 if (!mptcp_pm_is_userspace(msk)) {
1063 mptcp_pm_flush_addrs_and_subflows(msk, rm_list);
1235 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
1240 list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
1242 spin_lock_bh(&msk->pm.lock);
1243 mptcp_pm_rm_subflow(msk, &list);
1244 __mark_subflow_endp_available(msk, list.ids[0]);
1245 mptcp_pm_create_subflow_or_signal_addr(msk);
1246 spin_unlock_bh(&msk->pm.lock);
1256 struct mptcp_sock *msk;
1261 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1262 struct sock *sk = (struct sock *)msk;
1264 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1269 mptcp_pm_mp_prio_send_ack(msk, &local->addr, NULL, bkup);
1272 mptcp_pm_nl_fullmesh(msk, &local->addr);
1328 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
1330 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
1332 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
1333 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
1335 WRITE_ONCE(msk->pm.work_pending, false);
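
check_work_pending ANDs the pernet endpoint bitmap with the per-msk availability bitmap: if the intersection is empty, or subflows already hit the limit, nothing is left for the worker and work_pending is cleared. A userspace approximation of the find_next_and_bit() test, with plain unsigned long bitmaps (hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct pm_check {
        unsigned long configured;   /* pernet->id_bitmap analogue */
        unsigned long avail;        /* msk->pm.id_avail_bitmap analogue */
        unsigned int subflows, subflows_max;
        bool work_pending;
    };

    static bool check_work_pending(struct pm_check *c)
    {
        /* find_next_and_bit() succeeds iff some configured id is avail */
        if (c->subflows == c->subflows_max ||
            !(c->configured & c->avail))
            c->work_pending = false;
        return c->work_pending;
    }

    int main(void)
    {
        struct pm_check c = {
            .configured = 0x6, .avail = 0x1,  /* disjoint sets */
            .subflows = 1, .subflows_max = 4,
            .work_pending = true,
        };

        printf("pending=%d\n", check_work_pending(&c)); /* 0 */
        return 0;
    }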
1342 void __mptcp_pm_kernel_worker(struct mptcp_sock *msk)
1344 struct mptcp_pm_data *pm = &msk->pm;
1348 mptcp_pm_nl_add_addr_received(msk);
1352 mptcp_pm_nl_fully_established(msk);
1356 mptcp_pm_nl_subflow_established(msk);
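
The kernel worker itself is a dispatcher: it tests and clears event bits on msk->pm.status and calls the matching handler for each pending event. A minimal model of that test-and-dispatch loop, with hypothetical event bits:

    #include <stdio.h>

    enum { PM_ADD_ADDR_RECEIVED, PM_ESTABLISHED, PM_SUBFLOW_ESTABLISHED };

    struct pm { unsigned long status; };

    static void add_addr_received(void) { puts("handle ADD_ADDR"); }
    static void fully_established(void) { puts("handle established"); }

    /* test-and-clear one event bit, as the worker does per case */
    static int pop_event(struct pm *pm, int bit)
    {
        if (!(pm->status & (1UL << bit)))
            return 0;
        pm->status &= ~(1UL << bit);
        return 1;
    }

    static void kernel_worker(struct pm *pm)
    {
        if (pop_event(pm, PM_ADD_ADDR_RECEIVED))
            add_addr_received();
        if (pop_event(pm, PM_ESTABLISHED))
            fully_established();
    }

    int main(void)
    {
        struct pm pm = { .status = 1UL << PM_ESTABLISHED };

        kernel_worker(&pm);
        return 0;
    }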