Lines Matching defs:msk (net/mptcp/subflow.c)
44 if (subflow_req->msk)
45 sock_put((struct sock *)subflow_req->msk);
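The two matches above (lines 44-45) are from the request-socket destructor, subflow_req_destructor() in mainline: an MP_JOIN request socket holds a reference on the msk it resolved via the token, and that reference must be dropped if the request dies before a child subflow is created. A minimal sketch of the pattern, simplified from the surrounding context:

        static void subflow_req_destructor(struct request_sock *req)
        {
                struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

                /* drop the msk reference taken by subflow_token_join_request() */
                if (subflow_req->msk)
                        sock_put((struct sock *)subflow_req->msk);

                mptcp_token_destroy_request(req);
        }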
61 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
63 return mptcp_is_fully_established((void *)msk) &&
64 ((mptcp_pm_is_userspace(msk) &&
65 mptcp_userspace_pm_active(msk)) ||
66 READ_ONCE(msk->pm.accept_subflow));
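Lines 61-66 are the complete admission policy; reassembled as one readable unit (the listing omits the brace line), a new MP_JOIN subflow is accepted only once the connection is fully established, and only if either an active userspace path manager is in charge or the in-kernel PM still allows additional subflows:

        static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
        {
                return mptcp_is_fully_established((void *)msk) &&
                       ((mptcp_pm_is_userspace(msk) &&
                         mptcp_userspace_pm_active(msk)) ||
                        READ_ONCE(msk->pm.accept_subflow));
        }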
72 struct mptcp_sock *msk = subflow_req->msk;
77 subflow_generate_hmac(READ_ONCE(msk->local_key),
78 READ_ONCE(msk->remote_key),
88 struct mptcp_sock *msk;
91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 if (!msk) {
97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
99 sock_put((struct sock *)msk);
103 subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
105 return msk;
116 subflow_req->msk = NULL;
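The matches at lines 88-105 stitch back together into the token-lookup helper (the line-116 match is from the request-init path, which simply starts out with no msk attached). A simplified reconstruction of subflow_token_join_request(), with MIB accounting omitted; the lookup returns the msk with a reference held, so every failure path must give that reference back:

        static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
        {
                struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
                struct mptcp_sock *msk;
                int local_id;

                /* on success, a reference on the returned msk is held */
                msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
                if (!msk)
                        return NULL;

                local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
                if (local_id < 0) {
                        sock_put((struct sock *)msk);
                        return NULL;
                }
                subflow_req->local_id = local_id;
                subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);

                return msk;
        }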
120 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
122 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
226 subflow_req->msk = subflow_token_join_request(req);
229 if (!subflow_req->msk) {
234 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
237 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
238 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
249 if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
258 pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
259 subflow_req->remote_nonce, subflow_req->msk);
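Lines 226-259 are from the MP_JOIN branch of the listener's request validation (subflow_check_req() in mainline). Assembled into a flow sketch (the goto labels below are placeholders; MIB counters and reset-reason handling are omitted): the listener resolves the msk from the token, tolerates an unexpected source port only if the peer announced that port, then applies the admission policy from lines 61-66:

        subflow_req->msk = subflow_token_join_request(req);
        if (!subflow_req->msk)
                goto drop;                      /* hypothetical label */

        if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
                /* a join from an unexpected source port is acceptable only
                 * if the peer advertised that port (ADD_ADDR announce list)
                 */
                if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener))
                        goto drop;              /* hypothetical label */
        }

        if (!mptcp_can_accept_new_subflow(subflow_req->msk))
                goto drop;                      /* hypothetical label */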
452 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
454 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
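Both port helpers (lines 120-122 and 452-454) depend on the same cast: an msk can be handed to inet_sk() as a plain struct sock because struct mptcp_sock is laid out with the connection sock as its first member, so the pointer values coincide:

        /* abridged from net/mptcp/protocol.h */
        struct mptcp_sock {
                /* inet_connection_sock must be the first member */
                struct inet_connection_sock sk;
                /* ... */
        };

In effect each helper compares the candidate subflow's TCP port against the msk-level socket's own port, which it inherited from the initial subflow.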
460 struct mptcp_sock *msk = mptcp_sk(sk);
461 struct sock *ssk = msk->first;
465 if (!msk->rcvspace_init)
466 mptcp_rcv_space_init(msk, ssk);
472 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
473 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
479 static void subflow_set_remote_key(struct mptcp_sock *msk,
494 WRITE_ONCE(msk->remote_key, subflow->remote_key);
495 WRITE_ONCE(msk->ack_seq, subflow->iasn);
496 WRITE_ONCE(msk->can_ack, true);
497 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
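Lines 479-497 publish the peer's key material on the msk. Every store goes through WRITE_ONCE() (plus atomic64_set() for the 64-bit receive-window field) because other paths read these fields locklessly; can_ack is the gate readers test first. The matching reader side appears later in this listing (lines 1373-1376) and pairs as sketched here:

        /* in the data-path reader (see the subflow_check_data_avail matches below) */
        if (unlikely(!READ_ONCE(msk->can_ack)))
                goto fallback;          /* hypothetical label: keys not installed yet */

        old_ack = READ_ONCE(msk->ack_seq);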
504 struct mptcp_sock *msk = mptcp_sk(sk);
511 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
512 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
513 subflow_set_remote_key(msk, subflow, mp_opt);
519 msk->pending_state = ssk->sk_state;
520 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
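Lines 504-520 show the deferred-update idiom: code running in subflow context records the target TCP state in msk->pending_state and raises MPTCP_SYNC_STATE in msk->cb_flags; the actual msk-level transition happens later, under the msk socket lock, when the release callback consumes the flag. The consumer side, sketched under the assumption that it follows the usual release_cb convention:

        /* in the msk release callback (sketch) */
        if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags))
                __mptcp_sync_state(sk, msk->pending_state);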
530 struct mptcp_sock *msk;
538 msk = mptcp_sk(parent);
558 WRITE_ONCE(msk->csum_enabled, true);
560 WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
603 if (subflow_use_different_dport(msk, sk)) {
632 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
638 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
643 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
755 struct mptcp_sock *msk = subflow_req->msk;
758 subflow_generate_hmac(READ_ONCE(msk->remote_key),
759 READ_ONCE(msk->local_key),
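Lines 755-759 are the validation counterpart of the generation at lines 77-78, and the argument order flips between the two. Each peer keys the HMAC with its own key (and nonce) first, so the side checking a received HMAC must swap both pairs; the trailing nonce and output-buffer arguments are elided in this sketch:

        /* computing our own thmac (lines 77-78): local key first */
        subflow_generate_hmac(READ_ONCE(msk->local_key),
                              READ_ONCE(msk->remote_key), ...);

        /* validating the peer's hmac (lines 758-759): remote key first */
        subflow_generate_hmac(READ_ONCE(msk->remote_key),
                              READ_ONCE(msk->local_key), ...);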
796 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
800 subflow_set_remote_key(msk, subflow, mp_opt);
802 WRITE_ONCE(msk->fully_established, true);
896 owner = subflow_req->msk;
914 /* move the msk reference ownership to the subflow */
915 subflow_req->msk = NULL;
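Lines 896-915 complete the reference dance started at lines 88-105: when a child subflow is actually created, it takes over the msk reference the request socket had been holding, and clearing subflow_req->msk guarantees the destructor at lines 44-45 will not drop that reference a second time:

        owner = subflow_req->msk;
        /* ... owner becomes the new subflow's conn ... */

        /* move the msk reference ownership to the subflow */
        subflow_req->msk = NULL;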
1103 struct mptcp_sock *msk)
1106 bool csum_reqd = READ_ONCE(msk->csum_enabled);
1155 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1169 mptcp_schedule_work((struct sock *)msk);
1182 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1190 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
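Line 1190 expands a possibly 32-bit data sequence number from the DSS option against the last known 64-bit ack_seq. The idea behind the expansion, as a minimal standalone sketch (not the mainline implementation, which lives in protocol.h): choose the 64-bit value whose low 32 bits equal the wire value and which lies closest to the reference point:

        static u64 expand_seq_sketch(u64 old64, u32 cur32)
        {
                s32 delta = cur32 - (u32)old64; /* signed 32-bit distance */

                /* sign extension selects the nearest 4G window around old64 */
                return old64 + delta;
        }

When the option already carries a 64-bit DSN (mpext->dsn64 set), no expansion is needed and the value is taken as-is.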
1282 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1284 struct sock *sk = (struct sock *)msk;
1294 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1298 * ingress data fin, so that the msk state will follow along
1300 if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
1301 msk->first == ssk &&
1302 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
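Lines 1282-1302: the close-detection helper schedules the msk worker at most once per event, using test_and_set_bit() on MPTCP_WORK_CLOSE_SUBFLOW as the dedup gate, in the same idiom as the match at line 1169:

        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
                mptcp_schedule_work((struct sock *)msk);

The fallback special case at lines 1300-1302 exists because a fallback peer sends no DSS DATA_FIN: when the first and only subflow is done, feeding the current ack_seq to mptcp_update_rcv_data_fin() simulates an ingress DATA_FIN so the msk state machine can still advance.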
1306 static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1312 spin_lock_bh(&msk->fallback_lock);
1313 if (!msk->allow_infinite_fallback) {
1314 spin_unlock_bh(&msk->fallback_lock);
1317 msk->allow_subflows = false;
1318 spin_unlock_bh(&msk->fallback_lock);
1321 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1327 if (sock_flag((struct sock *)msk, SOCK_DEAD))
1339 mptcp_reset_tout_timer(msk, subflow->fail_tout);
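Lines 1306-1339 handle a subflow-level failure (MP_FAIL). The fallback_lock section, reassembled with its branch structure made explicit, decides whether the connection may still recover through an infinite mapping; if not, the caller (see line 1401) resets the subflow instead:

        spin_lock_bh(&msk->fallback_lock);
        if (!msk->allow_infinite_fallback) {
                spin_unlock_bh(&msk->fallback_lock);
                return false;           /* no recovery possible; caller resets the subflow */
        }
        msk->allow_subflows = false;    /* committed to MP_FAIL: no new joins */
        spin_unlock_bh(&msk->fallback_lock);

Past that point, the WARN_ON_ONCE at line 1321 asserts that only the first subflow can take this path, and, unless the msk is already dead (SOCK_DEAD, line 1327), an MP_FAIL timeout is armed via mptcp_reset_tout_timer().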
1347 struct mptcp_sock *msk;
1355 msk = mptcp_sk(subflow->conn);
1360 status = get_mapping_status(ssk, msk);
1373 if (unlikely(!READ_ONCE(msk->can_ack)))
1376 old_ack = READ_ONCE(msk->ack_seq);
1378 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
1391 subflow_sched_work_if_closed(msk, ssk);
1395 if (!__mptcp_check_fallback(msk)) {
1401 if (!mptcp_subflow_fail(msk, ssk)) {
1432 subflow->map_seq = READ_ONCE(msk->ack_seq);
1481 * and msk socket spinlock
1499 struct mptcp_sock *msk;
1503 msk = mptcp_sk(parent);
1515 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1522 * respect the msk-level threshold eventually mandating an immediate ack
1524 if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
1604 struct mptcp_sock *msk = mptcp_sk(sk);
1622 pr_debug("msk=%p local=%d remote=%d create sock error: %d\n",
1623 msk, local_id, remote_id, err);
1648 subflow->remote_key = READ_ONCE(msk->remote_key);
1649 subflow->local_key = READ_ONCE(msk->local_key);
1650 subflow->token = msk->token;
1662 pr_debug("msk=%p local=%d remote=%d bind error: %d\n",
1663 msk, local_id, remote_id, err);
1668 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
1674 subflow->subflow_id = msk->subflow_id++;
1678 list_add_tail(&subflow->node, &msk->conn_list);
1682 pr_debug("msk=%p local=%d remote=%d connect error: %d\n",
1683 msk, local_id, remote_id, err);
1707 mptcp_pm_close_subflow(msk);
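Lines 1604-1707 come from __mptcp_subflow_connect(), the initiating side of a join. With the error paths that the pr_debug matches mark out stripped away, the setup stitches together as: the new subflow inherits the connection's crypto material and token, receives a fresh per-connection id, and is linked onto the msk's subflow list:

        subflow->remote_key = READ_ONCE(msk->remote_key);
        subflow->local_key = READ_ONCE(msk->local_key);
        subflow->token = msk->token;
        /* ... address binding and connect; see the error matches above ... */
        subflow->subflow_id = msk->subflow_id++;
        list_add_tail(&subflow->node, &msk->conn_list);

On failure, mptcp_pm_close_subflow(msk) at line 1707 rolls back the path manager's subflow accounting, which was charged before the creation attempt.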
1899 /* can't acquire the msk socket lock under the subflow one,
1938 /* we are still under the listener msk socket lock */
2000 /* if the msk has been orphaned, keep the ctx