Lines Matching full:subflow
346 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_prep_synack() local
354 mptcp_fastopen_subflow_synack_set_params(subflow, req); in subflow_prep_synack()
413 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) in subflow_thmac_valid() argument
418 subflow_generate_hmac(subflow->remote_key, subflow->local_key, in subflow_thmac_valid()
419 subflow->remote_nonce, subflow->local_nonce, in subflow_thmac_valid()
423 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", in subflow_thmac_valid()
424 subflow, subflow->token, thmac, subflow->thmac); in subflow_thmac_valid()
426 return thmac == subflow->thmac; in subflow_thmac_valid()
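
The subflow_thmac_valid() lines above recompute the peer's HMAC over the exchanged keys and nonces and compare only a 64-bit truncation of it against the thmac value carried in the MP_JOIN SYN/ACK (RFC 8684). As a point of reference, here is a minimal standalone C sketch of that truncate-and-compare step; it assumes a digest already produced elsewhere (subflow_generate_hmac() in the listing) and uses a dummy digest, and the helper names are illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Read the first 8 bytes of a digest as a big-endian u64, roughly what
 * an unaligned big-endian 64-bit load does on the HMAC output. */
static uint64_t hmac_prefix_be64(const uint8_t *digest)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | digest[i];
	return v;
}

/* thmac check: the 64-bit truncated HMAC received from the peer must
 * match the prefix of the locally recomputed digest. */
static bool thmac_valid(const uint8_t *digest, uint64_t thmac_from_peer)
{
	return hmac_prefix_be64(digest) == thmac_from_peer;
}

int main(void)
{
	/* dummy digest standing in for the real HMAC-SHA256 output */
	uint8_t digest[32] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };
	uint64_t thmac = 0xdeadbeef01020304ULL;

	printf("thmac valid: %d\n", thmac_valid(digest, thmac));
	return 0;
}
```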
431 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_reset() local
432 struct sock *sk = subflow->conn; in mptcp_subflow_reset()
458 struct mptcp_subflow_context *subflow; in __mptcp_sync_state() local
462 subflow = mptcp_subflow_ctx(ssk); in __mptcp_sync_state()
468 /* subflow->idsn is always available in TCP_SYN_SENT state, in __mptcp_sync_state()
471 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); in __mptcp_sync_state()
479 struct mptcp_subflow_context *subflow, in subflow_set_remote_key() argument
482 /* active MPC subflow will reach here multiple times: in subflow_set_remote_key()
485 if (subflow->remote_key_valid) in subflow_set_remote_key()
488 subflow->remote_key_valid = 1; in subflow_set_remote_key()
489 subflow->remote_key = mp_opt->sndr_key; in subflow_set_remote_key()
490 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); in subflow_set_remote_key()
491 subflow->iasn++; in subflow_set_remote_key()
493 WRITE_ONCE(msk->remote_key, subflow->remote_key); in subflow_set_remote_key()
494 WRITE_ONCE(msk->ack_seq, subflow->iasn); in subflow_set_remote_key()
496 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); in subflow_set_remote_key()
500 struct mptcp_subflow_context *subflow, in mptcp_propagate_state() argument
510 WRITE_ONCE(msk->snd_una, subflow->idsn + 1); in mptcp_propagate_state()
511 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); in mptcp_propagate_state()
512 subflow_set_remote_key(msk, subflow, mp_opt); in mptcp_propagate_state()
526 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_finish_connect() local
528 struct sock *parent = subflow->conn; in subflow_finish_connect()
531 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); in subflow_finish_connect()
534 if (subflow->conn_finished) in subflow_finish_connect()
538 subflow->rel_write_seq = 1; in subflow_finish_connect()
539 subflow->conn_finished = 1; in subflow_finish_connect()
540 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; in subflow_finish_connect()
541 pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); in subflow_finish_connect()
544 if (subflow->request_mptcp) { in subflow_finish_connect()
557 subflow->mp_capable = 1; in subflow_finish_connect()
561 mptcp_propagate_state(parent, sk, subflow, &mp_opt); in subflow_finish_connect()
562 } else if (subflow->request_join) { in subflow_finish_connect()
566 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
570 subflow->backup = mp_opt.backup; in subflow_finish_connect()
571 subflow->thmac = mp_opt.thmac; in subflow_finish_connect()
572 subflow->remote_nonce = mp_opt.nonce; in subflow_finish_connect()
573 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); in subflow_finish_connect()
574 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", in subflow_finish_connect()
575 subflow, subflow->thmac, subflow->remote_nonce, in subflow_finish_connect()
576 subflow->backup); in subflow_finish_connect()
578 if (!subflow_thmac_valid(subflow)) { in subflow_finish_connect()
580 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
587 subflow_generate_hmac(subflow->local_key, subflow->remote_key, in subflow_finish_connect()
588 subflow->local_nonce, in subflow_finish_connect()
589 subflow->remote_nonce, in subflow_finish_connect()
591 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); in subflow_finish_connect()
593 subflow->mp_join = 1; in subflow_finish_connect()
596 if (subflow->backup) in subflow_finish_connect()
607 if (subflow->mpc_drop) in subflow_finish_connect()
610 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_finish_connect()
615 subflow->reset_transient = 0; in subflow_finish_connect()
619 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) in subflow_set_local_id() argument
622 WRITE_ONCE(subflow->local_id, local_id); in subflow_set_local_id()
627 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_chk_local_id() local
628 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in subflow_chk_local_id()
631 if (likely(subflow->local_id >= 0)) in subflow_chk_local_id()
638 subflow_set_local_id(subflow, err); in subflow_chk_local_id()
639 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); in subflow_chk_local_id()
671 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v4_conn_request() local
673 pr_debug("subflow=%p\n", subflow); in subflow_v4_conn_request()
702 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v6_conn_request() local
704 pr_debug("subflow=%p\n", subflow); in subflow_v6_conn_request()
797 struct mptcp_subflow_context *subflow, in __mptcp_subflow_fully_established() argument
800 subflow_set_remote_key(msk, subflow, mp_opt); in __mptcp_subflow_fully_established()
801 WRITE_ONCE(subflow->fully_established, true); in __mptcp_subflow_fully_established()
913 /* move the msk reference ownership to the subflow */ in subflow_syn_recv_sock()
930 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child); in subflow_syn_recv_sock() local
932 subflow_add_reset_reason(skb, subflow->reset_reason); in subflow_syn_recv_sock()
980 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) in dbg_bad_map() argument
983 ssn, subflow->map_subflow_seq, subflow->map_data_len); in dbg_bad_map()
988 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in skb_is_fully_mapped() local
997 return skb->len - skb_consumed <= subflow->map_data_len - in skb_is_fully_mapped()
998 mptcp_subflow_get_map_offset(subflow); in skb_is_fully_mapped()
1003 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_mapping() local
1004 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in validate_mapping()
1006 if (unlikely(before(ssn, subflow->map_subflow_seq))) { in validate_mapping()
1007 /* Mapping covers data later in the subflow stream, in validate_mapping()
1010 dbg_bad_map(subflow, ssn); in validate_mapping()
1013 if (unlikely(!before(ssn, subflow->map_subflow_seq + in validate_mapping()
1014 subflow->map_data_len))) { in validate_mapping()
1015 /* Mapping only covers past subflow data, invalid */ in validate_mapping()
1016 dbg_bad_map(subflow, ssn); in validate_mapping()
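
The validate_mapping() lines above check that the current relative subflow sequence number falls inside the DSS mapping window [map_subflow_seq, map_subflow_seq + map_data_len). A small self-contained sketch of that wraparound-safe bounds check follows, with the before() comparison written out as in TCP sequence arithmetic; names here are illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a comes before b" for 32-bit sequence numbers,
 * equivalent in spirit to the kernel's before() helper. */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Is ssn covered by a mapping starting at map_subflow_seq and spanning
 * map_data_len bytes? Mirrors the two checks in the matched lines. */
static bool mapping_covers(uint32_t ssn, uint32_t map_subflow_seq,
			   uint32_t map_data_len)
{
	if (seq_before(ssn, map_subflow_seq))
		return false;	/* mapping starts later in the stream */
	if (!seq_before(ssn, map_subflow_seq + map_data_len))
		return false;	/* mapping ends at or before ssn */
	return true;
}

int main(void)
{
	/* example straddling the 32-bit wraparound point */
	printf("covers: %d\n",
	       mapping_covers(0xfffffff0u, 0xffffffe0u, 0x40));
	return 0;
}
```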
1025 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_data_csum() local
1034 if (subflow->map_csum_len == subflow->map_data_len) in validate_data_csum()
1042 delta = subflow->map_data_len - subflow->map_csum_len; in validate_data_csum()
1044 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; in validate_data_csum()
1055 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, in validate_data_csum()
1056 subflow->map_csum_len); in validate_data_csum()
1059 subflow->map_csum_len += len; in validate_data_csum()
1065 /* if this subflow is closed, the partial mapping in validate_data_csum()
1088 csum = __mptcp_make_csum(subflow->map_seq, in validate_data_csum()
1089 subflow->map_subflow_seq, in validate_data_csum()
1090 subflow->map_data_len + subflow->map_data_fin, in validate_data_csum()
1091 subflow->map_data_csum); in validate_data_csum()
1097 subflow->valid_csum_seen = 1; in validate_data_csum()
1104 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in get_mapping_status() local
1120 if (!subflow->map_valid && !skb->len) { in get_mapping_status()
1135 if (!subflow->map_valid) in get_mapping_status()
1157 if (subflow->map_valid) { in get_mapping_status()
1190 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); in get_mapping_status()
1192 if (subflow->map_valid) { in get_mapping_status()
1194 if (subflow->map_seq == map_seq && in get_mapping_status()
1195 subflow->map_subflow_seq == mpext->subflow_seq && in get_mapping_status()
1196 subflow->map_data_len == data_len && in get_mapping_status()
1197 subflow->map_csum_reqd == mpext->csum_reqd) { in get_mapping_status()
1214 subflow->map_seq = map_seq; in get_mapping_status()
1215 subflow->map_subflow_seq = mpext->subflow_seq; in get_mapping_status()
1216 subflow->map_data_len = data_len; in get_mapping_status()
1217 subflow->map_valid = 1; in get_mapping_status()
1218 subflow->map_data_fin = mpext->data_fin; in get_mapping_status()
1219 subflow->mpc_map = mpext->mpc_map; in get_mapping_status()
1220 subflow->map_csum_reqd = mpext->csum_reqd; in get_mapping_status()
1221 subflow->map_csum_len = 0; in get_mapping_status()
1222 subflow->map_data_csum = csum_unfold(mpext->csum); in get_mapping_status()
1225 if (unlikely(subflow->map_csum_reqd != csum_reqd)) in get_mapping_status()
1229 subflow->map_seq, subflow->map_subflow_seq, in get_mapping_status()
1230 subflow->map_data_len, subflow->map_csum_reqd, in get_mapping_status()
1231 subflow->map_data_csum); in get_mapping_status()
1251 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_discard_data() local
1264 offset, subflow->map_subflow_seq); in mptcp_subflow_discard_data()
1271 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) in mptcp_subflow_discard_data()
1272 subflow->map_valid = 0; in mptcp_subflow_discard_data()
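
The discard path above drops the current mapping once the consumed offset reaches map_data_len. As a reference for that arithmetic, here is a hedged standalone model of the offset computation used by mptcp_subflow_get_map_offset() and of the invalidation step; the struct and helpers are illustrative stand-ins, with field names taken from the listing:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the fields the matched lines operate on;
 * this struct is not the kernel's. */
struct map_state {
	uint32_t copied_seq;       /* tcp_sk(ssk)->copied_seq */
	uint32_t ssn_offset;       /* subflow->ssn_offset */
	uint32_t map_subflow_seq;  /* start of the current DSS mapping */
	uint32_t map_data_len;     /* length of the current DSS mapping */
	bool map_valid;
};

/* How far into the current mapping the subflow has already been
 * consumed: relative subflow sequence minus the mapping start. */
static uint32_t map_offset(const struct map_state *s)
{
	return s->copied_seq - s->ssn_offset - s->map_subflow_seq;
}

/* Mirrors the tail of the discard path: once every byte covered by
 * the mapping has been consumed, the mapping is dropped. */
static void maybe_invalidate(struct map_state *s)
{
	if (map_offset(s) >= s->map_data_len)
		s->map_valid = false;
}

int main(void)
{
	struct map_state s = {
		.copied_seq = 2000, .ssn_offset = 500,
		.map_subflow_seq = 1000, .map_data_len = 500,
		.map_valid = true,
	};

	maybe_invalidate(&s);
	printf("map_valid=%d offset=%u\n", s.map_valid, map_offset(&s));
	return 0;
}
```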
1280 /* sched mptcp worker for subflow cleanup if no more data is pending */
1296 /* when the fallback subflow closes the rx side, trigger a 'dummy' in subflow_sched_work_if_closed()
1307 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_fail() local
1310 /* graceful failure can happen only on the MPC subflow */ in mptcp_subflow_fail()
1326 WRITE_ONCE(subflow->fail_tout, fail_tout); in mptcp_subflow_fail()
1329 mptcp_reset_tout_timer(msk, subflow->fail_tout); in mptcp_subflow_fail()
1334 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in subflow_check_data_avail() local
1340 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1341 if (subflow->data_avail) in subflow_check_data_avail()
1344 msk = mptcp_sk(subflow->conn); in subflow_check_data_avail()
1366 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); in subflow_check_data_avail()
1367 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, in subflow_check_data_avail()
1374 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1387 (subflow->mp_join || subflow->valid_csum_seen)) { in subflow_check_data_avail()
1388 subflow->send_mp_fail = 1; in subflow_check_data_avail()
1391 subflow->reset_transient = 0; in subflow_check_data_avail()
1392 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; in subflow_check_data_avail()
1396 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1404 subflow->reset_transient = 0; in subflow_check_data_avail()
1405 subflow->reset_reason = status == MAPPING_NODSS ? in subflow_check_data_avail()
1415 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1423 subflow->map_valid = 1; in subflow_check_data_avail()
1424 subflow->map_seq = READ_ONCE(msk->ack_seq); in subflow_check_data_avail()
1425 subflow->map_data_len = skb->len; in subflow_check_data_avail()
1426 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in subflow_check_data_avail()
1427 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1433 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcp_subflow_data_available() local
1436 if (subflow->map_valid && in mptcp_subflow_data_available()
1437 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { in mptcp_subflow_data_available()
1438 subflow->map_valid = 0; in mptcp_subflow_data_available()
1439 WRITE_ONCE(subflow->data_avail, false); in mptcp_subflow_data_available()
1442 subflow->map_subflow_seq, in mptcp_subflow_data_available()
1443 subflow->map_data_len); in mptcp_subflow_data_available()
1460 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_space() local
1461 const struct sock *sk = subflow->conn; in mptcp_space()
1488 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_data_ready() local
1490 struct sock *parent = subflow->conn; in subflow_data_ready()
1497 /* MPJ subflows are removed from the accept queue before reaching here, in subflow_data_ready()
1507 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && in subflow_data_ready()
1508 !subflow->mp_join && !(state & TCPF_CLOSE)); in subflow_data_ready()
1513 /* subflow-level lowat tests are not relevant. in subflow_data_ready()
1545 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcpv6_handle_mapped() local
1551 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", in mptcpv6_handle_mapped()
1552 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); in mptcpv6_handle_mapped()
1557 subflow->icsk_af_ops = icsk->icsk_af_ops; in mptcpv6_handle_mapped()
1597 struct mptcp_subflow_context *subflow; in __mptcp_subflow_connect() local
1620 subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_connect()
1622 get_random_bytes(&subflow->local_nonce, sizeof(u32)); in __mptcp_subflow_connect()
1623 } while (!subflow->local_nonce); in __mptcp_subflow_connect()
1637 subflow_set_local_id(subflow, local_id); in __mptcp_subflow_connect()
1639 subflow->remote_key_valid = 1; in __mptcp_subflow_connect()
1640 subflow->remote_key = READ_ONCE(msk->remote_key); in __mptcp_subflow_connect()
1641 subflow->local_key = READ_ONCE(msk->local_key); in __mptcp_subflow_connect()
1642 subflow->token = msk->token; in __mptcp_subflow_connect()
1659 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); in __mptcp_subflow_connect()
1662 subflow->remote_token = remote_token; in __mptcp_subflow_connect()
1663 WRITE_ONCE(subflow->remote_id, remote_id); in __mptcp_subflow_connect()
1664 subflow->request_join = 1; in __mptcp_subflow_connect()
1665 subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); in __mptcp_subflow_connect()
1666 subflow->subflow_id = msk->subflow_id++; in __mptcp_subflow_connect()
1670 list_add_tail(&subflow->node, &msk->conn_list); in __mptcp_subflow_connect()
1681 /* discard the subflow socket */ in __mptcp_subflow_connect()
1689 list_del(&subflow->node); in __mptcp_subflow_connect()
1690 sock_put(mptcp_subflow_tcp_sock(subflow)); in __mptcp_subflow_connect()
1693 subflow->disposable = 1; in __mptcp_subflow_connect()
1751 struct mptcp_subflow_context *subflow; in mptcp_subflow_create_socket() local
1797 subflow = mptcp_subflow_ctx(sf->sk); in mptcp_subflow_create_socket()
1798 pr_debug("subflow=%p\n", subflow); in mptcp_subflow_create_socket()
1802 subflow->conn = sk; in mptcp_subflow_create_socket()
1827 pr_debug("subflow=%p\n", ctx); in subflow_create_ctx()
1848 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_state_change() local
1849 struct sock *parent = subflow->conn; in subflow_state_change()
1858 subflow->conn_finished = 1; in subflow_state_change()
1859 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_state_change()
1862 /* as recvmsg() does not acquire the subflow socket for ssk selection in subflow_state_change()
1878 struct mptcp_subflow_context *subflow; in mptcp_subflow_queue_clean() local
1895 /* can't acquire the msk socket lock under the subflow one, in mptcp_subflow_queue_clean()
1905 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_queue_clean()
1906 if (!subflow || !subflow->conn) in mptcp_subflow_queue_clean()
1909 sk = subflow->conn; in mptcp_subflow_queue_clean()
1966 pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); in subflow_ulp_init()
1998 * when the subflow is still unaccepted in subflow_ulp_release()
2052 /* this is the first subflow, id is always 0 */ in subflow_ulp_clone()
2065 /* the subflow req id is valid, fetched via subflow_check_req() in subflow_ulp_clone()
2074 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in tcp_release_cb_override() local
2077 /* process and clear all the pending actions, but leave the subflow into in tcp_release_cb_override()
2081 status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); in tcp_release_cb_override()
2090 /* closing a listener subflow requires a great deal of care. in tcp_abort_override()
2129 panic("MPTCP: failed to init subflow v4 request sock ops\n"); in mptcp_subflow_init()
2158 panic("MPTCP: failed to init subflow v6 request sock ops\n"); in mptcp_subflow_init()