Lines Matching full:subflow
314 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_prep_synack() local
322 mptcp_fastopen_subflow_synack_set_params(subflow, req); in subflow_prep_synack()
379 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) in subflow_thmac_valid() argument
384 subflow_generate_hmac(subflow->remote_key, subflow->local_key, in subflow_thmac_valid()
385 subflow->remote_nonce, subflow->local_nonce, in subflow_thmac_valid()
389 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", in subflow_thmac_valid()
390 subflow, subflow->token, thmac, subflow->thmac); in subflow_thmac_valid()
392 return thmac == subflow->thmac; in subflow_thmac_valid()
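The thmac comparison above is the MP_JOIN SYN/ACK validation step from RFC 8684: the peer advertises only the most significant 64 bits of HMAC-SHA256(Key-B || Key-A, R-B || R-A), and the receiver recomputes it from the exchanged keys and nonces (the same argument order as the subflow_generate_hmac() call listed above) and compares. The following is a minimal userspace sketch of that truncate-and-compare step, not the kernel code: it uses OpenSSL's HMAC() instead of the kernel helper, the name thmac_valid() is purely illustrative, and keys/nonces are assumed to already be in wire (big-endian) byte order.

/* Illustrative sketch of the RFC 8684 MP_JOIN truncated-HMAC check.
 * Build with: gcc thmac.c -lcrypto
 */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static bool thmac_valid(uint64_t remote_key_be, uint64_t local_key_be,
			uint32_t remote_nonce_be, uint32_t local_nonce_be,
			uint64_t thmac_from_peer)
{
	unsigned char key[16], msg[8], mac[EVP_MAX_MD_SIZE];
	unsigned int mac_len;
	uint64_t expect;

	/* key = Key-B || Key-A, message = R-B || R-A, all in wire byte order */
	memcpy(key, &remote_key_be, 8);
	memcpy(key + 8, &local_key_be, 8);
	memcpy(msg, &remote_nonce_be, 4);
	memcpy(msg + 4, &local_nonce_be, 4);

	HMAC(EVP_sha256(), key, sizeof(key), msg, sizeof(msg), mac, &mac_len);

	/* the MP_JOIN option only carries the most significant 64 bits */
	memcpy(&expect, mac, sizeof(expect));
	return be64toh(expect) == thmac_from_peer;
}

int main(void)
{
	/* with all-zero keys/nonces this merely exercises the helper */
	uint64_t thmac = 0;	/* value taken from the peer's MP_JOIN SYN/ACK */
	return thmac_valid(0, 0, 0, 0, thmac) ? 0 : 1;
}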
397 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_reset() local
398 struct sock *sk = subflow->conn; in mptcp_subflow_reset()
424 struct mptcp_subflow_context *subflow; in __mptcp_sync_state() local
428 subflow = mptcp_subflow_ctx(ssk); in __mptcp_sync_state()
434 /* subflow->idsn is always available in TCP_SYN_SENT state, in __mptcp_sync_state()
437 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); in __mptcp_sync_state()
445 struct mptcp_subflow_context *subflow, in subflow_set_remote_key() argument
448 /* active MPC subflow will reach here multiple times: in subflow_set_remote_key()
451 if (subflow->remote_key_valid) in subflow_set_remote_key()
454 subflow->remote_key_valid = 1; in subflow_set_remote_key()
455 subflow->remote_key = mp_opt->sndr_key; in subflow_set_remote_key()
456 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); in subflow_set_remote_key()
457 subflow->iasn++; in subflow_set_remote_key()
459 WRITE_ONCE(msk->remote_key, subflow->remote_key); in subflow_set_remote_key()
460 WRITE_ONCE(msk->ack_seq, subflow->iasn); in subflow_set_remote_key()
462 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); in subflow_set_remote_key()
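For context, the iasn value stored above comes from the RFC 8684 key derivation performed by mptcp_crypto_key_sha(): SHA-256 of the 64-bit key yields the token (most significant 32 bits of the hash) and the initial data sequence number (least significant 64 bits), and the first expected data sequence is IDSN + 1, hence the subflow->iasn++ in the listing. A small userspace sketch of that derivation follows; it relies on OpenSSL's SHA256() rather than the kernel helper, and the name key_to_token_idsn() is illustrative only.

/* Illustrative sketch of the RFC 8684 key -> token/IDSN derivation.
 * Build with: gcc key_sha.c -lcrypto
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void key_to_token_idsn(uint64_t key_be, uint32_t *token, uint64_t *idsn)
{
	unsigned char digest[SHA256_DIGEST_LENGTH];
	uint32_t head;
	uint64_t tail;

	/* the 64-bit key is hashed as it appears on the wire (big endian) */
	SHA256((const unsigned char *)&key_be, sizeof(key_be), digest);

	/* token: most significant 32 bits of the hash */
	memcpy(&head, digest, sizeof(head));
	*token = be32toh(head);

	/* IDSN: least significant 64 bits of the hash */
	memcpy(&tail, digest + SHA256_DIGEST_LENGTH - sizeof(tail), sizeof(tail));
	*idsn = be64toh(tail);
}

int main(void)
{
	uint32_t token;
	uint64_t idsn;

	key_to_token_idsn(htobe64(0x0102030405060708ULL), &token, &idsn);
	/* subflow_set_remote_key() stores idsn + 1 as the first expected sequence */
	printf("token=%u first_dsn=%llu\n", token, (unsigned long long)(idsn + 1));
	return 0;
}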
466 struct mptcp_subflow_context *subflow, in mptcp_propagate_state() argument
476 WRITE_ONCE(msk->snd_una, subflow->idsn + 1); in mptcp_propagate_state()
477 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); in mptcp_propagate_state()
478 subflow_set_remote_key(msk, subflow, mp_opt); in mptcp_propagate_state()
492 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_finish_connect() local
494 struct sock *parent = subflow->conn; in subflow_finish_connect()
497 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); in subflow_finish_connect()
500 if (subflow->conn_finished) in subflow_finish_connect()
504 subflow->rel_write_seq = 1; in subflow_finish_connect()
505 subflow->conn_finished = 1; in subflow_finish_connect()
506 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; in subflow_finish_connect()
507 pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); in subflow_finish_connect()
510 if (subflow->request_mptcp) { in subflow_finish_connect()
523 subflow->mp_capable = 1; in subflow_finish_connect()
526 mptcp_propagate_state(parent, sk, subflow, &mp_opt); in subflow_finish_connect()
527 } else if (subflow->request_join) { in subflow_finish_connect()
531 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
535 subflow->backup = mp_opt.backup; in subflow_finish_connect()
536 subflow->thmac = mp_opt.thmac; in subflow_finish_connect()
537 subflow->remote_nonce = mp_opt.nonce; in subflow_finish_connect()
538 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); in subflow_finish_connect()
539 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d", in subflow_finish_connect()
540 subflow, subflow->thmac, subflow->remote_nonce, in subflow_finish_connect()
541 subflow->backup); in subflow_finish_connect()
543 if (!subflow_thmac_valid(subflow)) { in subflow_finish_connect()
545 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
552 subflow_generate_hmac(subflow->local_key, subflow->remote_key, in subflow_finish_connect()
553 subflow->local_nonce, in subflow_finish_connect()
554 subflow->remote_nonce, in subflow_finish_connect()
556 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); in subflow_finish_connect()
558 subflow->mp_join = 1; in subflow_finish_connect()
569 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_finish_connect()
574 subflow->reset_transient = 0; in subflow_finish_connect()
578 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) in subflow_set_local_id() argument
581 WRITE_ONCE(subflow->local_id, local_id); in subflow_set_local_id()
586 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_chk_local_id() local
587 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in subflow_chk_local_id()
590 if (likely(subflow->local_id >= 0)) in subflow_chk_local_id()
597 subflow_set_local_id(subflow, err); in subflow_chk_local_id()
628 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v4_conn_request() local
630 pr_debug("subflow=%p", subflow); in subflow_v4_conn_request()
659 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v6_conn_request() local
661 pr_debug("subflow=%p", subflow); in subflow_v6_conn_request()
755 struct mptcp_subflow_context *subflow, in __mptcp_subflow_fully_established() argument
758 subflow_set_remote_key(msk, subflow, mp_opt); in __mptcp_subflow_fully_established()
759 subflow->fully_established = 1; in __mptcp_subflow_fully_established()
762 if (subflow->is_mptfo) in __mptcp_subflow_fully_established()
763 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt); in __mptcp_subflow_fully_established()
866 /* move the msk reference ownership to the subflow */ in subflow_syn_recv_sock()
924 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) in dbg_bad_map() argument
927 ssn, subflow->map_subflow_seq, subflow->map_data_len); in dbg_bad_map()
932 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in skb_is_fully_mapped() local
939 return skb->len - skb_consumed <= subflow->map_data_len - in skb_is_fully_mapped()
940 mptcp_subflow_get_map_offset(subflow); in skb_is_fully_mapped()
945 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_mapping() local
946 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in validate_mapping()
948 if (unlikely(before(ssn, subflow->map_subflow_seq))) { in validate_mapping()
949 /* Mapping covers data later in the subflow stream, in validate_mapping()
952 dbg_bad_map(subflow, ssn); in validate_mapping()
955 if (unlikely(!before(ssn, subflow->map_subflow_seq + in validate_mapping()
956 subflow->map_data_len))) { in validate_mapping()
957 /* Mapping covers only past subflow data, invalid */ in validate_mapping()
958 dbg_bad_map(subflow, ssn); in validate_mapping()
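The two before() tests listed above bound the current subflow sequence number to the DSS mapping window [map_subflow_seq, map_subflow_seq + map_data_len). A standalone sketch of that wrap-safe check follows; before() is reimplemented here the way the kernel defines it, and mapping_covers() is an illustrative name, not a kernel function.

/* Illustrative sketch of the wrap-safe mapping window check performed by
 * validate_mapping() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if seq1 precedes seq2 modulo 2^32 (classic TCP sequence compare) */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* ssn is the current subflow sequence (copied_seq - ssn_offset in the kernel);
 * the mapping is usable only if ssn falls inside
 * [map_subflow_seq, map_subflow_seq + map_data_len)
 */
static bool mapping_covers(uint32_t ssn, uint32_t map_subflow_seq, uint32_t map_data_len)
{
	if (before(ssn, map_subflow_seq))
		return false;	/* mapping starts later in the subflow stream */
	if (!before(ssn, map_subflow_seq + map_data_len))
		return false;	/* mapping covers only already-consumed data */
	return true;
}

int main(void)
{
	/* a 1000-byte mapping starting at subflow sequence 4294967000 still
	 * covers ssn 200 thanks to the wrap-safe comparison
	 */
	printf("%d\n", mapping_covers(200U, 4294967000U, 1000U));
	return 0;
}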
967 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_data_csum() local
976 if (subflow->map_csum_len == subflow->map_data_len) in validate_data_csum()
984 delta = subflow->map_data_len - subflow->map_csum_len; in validate_data_csum()
986 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; in validate_data_csum()
997 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, in validate_data_csum()
998 subflow->map_csum_len); in validate_data_csum()
1001 subflow->map_csum_len += len; in validate_data_csum()
1007 /* if this subflow is closed, the partial mapping in validate_data_csum()
1030 csum = __mptcp_make_csum(subflow->map_seq, in validate_data_csum()
1031 subflow->map_subflow_seq, in validate_data_csum()
1032 subflow->map_data_len + subflow->map_data_fin, in validate_data_csum()
1033 subflow->map_data_csum); in validate_data_csum()
1039 subflow->valid_csum_seen = 1; in validate_data_csum()
1046 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in get_mapping_status() local
1062 if (!subflow->map_valid && !skb->len) { in get_mapping_status()
1076 if (!subflow->map_valid) in get_mapping_status()
1088 subflow->map_data_len = 0; in get_mapping_status()
1097 if (subflow->map_valid) { in get_mapping_status()
1130 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); in get_mapping_status()
1132 if (subflow->map_valid) { in get_mapping_status()
1134 if (subflow->map_seq == map_seq && in get_mapping_status()
1135 subflow->map_subflow_seq == mpext->subflow_seq && in get_mapping_status()
1136 subflow->map_data_len == data_len && in get_mapping_status()
1137 subflow->map_csum_reqd == mpext->csum_reqd) { in get_mapping_status()
1154 subflow->map_seq = map_seq; in get_mapping_status()
1155 subflow->map_subflow_seq = mpext->subflow_seq; in get_mapping_status()
1156 subflow->map_data_len = data_len; in get_mapping_status()
1157 subflow->map_valid = 1; in get_mapping_status()
1158 subflow->map_data_fin = mpext->data_fin; in get_mapping_status()
1159 subflow->mpc_map = mpext->mpc_map; in get_mapping_status()
1160 subflow->map_csum_reqd = mpext->csum_reqd; in get_mapping_status()
1161 subflow->map_csum_len = 0; in get_mapping_status()
1162 subflow->map_data_csum = csum_unfold(mpext->csum); in get_mapping_status()
1165 if (unlikely(subflow->map_csum_reqd != csum_reqd)) in get_mapping_status()
1169 subflow->map_seq, subflow->map_subflow_seq, in get_mapping_status()
1170 subflow->map_data_len, subflow->map_csum_reqd, in get_mapping_status()
1171 subflow->map_data_csum); in get_mapping_status()
1191 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_discard_data() local
1198 subflow->map_subflow_seq); in mptcp_subflow_discard_data()
1203 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) in mptcp_subflow_discard_data()
1204 subflow->map_valid = 0; in mptcp_subflow_discard_data()
1207 /* sched mptcp worker to remove the subflow if no more data is pending */
1218 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) in subflow_can_fallback() argument
1220 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in subflow_can_fallback()
1222 if (subflow->mp_join) in subflow_can_fallback()
1225 return !subflow->valid_csum_seen; in subflow_can_fallback()
1227 return !subflow->fully_established; in subflow_can_fallback()
1232 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_fail() local
1235 /* graceful failure can happen only on the MPC subflow */ in mptcp_subflow_fail()
1251 WRITE_ONCE(subflow->fail_tout, fail_tout); in mptcp_subflow_fail()
1254 mptcp_reset_tout_timer(msk, subflow->fail_tout); in mptcp_subflow_fail()
1259 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in subflow_check_data_avail() local
1265 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1266 if (subflow->data_avail) in subflow_check_data_avail()
1269 msk = mptcp_sk(subflow->conn); in subflow_check_data_avail()
1291 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); in subflow_check_data_avail()
1292 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack, in subflow_check_data_avail()
1299 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1312 (subflow->mp_join || subflow->valid_csum_seen)) { in subflow_check_data_avail()
1313 subflow->send_mp_fail = 1; in subflow_check_data_avail()
1316 subflow->reset_transient = 0; in subflow_check_data_avail()
1317 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; in subflow_check_data_avail()
1321 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1325 if (!subflow_can_fallback(subflow) && subflow->map_data_len) { in subflow_check_data_avail()
1329 subflow->reset_transient = 0; in subflow_check_data_avail()
1330 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_check_data_avail()
1338 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1346 subflow->map_valid = 1; in subflow_check_data_avail()
1347 subflow->map_seq = READ_ONCE(msk->ack_seq); in subflow_check_data_avail()
1348 subflow->map_data_len = skb->len; in subflow_check_data_avail()
1349 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in subflow_check_data_avail()
1350 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1356 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcp_subflow_data_available() local
1359 if (subflow->map_valid && in mptcp_subflow_data_available()
1360 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { in mptcp_subflow_data_available()
1361 subflow->map_valid = 0; in mptcp_subflow_data_available()
1362 WRITE_ONCE(subflow->data_avail, false); in mptcp_subflow_data_available()
1365 subflow->map_subflow_seq, in mptcp_subflow_data_available()
1366 subflow->map_data_len); in mptcp_subflow_data_available()
1383 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_space() local
1384 const struct sock *sk = subflow->conn; in mptcp_space()
1411 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_data_ready() local
1413 struct sock *parent = subflow->conn; in subflow_data_ready()
1420 /* MPJ subflows are removed from the accept queue before reaching here, in subflow_data_ready()
1430 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && in subflow_data_ready()
1431 !subflow->mp_join && !(state & TCPF_CLOSE)); in subflow_data_ready()
1436 /* subflow-level lowat tests are not relevant. in subflow_data_ready()
1468 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcpv6_handle_mapped() local
1474 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d", in mptcpv6_handle_mapped()
1475 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); in mptcpv6_handle_mapped()
1480 subflow->icsk_af_ops = icsk->icsk_af_ops; in mptcpv6_handle_mapped()
1520 struct mptcp_subflow_context *subflow; in __mptcp_subflow_connect() local
1540 subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_connect()
1542 get_random_bytes(&subflow->local_nonce, sizeof(u32)); in __mptcp_subflow_connect()
1543 } while (!subflow->local_nonce); in __mptcp_subflow_connect()
1546 subflow_set_local_id(subflow, local_id); in __mptcp_subflow_connect()
1550 subflow->remote_key_valid = 1; in __mptcp_subflow_connect()
1551 subflow->remote_key = msk->remote_key; in __mptcp_subflow_connect()
1552 subflow->local_key = msk->local_key; in __mptcp_subflow_connect()
1553 subflow->token = msk->token; in __mptcp_subflow_connect()
1566 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); in __mptcp_subflow_connect()
1569 subflow->remote_token = remote_token; in __mptcp_subflow_connect()
1570 WRITE_ONCE(subflow->remote_id, remote_id); in __mptcp_subflow_connect()
1571 subflow->request_join = 1; in __mptcp_subflow_connect()
1572 subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP); in __mptcp_subflow_connect()
1573 subflow->subflow_id = msk->subflow_id++; in __mptcp_subflow_connect()
1577 list_add_tail(&subflow->node, &msk->conn_list); in __mptcp_subflow_connect()
1582 /* discard the subflow socket */ in __mptcp_subflow_connect()
1590 list_del(&subflow->node); in __mptcp_subflow_connect()
1591 sock_put(mptcp_subflow_tcp_sock(subflow)); in __mptcp_subflow_connect()
1594 subflow->disposable = 1; in __mptcp_subflow_connect()
1652 struct mptcp_subflow_context *subflow; in mptcp_subflow_create_socket() local
1701 subflow = mptcp_subflow_ctx(sf->sk); in mptcp_subflow_create_socket()
1702 pr_debug("subflow=%p", subflow); in mptcp_subflow_create_socket()
1706 subflow->conn = sk; in mptcp_subflow_create_socket()
1731 pr_debug("subflow=%p", ctx); in subflow_create_ctx()
1757 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_state_change() local
1758 struct sock *parent = subflow->conn; in subflow_state_change()
1767 subflow->conn_finished = 1; in subflow_state_change()
1768 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_state_change()
1771 /* as recvmsg() does not acquire the subflow socket for ssk selection in subflow_state_change()
1782 /* when the fallback subflow closes the rx side, trigger a 'dummy' in subflow_state_change()
1794 struct mptcp_subflow_context *subflow; in mptcp_subflow_queue_clean() local
1811 /* can't acquire the msk socket lock under the subflow one, in mptcp_subflow_queue_clean()
1821 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_queue_clean()
1822 if (!subflow || !subflow->conn) in mptcp_subflow_queue_clean()
1825 sk = subflow->conn; in mptcp_subflow_queue_clean()
1882 pr_debug("subflow=%p, family=%d", ctx, sk->sk_family); in subflow_ulp_init()
1914 * when the subflow is still unaccepted in subflow_ulp_release()
1969 /* this is the first subflow, id is always 0 */ in subflow_ulp_clone()
1981 /* the subflow req id is valid, fetched via subflow_check_req() in subflow_ulp_clone()
1990 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in tcp_release_cb_override() local
1993 /* process and clear all the pending actions, but leave the subflow into in tcp_release_cb_override()
1997 status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); in tcp_release_cb_override()
2006 /* closing a listener subflow requires a great deal of care. in tcp_abort_override()
2045 panic("MPTCP: failed to init subflow v4 request sock ops\n"); in mptcp_subflow_init()
2074 panic("MPTCP: failed to init subflow v6 request sock ops\n"); in mptcp_subflow_init()