/linux-5.10/net/tls/
trace.h:
    17  TP_PROTO(struct sock *sk, int dir, u32 tcp_seq, u8 *rec_no, int ret),
    19  TP_ARGS(sk, dir, tcp_seq, rec_no, ret),
    25  __field( u32, tcp_seq )
    33  __entry->tcp_seq = tcp_seq;
    39  __entry->sk, __entry->dir, __entry->tcp_seq, __entry->rec_no,
    46  TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, u32 rec_len,
    49  TP_ARGS(sk, tcp_seq, rec_no, rec_len, encrypted, decrypted),
    54  __field( u32, tcp_seq )
    63  __entry->tcp_seq = tcp_seq;
    71  __entry->sk, __entry->tcp_seq,
    [all …]
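The trace.h hits above are fragments of TLS tracepoint definitions: TP_PROTO/TP_ARGS declare the probe signature, __field() lays out the ring-buffer record, TP_fast_assign() copies tcp_seq into it, and TP_printk() formats it. A minimal sketch of how those fragments assemble, with a hypothetical event name and trace system (the real file defines the net/tls device events, not this one):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM tls_example    /* hypothetical system name */

    #if !defined(_TRACE_TLS_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_TLS_EXAMPLE_H

    #include <linux/tracepoint.h>
    #include <net/sock.h>

    TRACE_EVENT(tls_example_seq,        /* hypothetical event name */

        TP_PROTO(struct sock *sk, u32 tcp_seq),

        TP_ARGS(sk, tcp_seq),

        TP_STRUCT__entry(
            __field(struct sock *, sk)
            __field(u32, tcp_seq)       /* TCP seq at the record boundary */
        ),

        TP_fast_assign(
            __entry->sk = sk;
            __entry->tcp_seq = tcp_seq;
        ),

        TP_printk("sk=%p tcp_seq=%u", __entry->sk, __entry->tcp_seq)
    );

    #endif /* _TRACE_TLS_EXAMPLE_H */

    #include <trace/define_trace.h>
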
tls_device_fallback.c:
    237  u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);               in fill_sg_in() local
    244  record = tls_get_record(ctx, tcp_seq, rcd_sn);        in fill_sg_in()
    250  *sync_size = tcp_seq - tls_record_start_seq(record);  in fill_sg_in()
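fill_sg_in() in the device-offload fallback path looks up the TLS record that owns the retransmitted segment's sequence number, then derives how many bytes of that record precede the segment. A standalone sketch of that arithmetic with hypothetical values; the unsigned 32-bit subtraction stays correct even when the record spans a sequence-number wrap:

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors: *sync_size = tcp_seq - tls_record_start_seq(record) */
    static uint32_t sync_size(uint32_t tcp_seq, uint32_t record_start_seq)
    {
        return tcp_seq - record_start_seq;
    }

    int main(void)
    {
        uint32_t rec_start = 4294966296u;        /* 1000 bytes before the 2^32 wrap */
        uint32_t seg_seq   = rec_start + 1000u;  /* wraps around to 0 */

        printf("%u\n", sync_size(seg_seq, rec_start));  /* prints 1000 */
        return 0;
    }
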
/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
tls_rxtx.c:
    102  u32 tcp_seq, struct sync_info *info)                              in mlx5e_tls_get_sync_data() argument
    110  record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);  in mlx5e_tls_get_sync_data()
    115  sync_size = tcp_seq - tls_record_start_seq(record);               in mlx5e_tls_get_sync_data()
    144  struct sk_buff *nskb, u32 tcp_seq,                                in mlx5e_tls_complete_sync_skb() argument
    164  tcp_seq -= data_len;                                              in mlx5e_tls_complete_sync_skb()
    165  th->seq = htonl(tcp_seq);                                         in mlx5e_tls_complete_sync_skb()
    177  pet->content.send.first_seq = htons(tcp_seq);                     in mlx5e_tls_complete_sync_skb()
    191  u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);                           in mlx5e_tls_handle_ooo() local
    200  if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {           in mlx5e_tls_handle_ooo()
    236  context->expected_seq = tcp_seq + skb->len - headln;              in mlx5e_tls_handle_ooo()
    [all …]
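mlx5e_tls_handle_ooo() runs when a transmitted skb's sequence does not match the sequence the NIC expects next: the driver fetches the owning record, prepends the missed bytes in a sync skb, and advances expected_seq past the current skb's payload (line 236). A hypothetical standalone version of that bookkeeping, not the driver's actual structs:

    #include <stdbool.h>
    #include <stdint.h>

    struct tx_tls_ctx {
        uint32_t expected_seq;  /* next in-order TCP sequence to transmit */
    };

    /* true if the skb starting at tcp_seq (skb_len bytes, of which headln
     * are headers) is the in-order continuation; otherwise the driver
     * must resynchronize the NIC before sending */
    static bool tls_tx_in_order(struct tx_tls_ctx *ctx, uint32_t tcp_seq,
                                uint32_t skb_len, uint32_t headln)
    {
        if (tcp_seq != ctx->expected_seq)
            return false;                               /* out-of-order */
        ctx->expected_seq = tcp_seq + skb_len - headln; /* as in line 236 */
        return true;
    }
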
ktls_tx.c:
    225  u32 tcp_seq, int datalen, struct tx_sync_info *info)                         in tx_sync_info_get() argument
    235  record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);                     in tx_sync_info_get()
    250  ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));   in tx_sync_info_get()
    260  info->sync_len = tcp_seq - tls_record_start_seq(record);                     in tx_sync_info_get()
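The ends_before test at line 250 guards against data that belongs entirely to a record the stack has already freed. before() is the kernel's wraparound-safe sequence comparison from include/net/tcp.h, reproduced here for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* kernel's before(): signed view of the unsigned difference stays
     * correct across the 2^32 sequence wrap */
    static bool before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    /* true when the data [tcp_seq, tcp_seq + datalen) ends before the
     * record even starts, i.e. it belongs to an already-released record */
    static bool ends_before(uint32_t tcp_seq, int datalen,
                            uint32_t record_start_seq)
    {
        return before(tcp_seq + datalen - 1, record_start_seq);
    }
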
/linux-5.10/drivers/net/ethernet/netronome/nfp/crypto/
fw.h:
    14  __be32 tcp_seq;    member
    54  __be32 tcp_seq;    member
    90  __be32 tcp_seq;    member
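All three firmware command structs carry the sequence number as __be32 because the NFP firmware consumes it in network byte order; conversion happens once at the boundary, as the cpu_to_be32() calls in tls.c below show. A sketch with a hypothetical struct name:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_fw_cmd {            /* hypothetical message layout */
        __be32 tcp_seq;                /* big-endian, as firmware expects */
    };

    static inline void example_fw_cmd_set_seq(struct example_fw_cmd *cmd,
                                              u32 host_seq)
    {
        cmd->tcp_seq = cpu_to_be32(host_seq);  /* convert once, at the edge */
    }
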
tls.c:
    335  back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);   in nfp_net_tls_add()
    445  req->tcp_seq = cpu_to_be32(seq);                     in nfp_net_tls_resync()
    481  __be32 tcp_seq;                                      in nfp_net_tls_rx_resync_req() local
    530  memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));    in nfp_net_tls_rx_resync_req()
    531  tls_offload_rx_resync_request(sk, tcp_seq);          in nfp_net_tls_rx_resync_req()
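At lines 530-531 the RX resync request is read out of a packet buffer with no alignment guarantee, so the __be32 is copied with memcpy() instead of being dereferenced, then passed (still big-endian) to the TLS core. A sketch of the pattern; the request struct name is hypothetical, while tls_offload_rx_resync_request() is the real net/tls helper:

    #include <linux/string.h>
    #include <net/tls.h>

    struct example_resync_req {        /* stand-in for the fw.h request */
        __be32 tcp_seq;
    };

    static void example_rx_resync(struct sock *sk,
                                  const struct example_resync_req *req)
    {
        __be32 tcp_seq;

        memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq)); /* unaligned-safe */
        tls_offload_rx_resync_request(sk, tcp_seq);       /* hand off to TLS core */
    }
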
/linux-5.10/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/
chcr_ktls.c:
    835   struct sge_eth_txq *q, u64 tcp_seq,           in chcr_ktls_xmit_tcb_cpls() argument
    868   if (first_wr || tcp_seq != tx_info->prev_seq) {   in chcr_ktls_xmit_tcb_cpls()
    872   TCB_TX_MAX_V(tcp_seq), 0);                    in chcr_ktls_xmit_tcb_cpls()
    876   if (tcp_seq != tx_info->prev_seq || offset) {     in chcr_ktls_xmit_tcb_cpls()
    885   if (tcp_seq != tx_info->prev_seq)              in chcr_ktls_xmit_tcb_cpls()
    1094  struct sge_eth_txq *q, u32 tcp_seq,           in chcr_ktls_xmit_wr_complete() argument
    1220  tx_data->rsvd = htonl(tcp_seq);               in chcr_ktls_xmit_wr_complete()
    1269  u32 tcp_seq, bool tcp_push, u32 mss,          in chcr_ktls_xmit_wr_short() argument
    1396  tx_data->rsvd = htonl(tcp_seq);               in chcr_ktls_xmit_wr_short()
    1454  struct sk_buff *skb, u32 tcp_seq, u32 mss,    in chcr_ktls_tx_plaintxt() argument
    [all …]
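The conditionals at lines 868-885 rewrite the hardware TCB (TCP control block) sequence fields only when the skb does not continue from the previously sent sequence (a retransmit or out-of-order send), or when transmission starts partway into a record. A hypothetical standalone helper capturing that decision, not the driver's chcr_ktls_xmit_tcb_cpls():

    #include <stdbool.h>
    #include <stdint.h>

    struct tx_state {
        uint64_t prev_seq;  /* last sequence written to the TCB */
    };

    static bool need_tcb_seq_update(struct tx_state *tx, uint64_t tcp_seq,
                                    uint32_t offset, bool first_wr)
    {
        bool update = first_wr || tcp_seq != tx->prev_seq || offset;

        if (tcp_seq != tx->prev_seq)
            tx->prev_seq = tcp_seq;  /* illustrative bookkeeping only */
        return update;
    }
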
/linux-5.10/net/core/
tso.c:
    38  put_unaligned_be32(tso->tcp_seq, &tcph->seq);    in tso_build_hdr()
    56  tso->tcp_seq += size;  /* not worth avoiding this operation for UDP */    in tso_build_data()
    79  tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0;    in tso_start()
/linux-5.10/include/net/
tso.h:
    16  u32 tcp_seq;    member
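Taken together, the tso.c and tso.h hits show the whole lifecycle of tcp_seq during software TSO: tso_start() seeds it from the original header (zero for UDP), tso_build_hdr() stamps it into each emitted segment with put_unaligned_be32(), and tso_build_data() advances it by the payload just consumed. A runnable userspace sketch of that loop; the types and the byte-order helper are illustrative stand-ins for struct tso_t and put_unaligned_be32():

    #include <stdint.h>
    #include <stdio.h>

    struct tso_state {
        uint32_t tcp_seq;                  /* mirrors tso_t::tcp_seq */
    };

    /* stand-in for put_unaligned_be32(): byte-wise store, no alignment needed */
    static void put_be32(uint32_t v, uint8_t *p)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    int main(void)
    {
        struct tso_state tso = { .tcp_seq = 1000 };  /* from ntohl(th->seq) */
        uint8_t seq_field[4];
        size_t mss = 1448, left = 4000;

        while (left) {
            size_t size = left < mss ? left : mss;

            put_be32(tso.tcp_seq, seq_field);        /* as in tso_build_hdr() */
            printf("segment seq=%u len=%zu\n", tso.tcp_seq, size);
            tso.tcp_seq += size;                     /* as in tso_build_data() */
            left -= size;
        }
        return 0;
    }
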