Lines Matching +full:record +full:- +full:size (net/tls/tls_device.c, Linux kernel TLS device offload)

13  *      - Redistributions of source code must retain the above
17  *      - Redistributions in binary form must reproduce the above
59 if (ctx->tx_conf == TLS_HW) in tls_device_free_ctx()
62 if (ctx->rx_conf == TLS_HW) in tls_device_free_ctx()
72 struct tls_context *ctx = offload_ctx->ctx; in tls_device_tx_del_task()
78 netdev = rcu_dereference_protected(ctx->netdev, in tls_device_tx_del_task()
79 !refcount_read(&ctx->refcount)); in tls_device_tx_del_task()
81 netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); in tls_device_tx_del_task()
83 ctx->netdev = NULL; in tls_device_tx_del_task()
94 if (unlikely(!refcount_dec_and_test(&ctx->refcount))) { in tls_device_queue_ctx_destruction()
99 list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */ in tls_device_queue_ctx_destruction()
104 netdev = rcu_dereference_protected(ctx->netdev, in tls_device_queue_ctx_destruction()
105 !refcount_read(&ctx->refcount)); in tls_device_queue_ctx_destruction()
107 async_cleanup = netdev && ctx->tx_conf == TLS_HW; in tls_device_queue_ctx_destruction()
114 queue_work(destruct_wq, &offload_ctx->destruct_work); in tls_device_queue_ctx_destruction()
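
The destruction path above is a reference-count handoff: the caller that drops the last reference does not free the context inline but queues destruct_work, because the driver's tls_dev_del() callback may sleep. A minimal userspace sketch of the pattern, using C11 atomics and a direct call in place of the kernel's refcount_t and queue_work() (struct ctx, ctx_put and queue_destruct_work are illustrative names):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctx {
        atomic_int refcount;
    };

    /* stand-in for queue_work(destruct_wq, &offload_ctx->destruct_work) */
    static void queue_destruct_work(struct ctx *c)
    {
        printf("deferred teardown of ctx %p\n", (void *)c);
        free(c);
    }

    static void ctx_put(struct ctx *c)
    {
        /* last reference gone: hand off to the worker instead of
         * tearing down inline, since the device callback may sleep */
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
            queue_destruct_work(c);
    }

    int main(void)
    {
        struct ctx *c = malloc(sizeof(*c));
        if (!c)
            return 1;
        atomic_init(&c->refcount, 2);
        ctx_put(c); /* still referenced elsewhere: nothing happens */
        ctx_put(c); /* final put: deferred destruction runs */
        return 0;
    }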
129 netdev = netdev_sk_get_lowest_dev(dst->dev, sk); in get_netdev_for_sock()
138 static void destroy_record(struct tls_record_info *record) in destroy_record() argument
142 for (i = 0; i < record->num_frags; i++) in destroy_record()
143 __skb_frag_unref(&record->frags[i], false); in destroy_record()
144 kfree(record); in destroy_record()
151 list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) { in delete_all_records()
152 list_del(&info->list); in delete_all_records()
156 offload_ctx->retransmit_hint = NULL; in delete_all_records()
172 spin_lock_irqsave(&ctx->lock, flags); in tls_icsk_clean_acked()
173 info = ctx->retransmit_hint; in tls_icsk_clean_acked()
174 if (info && !before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
175 ctx->retransmit_hint = NULL; in tls_icsk_clean_acked()
177 list_for_each_entry_safe(info, temp, &ctx->records_list, list) { in tls_icsk_clean_acked()
178 if (before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
180 list_del(&info->list); in tls_icsk_clean_acked()
186 ctx->unacked_record_sn += deleted_records; in tls_icsk_clean_acked()
187 spin_unlock_irqrestore(&ctx->lock, flags); in tls_icsk_clean_acked()
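
tls_icsk_clean_acked() frees every record whose end_seq is at or below the newly ACKed TCP sequence number, relying on the wraparound-safe before() comparison. A self-contained sketch of that comparison (the same signed-difference trick as the kernel's before() in include/net/tcp.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe 32-bit TCP sequence comparison: seq1 is "before"
     * seq2 when the signed difference is negative. Stays correct when
     * the sequence space wraps past 2^32. */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        printf("%d\n", before(10, 20));                   /* 1 */
        printf("%d\n", before(0xfffffff0u, 0x00000010u)); /* 1: across the wrap */
        printf("%d\n", before(20, 10));                   /* 0 */
        return 0;
    }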
191 * socket and no in-flight SKBs associated with this
199 tls_ctx->sk_destruct(sk); in tls_device_sk_destruct()
201 if (tls_ctx->tx_conf == TLS_HW) { in tls_device_sk_destruct()
202 if (ctx->open_record) in tls_device_sk_destruct()
203 destroy_record(ctx->open_record); in tls_device_sk_destruct()
205 crypto_free_aead(ctx->aead_send); in tls_device_sk_destruct()
225 WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags)); in tls_offload_tx_resync_request()
239 TCP_SKB_CB(skb)->eor = 1; in tls_device_resync_tx()
241 rcd_sn = tls_ctx->tx.rec_seq; in tls_device_resync_tx()
245 netdev = rcu_dereference_protected(tls_ctx->netdev, in tls_device_resync_tx()
248 err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, in tls_device_resync_tx()
255 clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags); in tls_device_resync_tx()
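
The two functions above form a small handshake: tls_offload_tx_resync_request() latches TLS_TX_SYNC_SCHED exactly once (warning on a double set), and tls_device_resync_tx() performs the device resync and releases the bit. A rough userspace analogue with a C11 atomic_bool standing in for test_and_set_bit()/clear_bit_unlock() (request_resync and maybe_resync are invented names for the two sides):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool resync_sched;

    /* datapath side: latch the request exactly once */
    static void request_resync(void)
    {
        if (atomic_exchange(&resync_sched, 1))
            fprintf(stderr, "resync already pending\n"); /* WARN_ON analogue */
    }

    /* record-push side: perform and release a pending request */
    static void maybe_resync(unsigned int seq)
    {
        if (atomic_load(&resync_sched)) {
            printf("device resync at seq %u\n", seq);
            atomic_store(&resync_sched, 0); /* like clear_bit_unlock() */
        }
    }

    int main(void)
    {
        request_resync();
        request_resync(); /* second request: warning, flag already set */
        maybe_resync(1000);
        return 0;
    }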
258 static void tls_append_frag(struct tls_record_info *record, in tls_append_frag() argument
260 int size) in tls_append_frag() argument
264 frag = &record->frags[record->num_frags - 1]; in tls_append_frag()
265 if (skb_frag_page(frag) == pfrag->page && in tls_append_frag()
266 skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) { in tls_append_frag()
267 skb_frag_size_add(frag, size); in tls_append_frag()
270 skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset, in tls_append_frag()
271 size); in tls_append_frag()
272 ++record->num_frags; in tls_append_frag()
273 get_page(pfrag->page); in tls_append_frag()
276 pfrag->offset += size; in tls_append_frag()
277 record->len += size; in tls_append_frag()
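
tls_append_frag() coalesces: when the new bytes start exactly where the record's last fragment ends in the same page, it only grows that fragment's size; otherwise it consumes a new fragment slot and takes a page reference. A compilable sketch with plain buffers in place of skb_frag_t/page_frag (struct frag, struct rec and append_frag are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct frag { const char *buf; size_t off, len; };
    struct rec  { struct frag frags[8]; int num_frags; size_t len; };

    static void append_frag(struct rec *r, const char *buf, size_t off, size_t sz)
    {
        if (r->num_frags) {
            struct frag *f = &r->frags[r->num_frags - 1];
            if (f->buf == buf && f->off + f->len == off) {
                f->len += sz; /* contiguous: coalesce into the last frag */
                r->len += sz;
                return;
            }
        }
        r->frags[r->num_frags++] = (struct frag){ buf, off, sz };
        r->len += sz;
    }

    int main(void)
    {
        static const char page[4096];
        struct rec r = { 0 };

        append_frag(&r, page, 0, 100);
        append_frag(&r, page, 100, 50); /* contiguous: coalesced */
        append_frag(&r, page, 300, 10); /* gap: new fragment */
        printf("%d frags, %zu bytes\n", r.num_frags, r.len); /* 2 frags, 160 */
        return 0;
    }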
283 struct tls_record_info *record, in tls_push_record() argument
286 struct tls_prot_info *prot = &ctx->prot_info; in tls_push_record()
291 record->end_seq = tp->write_seq + record->len; in tls_push_record()
292 list_add_tail_rcu(&record->list, &offload_ctx->records_list); in tls_push_record()
293 offload_ctx->open_record = NULL; in tls_push_record()
295 if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags)) in tls_push_record()
296 tls_device_resync_tx(sk, ctx, tp->write_seq); in tls_push_record()
298 tls_advance_record_sn(sk, prot, &ctx->tx); in tls_push_record()
300 for (i = 0; i < record->num_frags; i++) { in tls_push_record()
301 frag = &record->frags[i]; in tls_push_record()
302 sg_unmark_end(&offload_ctx->sg_tx_data[i]); in tls_push_record()
303 sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag), in tls_push_record()
308 sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]); in tls_push_record()
311 return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); in tls_push_record()
316 struct tls_record_info *record, in tls_device_record_close() argument
320 struct tls_prot_info *prot = &ctx->prot_info; in tls_device_record_close()
325 * use socket memory to improve coalescing (re-using a single buffer in tls_device_record_close()
329 if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) && in tls_device_record_close()
330 !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) { in tls_device_record_close()
335 tls_append_frag(record, pfrag, prot->tag_size); in tls_device_record_close()
338 tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), in tls_device_record_close()
339 record->len - prot->overhead_size, in tls_device_record_close()
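
tls_device_record_close() appends room for the authentication tag and then writes the TLS record header into the first fragment via tls_fill_prepend(). A simplified sketch of just the 5-byte TLS 1.2 header layout (content type, wire version 3.3, big-endian length covering payload plus tag); note the real helper also writes the explicit AES-GCM nonce, which this sketch omits:

    #include <stdint.h>
    #include <stdio.h>

    /* 5-byte TLS record header: type, wire version, then the big-endian
     * length of everything after the header (payload + tag) */
    static void fill_prepend(uint8_t *hdr, uint16_t body_len, uint8_t type)
    {
        hdr[0] = type; /* e.g. 23 = application_data */
        hdr[1] = 0x03;
        hdr[2] = 0x03;
        hdr[3] = body_len >> 8;
        hdr[4] = body_len & 0xff;
    }

    int main(void)
    {
        uint8_t hdr[5];

        fill_prepend(hdr, 1024 + 16, 23); /* 1 KB payload + 16-byte GCM tag */
        printf("type=%u ver=%x.%x len=%u\n", hdr[0], hdr[1], hdr[2],
               (unsigned)((hdr[3] << 8) | hdr[4]));
        return 0;
    }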
347 struct tls_record_info *record; in tls_create_new_record() local
350 record = kmalloc(sizeof(*record), GFP_KERNEL); in tls_create_new_record()
351 if (!record) in tls_create_new_record()
352 return -ENOMEM; in tls_create_new_record()
354 frag = &record->frags[0]; in tls_create_new_record()
355 skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset, in tls_create_new_record()
358 get_page(pfrag->page); in tls_create_new_record()
359 pfrag->offset += prepend_size; in tls_create_new_record()
361 record->num_frags = 1; in tls_create_new_record()
362 record->len = prepend_size; in tls_create_new_record()
363 offload_ctx->open_record = record; in tls_create_new_record()
374 if (!offload_ctx->open_record) { in tls_do_allocation()
376 sk->sk_allocation))) { in tls_do_allocation()
377 READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk); in tls_do_allocation()
379 return -ENOMEM; in tls_do_allocation()
386 if (pfrag->size > pfrag->offset) in tls_do_allocation()
391 return -ENOMEM; in tls_do_allocation()
400 pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1); in tls_device_copy_data()
404 return -EFAULT; in tls_device_copy_data()
405 bytes -= pre_copy; in tls_device_copy_data()
411 return -EFAULT; in tls_device_copy_data()
412 bytes -= nocache; in tls_device_copy_data()
416 return -EFAULT; in tls_device_copy_data()
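
tls_device_copy_data() splits the user copy into a short head that reaches the next cache-line boundary, a non-temporal (cache-bypassing) middle, and a tail, so bulk payload does not pollute the cache. The head length comes from the bit trick on line 400; a sketch of that formula, assuming 64-byte cache lines:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SMP_CACHE_BYTES 64 /* assumption: 64-byte cache lines */

    /* bytes needed to reach the next cache-line boundary from addr
     * (0 when already aligned); same formula as line 400 above */
    static size_t bytes_to_alignment(uintptr_t addr)
    {
        return ~(addr - 1) & (SMP_CACHE_BYTES - 1);
    }

    int main(void)
    {
        printf("%zu\n", bytes_to_alignment(0x1000)); /* 0: aligned   */
        printf("%zu\n", bytes_to_alignment(0x1001)); /* 63 to go     */
        printf("%zu\n", bytes_to_alignment(0x103f)); /* 1 to go      */
        return 0;
    }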
423 size_t size, int flags, in tls_push_data() argument
427 struct tls_prot_info *prot = &tls_ctx->prot_info; in tls_push_data()
429 struct tls_record_info *record; in tls_push_data() local
432 size_t orig_size = size; in tls_push_data()
442 return -EOPNOTSUPP; in tls_push_data()
445 return -EINVAL; in tls_push_data()
447 if (unlikely(sk->sk_err)) in tls_push_data()
448 return -sk->sk_err; in tls_push_data()
462 /* TLS_HEADER_SIZE is not counted as part of the TLS record, and in tls_push_data()
466 prot->prepend_size; in tls_push_data()
468 rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size); in tls_push_data()
474 record = ctx->open_record; in tls_push_data()
475 if (!record) in tls_push_data()
480 * record with type != in tls_push_data()
483 size = orig_size; in tls_push_data()
484 destroy_record(record); in tls_push_data()
485 ctx->open_record = NULL; in tls_push_data()
486 } else if (record->len > prot->prepend_size) { in tls_push_data()
493 record = ctx->open_record; in tls_push_data()
495 copy = min_t(size_t, size, max_open_record_len - record->len); in tls_push_data()
505 rc = -EIO; in tls_push_data()
512 rc = -EIO; in tls_push_data()
517 zc_pfrag.size = copy; in tls_push_data()
518 tls_append_frag(record, &zc_pfrag, copy); in tls_push_data()
520 copy = min_t(size_t, copy, pfrag->size - pfrag->offset); in tls_push_data()
522 rc = tls_device_copy_data(page_address(pfrag->page) + in tls_push_data()
523 pfrag->offset, copy, in tls_push_data()
527 tls_append_frag(record, pfrag, copy); in tls_push_data()
530 size -= copy; in tls_push_data()
531 if (!size) { in tls_push_data()
542 if (done || record->len >= max_open_record_len || in tls_push_data()
543 (record->num_frags >= MAX_SKB_FRAGS - 1)) { in tls_push_data()
544 tls_device_record_close(sk, tls_ctx, record, in tls_push_data()
550 record, in tls_push_data()
557 tls_ctx->pending_open_record_frags = more; in tls_push_data()
559 if (orig_size - size > 0) in tls_push_data()
560 rc = orig_size - size; in tls_push_data()
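
The tls_push_data() loop fills the open record with min(remaining, space left in the record) bytes per iteration and closes the record when the payload is consumed, the record reaches max_open_record_len, or the fragment array is nearly full. A sketch of just the length-driven chunking policy (the 16 KB cap is illustrative, and the MAX_SKB_FRAGS limit is omitted for brevity):

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_OPEN_RECORD_LEN (1 << 14) /* illustrative: 16 KB record cap */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    static void push_data(size_t size)
    {
        size_t record_len = 0; /* stands in for ctx->open_record->len */

        while (size) {
            size_t copy = min_sz(size, MAX_OPEN_RECORD_LEN - record_len);

            record_len += copy;
            size -= copy;

            if (!size || record_len >= MAX_OPEN_RECORD_LEN) {
                printf("close record of %zu bytes\n", record_len);
                record_len = 0; /* next iteration opens a new record */
            }
        }
    }

    int main(void) { push_data(40000); return 0; }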
565 int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in tls_device_sendmsg() argument
571 if (!tls_ctx->zerocopy_sendfile) in tls_device_sendmsg()
572 msg->msg_flags &= ~MSG_SPLICE_PAGES; in tls_device_sendmsg()
574 mutex_lock(&tls_ctx->tx_lock); in tls_device_sendmsg()
577 if (unlikely(msg->msg_controllen)) { in tls_device_sendmsg()
583 rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags, in tls_device_sendmsg()
588 mutex_unlock(&tls_ctx->tx_lock); in tls_device_sendmsg()
594 struct sock *sk = sock->sk; in tls_device_splice_eof()
601 mutex_lock(&tls_ctx->tx_lock); in tls_device_splice_eof()
610 mutex_unlock(&tls_ctx->tx_lock); in tls_device_splice_eof()
616 u64 record_sn = context->hint_record_sn; in tls_get_record()
619 info = context->retransmit_hint; in tls_get_record()
621 before(seq, info->end_seq - info->len)) { in tls_get_record()
625 info = list_first_entry_or_null(&context->records_list, in tls_get_record()
629 /* send the start_marker record if seq number is before the in tls_get_record()
630 * tls offload start marker sequence number. This record is in tls_get_record()
637 /* we have the first record, get the last record to see in tls_get_record()
640 last = list_last_entry(&context->records_list, in tls_get_record()
644 last->end_seq)) in tls_get_record()
647 record_sn = context->unacked_record_sn; in tls_get_record()
652 list_for_each_entry_from_rcu(info, &context->records_list, list) { in tls_get_record()
653 if (before(seq, info->end_seq)) { in tls_get_record()
654 if (!context->retransmit_hint || in tls_get_record()
655 after(info->end_seq, in tls_get_record()
656 context->retransmit_hint->end_seq)) { in tls_get_record()
657 context->hint_record_sn = record_sn; in tls_get_record()
658 context->retransmit_hint = info; in tls_get_record()
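
tls_get_record() resolves a TCP sequence number to the TLS record containing it, resuming from retransmit_hint when seq falls at or past that record's start, and caching the match as the next hint. A self-contained sketch over a singly linked list (struct record, get_record and the hint out-parameter are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct record {
        struct record *next;
        uint32_t end_seq;
        uint32_t len;
    };

    static int before(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s1 - s2) < 0;
    }

    static struct record *get_record(struct record *head, struct record **hint,
                                     uint32_t seq)
    {
        /* resume from the hint when seq is inside or past it */
        struct record *info =
            (*hint && !before(seq, (*hint)->end_seq - (*hint)->len))
            ? *hint : head;

        for (; info; info = info->next) {
            if (before(seq, info->end_seq)) {
                *hint = info; /* cache for the next lookup */
                return info;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct record r2 = { NULL, 3000, 1000 };
        struct record r1 = { &r2, 2000, 1000 };
        struct record *hint = NULL;

        printf("%u\n", get_record(&r1, &hint, 2500)->end_seq); /* 3000 */
        printf("%u\n", get_record(&r1, &hint, 2999)->end_seq); /* 3000, via hint */
        return 0;
    }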
684 gfp_t sk_allocation = sk->sk_allocation; in tls_device_write_space()
686 WARN_ON_ONCE(sk->sk_write_pending); in tls_device_write_space()
688 sk->sk_allocation = GFP_ATOMIC; in tls_device_write_space()
692 sk->sk_allocation = sk_allocation; in tls_device_write_space()
702 trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type); in tls_device_resync_rx()
704 netdev = rcu_dereference(tls_ctx->netdev); in tls_device_resync_rx()
706 netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn, in tls_device_resync_rx()
727 if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) in tls_device_rx_resync_async()
736 resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) in tls_device_rx_resync_async()
737 resync_async->log[resync_async->loglen++] = *seq; in tls_device_rx_resync_async()
739 resync_async->rcd_delta++; in tls_device_rx_resync_async()
747 for (i = 0; i < resync_async->loglen; i++) in tls_device_rx_resync_async()
748 if (req_seq == resync_async->log[i] && in tls_device_rx_resync_async()
749 atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) { in tls_device_rx_resync_async()
750 *rcd_delta = resync_async->rcd_delta - i; in tls_device_rx_resync_async()
752 resync_async->loglen = 0; in tls_device_rx_resync_async()
753 resync_async->rcd_delta = 0; in tls_device_rx_resync_async()
757 resync_async->loglen = 0; in tls_device_rx_resync_async()
758 resync_async->rcd_delta = 0; in tls_device_rx_resync_async()
761 atomic64_try_cmpxchg(&resync_async->req, in tls_device_rx_resync_async()
779 if (tls_ctx->rx_conf != TLS_HW) in tls_device_rx_resync_new_rec()
781 if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) in tls_device_rx_resync_new_rec()
784 prot = &tls_ctx->prot_info; in tls_device_rx_resync_new_rec()
786 memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); in tls_device_rx_resync_new_rec()
788 switch (rx_ctx->resync_type) { in tls_device_rx_resync_new_rec()
790 resync_req = atomic64_read(&rx_ctx->resync_req); in tls_device_rx_resync_new_rec()
792 seq += TLS_HEADER_SIZE - 1; in tls_device_rx_resync_new_rec()
796 !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) in tls_device_rx_resync_new_rec()
800 if (likely(!rx_ctx->resync_nh_do_now)) in tls_device_rx_resync_new_rec()
813 rx_ctx->resync_nh_do_now = 0; in tls_device_rx_resync_new_rec()
815 tls_bigint_increment(rcd_sn, prot->rec_seq_size); in tls_device_rx_resync_new_rec()
818 resync_req = atomic64_read(&rx_ctx->resync_async->req); in tls_device_rx_resync_new_rec()
823 if (!tls_device_rx_resync_async(rx_ctx->resync_async, in tls_device_rx_resync_new_rec()
840 if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT) in tls_device_core_ctrl_rx_resync()
843 if (ctx->resync_nh_do_now) in tls_device_core_ctrl_rx_resync()
845 /* seen decrypted fragments since last fully-failed record */ in tls_device_core_ctrl_rx_resync()
846 if (ctx->resync_nh_reset) { in tls_device_core_ctrl_rx_resync()
847 ctx->resync_nh_reset = 0; in tls_device_core_ctrl_rx_resync()
848 ctx->resync_nh.decrypted_failed = 1; in tls_device_core_ctrl_rx_resync()
849 ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL; in tls_device_core_ctrl_rx_resync()
853 if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt) in tls_device_core_ctrl_rx_resync()
857 if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL) in tls_device_core_ctrl_rx_resync()
858 ctx->resync_nh.decrypted_tgt *= 2; in tls_device_core_ctrl_rx_resync()
860 ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL; in tls_device_core_ctrl_rx_resync()
865 if (tcp_inq(sk) > rxm->full_len) { in tls_device_core_ctrl_rx_resync()
867 ctx->resync_nh_do_now = 1; in tls_device_core_ctrl_rx_resync()
869 struct tls_prot_info *prot = &tls_ctx->prot_info; in tls_device_core_ctrl_rx_resync()
872 memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); in tls_device_core_ctrl_rx_resync()
873 tls_bigint_increment(rcd_sn, prot->rec_seq_size); in tls_device_core_ctrl_rx_resync()
875 tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq, in tls_device_core_ctrl_rx_resync()
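
The core-driven resync above backs off: each fully-failed record doubles decrypted_tgt (the failure count required before the next resync attempt) until it reaches TLS_DEVICE_RESYNC_NH_MAX_IVAL, after which it grows linearly. A sketch of that schedule, with illustrative values for the start/max interval constants:

    #include <stdio.h>

    #define RESYNC_NH_START_IVAL 2   /* illustrative constants */
    #define RESYNC_NH_MAX_IVAL   128

    int main(void)
    {
        unsigned int tgt = RESYNC_NH_START_IVAL;

        for (int i = 0; i < 10; i++) {
            printf("next resync after %u failed records\n", tgt);
            if (tgt < RESYNC_NH_MAX_IVAL)
                tgt *= 2;                  /* exponential phase */
            else
                tgt += RESYNC_NH_MAX_IVAL; /* linear phase */
        }
        return 0;
    }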
891 cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type); in tls_device_reencrypt()
892 DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable); in tls_device_reencrypt()
895 orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv, in tls_device_reencrypt()
896 sk->sk_allocation); in tls_device_reencrypt()
898 return -ENOMEM; in tls_device_reencrypt()
907 offset = rxm->offset; in tls_device_reencrypt()
911 rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv); in tls_device_reencrypt()
912 err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv); in tls_device_reencrypt()
918 if (err != -EBADMSG) in tls_device_reencrypt()
923 data_len = rxm->full_len - cipher_desc->tag; in tls_device_reencrypt()
926 copy = min_t(int, skb_pagelen(skb) - offset, data_len); in tls_device_reencrypt()
928 if (skb->decrypted) { in tls_device_reencrypt()
946 if (pos + skb_iter->len <= offset) in tls_device_reencrypt()
948 if (pos >= data_len + rxm->offset) in tls_device_reencrypt()
951 frag_pos = offset - pos; in tls_device_reencrypt()
952 copy = min_t(int, skb_iter->len - frag_pos, in tls_device_reencrypt()
953 data_len + rxm->offset - offset); in tls_device_reencrypt()
955 if (skb_iter->decrypted) { in tls_device_reencrypt()
964 pos += skb_iter->len; in tls_device_reencrypt()
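
tls_device_reencrypt() decrypts the whole record in software into a side buffer, then copies plaintext back only over the stretches the NIC had not already decrypted, walking the skb's linear part and each fragment and honouring the per-skb decrypted flag. A toy sketch of that selective copy-back with a flat segment array in place of the skb geometry (struct seg and fixup are invented names):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct seg { size_t off, len; bool decrypted; };

    static void fixup(char *payload, const char *sw_plain,
                      const struct seg *segs, int nsegs)
    {
        for (int i = 0; i < nsegs; i++)
            if (!segs[i].decrypted) /* NIC left this part encrypted */
                memcpy(payload + segs[i].off, sw_plain + segs[i].off,
                       segs[i].len);
    }

    int main(void)
    {
        char payload[8] = "XXXXYYYY"; /* X = NIC-decrypted, Y = ciphertext */
        const char plain[8] = "abcdefgh";
        struct seg segs[2] = { { 0, 4, true }, { 4, 4, false } };

        fixup(payload, plain, segs, 2);
        printf("%.8s\n", payload); /* XXXXefgh */
        return 0;
    }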
981 is_decrypted = skb->decrypted; in tls_device_decrypted()
988 trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len, in tls_device_decrypted()
989 tls_ctx->rx.rec_seq, rxm->full_len, in tls_device_decrypted()
992 if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { in tls_device_decrypted()
1003 /* Return immediately if the record is either entirely plaintext or in tls_device_decrypted()
1005 * record. in tls_device_decrypted()
1008 ctx->resync_nh_reset = 1; in tls_device_decrypted()
1016 ctx->resync_nh_reset = 1; in tls_device_decrypted()
1023 if (sk->sk_destruct != tls_device_sk_destruct) { in tls_device_attach()
1024 refcount_set(&ctx->refcount, 1); in tls_device_attach()
1026 RCU_INIT_POINTER(ctx->netdev, netdev); in tls_device_attach()
1028 list_add_tail(&ctx->list, &tls_device_list); in tls_device_attach()
1031 ctx->sk_destruct = sk->sk_destruct; in tls_device_attach()
1032 smp_store_release(&sk->sk_destruct, tls_device_sk_destruct); in tls_device_attach()
1045 INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task); in alloc_offload_ctx_tx()
1046 INIT_LIST_HEAD(&offload_ctx->records_list); in alloc_offload_ctx_tx()
1047 spin_lock_init(&offload_ctx->lock); in alloc_offload_ctx_tx()
1048 sg_init_table(offload_ctx->sg_tx_data, in alloc_offload_ctx_tx()
1049 ARRAY_SIZE(offload_ctx->sg_tx_data)); in alloc_offload_ctx_tx()
1051 /* start at rec_seq - 1 to account for the start marker record */ in alloc_offload_ctx_tx()
1052 memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn)); in alloc_offload_ctx_tx()
1053 offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1; in alloc_offload_ctx_tx()
1055 offload_ctx->ctx = ctx; in alloc_offload_ctx_tx()
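
alloc_offload_ctx_tx() seeds unacked_record_sn with be64_to_cpu(rec_seq) - 1 so the zero-length start-marker record occupies the first slot. A userspace sketch of that conversion, assuming glibc's be64toh() as a stand-in for be64_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0, 5 }; /* on-wire BE */
        uint64_t rcd_sn;

        memcpy(&rcd_sn, rec_seq, sizeof(rcd_sn));
        /* start one below the negotiated sequence for the start marker */
        printf("unacked_record_sn = %llu\n",
               (unsigned long long)(be64toh(rcd_sn) - 1)); /* 4 */
        return 0;
    }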
1074 prot = &ctx->prot_info; in tls_set_device_offload()
1076 if (ctx->priv_ctx_tx) in tls_set_device_offload()
1077 return -EEXIST; in tls_set_device_offload()
1082 return -EINVAL; in tls_set_device_offload()
1085 if (!(netdev->features & NETIF_F_HW_TLS_TX)) { in tls_set_device_offload()
1086 rc = -EOPNOTSUPP; in tls_set_device_offload()
1090 crypto_info = &ctx->crypto_send.info; in tls_set_device_offload()
1091 if (crypto_info->version != TLS_1_2_VERSION) { in tls_set_device_offload()
1092 rc = -EOPNOTSUPP; in tls_set_device_offload()
1096 cipher_desc = get_cipher_desc(crypto_info->cipher_type); in tls_set_device_offload()
1097 if (!cipher_desc || !cipher_desc->offloadable) { in tls_set_device_offload()
1098 rc = -EINVAL; in tls_set_device_offload()
1109 memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv); in tls_set_device_offload()
1110 memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq); in tls_set_device_offload()
1114 rc = -ENOMEM; in tls_set_device_offload()
1120 rc = -ENOMEM; in tls_set_device_offload()
1128 start_marker_record->end_seq = tcp_sk(sk)->write_seq; in tls_set_device_offload()
1129 start_marker_record->len = 0; in tls_set_device_offload()
1130 start_marker_record->num_frags = 0; in tls_set_device_offload()
1131 list_add_tail(&start_marker_record->list, &offload_ctx->records_list); in tls_set_device_offload()
1134 ctx->push_pending_record = tls_device_push_pending_record; in tls_set_device_offload()
1138 * So mark the last skb in the write queue as end of record. in tls_set_device_offload()
1142 TCP_SKB_CB(skb)->eor = 1; in tls_set_device_offload()
1153 if (!(netdev->flags & IFF_UP)) { in tls_set_device_offload()
1154 rc = -EINVAL; in tls_set_device_offload()
1158 ctx->priv_ctx_tx = offload_ctx; in tls_set_device_offload()
1159 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, in tls_set_device_offload()
1160 &ctx->crypto_send.info, in tls_set_device_offload()
1161 tcp_sk(sk)->write_seq); in tls_set_device_offload()
1163 tcp_sk(sk)->write_seq, rec_seq, rc); in tls_set_device_offload()
1174 smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); in tls_set_device_offload()
1182 crypto_free_aead(offload_ctx->aead_send); in tls_set_device_offload()
1185 ctx->priv_ctx_tx = NULL; in tls_set_device_offload()
1200 if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) in tls_set_device_offload_rx()
1201 return -EOPNOTSUPP; in tls_set_device_offload_rx()
1206 return -EINVAL; in tls_set_device_offload_rx()
1209 if (!(netdev->features & NETIF_F_HW_TLS_RX)) { in tls_set_device_offload_rx()
1210 rc = -EOPNOTSUPP; in tls_set_device_offload_rx()
1223 if (!(netdev->flags & IFF_UP)) { in tls_set_device_offload_rx()
1224 rc = -EINVAL; in tls_set_device_offload_rx()
1230 rc = -ENOMEM; in tls_set_device_offload_rx()
1233 context->resync_nh_reset = 1; in tls_set_device_offload_rx()
1235 ctx->priv_ctx_rx = context; in tls_set_device_offload_rx()
1240 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, in tls_set_device_offload_rx()
1241 &ctx->crypto_recv.info, in tls_set_device_offload_rx()
1242 tcp_sk(sk)->copied_seq); in tls_set_device_offload_rx()
1243 info = (void *)&ctx->crypto_recv.info; in tls_set_device_offload_rx()
1245 tcp_sk(sk)->copied_seq, info->rec_seq, rc); in tls_set_device_offload_rx()
1261 ctx->priv_ctx_rx = NULL; in tls_set_device_offload_rx()
1275 netdev = rcu_dereference_protected(tls_ctx->netdev, in tls_device_offload_cleanup_rx()
1280 netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, in tls_device_offload_cleanup_rx()
1283 if (tls_ctx->tx_conf != TLS_HW) { in tls_device_offload_cleanup_rx()
1285 rcu_assign_pointer(tls_ctx->netdev, NULL); in tls_device_offload_cleanup_rx()
1287 set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags); in tls_device_offload_cleanup_rx()
1306 rcu_dereference_protected(ctx->netdev, in tls_device_down()
1310 !refcount_inc_not_zero(&ctx->refcount)) in tls_device_down()
1313 list_move(&ctx->list, &list); in tls_device_down()
1321 WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); in tls_device_down()
1326 rcu_assign_pointer(ctx->netdev, NULL); in tls_device_down()
1329 set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags); in tls_device_down()
1332 * TX: no non-encrypted packets will be passed to the driver. in tls_device_down()
1338 if (ctx->tx_conf == TLS_HW) in tls_device_down()
1339 netdev->tlsdev_ops->tls_dev_del(netdev, ctx, in tls_device_down()
1341 if (ctx->rx_conf == TLS_HW && in tls_device_down()
1342 !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) in tls_device_down()
1343 netdev->tlsdev_ops->tls_dev_del(netdev, ctx, in tls_device_down()
1351 * run offload-specific code on this context. in tls_device_down()
1354 list_move_tail(&ctx->list, &tls_device_down_list); in tls_device_down()
1361 if (refcount_dec_and_test(&ctx->refcount)) { in tls_device_down()
1365 list_del(&ctx->list); in tls_device_down()
1382 if (!dev->tlsdev_ops && in tls_dev_event()
1383 !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) in tls_dev_event()
1391 if ((dev->features & NETIF_F_HW_TLS_RX) && in tls_dev_event()
1392 !dev->tlsdev_ops->tls_dev_resync) in tls_dev_event()
1395 if (dev->tlsdev_ops && in tls_dev_event()
1396 dev->tlsdev_ops->tls_dev_add && in tls_dev_event()
1397 dev->tlsdev_ops->tls_dev_del) in tls_dev_event()
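
tls_dev_event()'s NETDEV_REGISTER handling refuses devices that advertise NETIF_F_HW_TLS_TX/RX without a complete tlsdev_ops, and additionally requires tls_dev_resync when RX offload is claimed. A condensed sketch of that validation (the struct names and feature flags here are simplified stand-ins for the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_HW_TLS_TX 0x1 /* stand-ins for NETIF_F_HW_TLS_TX/RX */
    #define F_HW_TLS_RX 0x2

    struct tlsdev_ops { void (*add)(void); void (*del)(void); void (*resync)(void); };
    struct dev { unsigned int features; const struct tlsdev_ops *ops; };

    static bool tls_dev_ok(const struct dev *d)
    {
        if (!(d->features & (F_HW_TLS_TX | F_HW_TLS_RX)))
            return true; /* no TLS offload claimed: nothing to check */
        if (!d->ops || !d->ops->add || !d->ops->del)
            return false; /* NOTIFY_BAD analogue */
        if ((d->features & F_HW_TLS_RX) && !d->ops->resync)
            return false; /* RX offload needs a resync handler */
        return true;
    }

    int main(void)
    {
        struct dev d = { .features = F_HW_TLS_TX, .ops = NULL };
        printf("%s\n", tls_dev_ok(&d) ? "ok" : "rejected"); /* rejected */
        return 0;
    }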
1417 return -ENOMEM; in tls_device_init()
1421 err = -ENOMEM; in tls_device_init()