// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot, chtls_cpl_protv6;
struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};
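/* Backlog handler installed on listening sockets taken over by chtls:
 * CPL messages synthesized by the driver carry their dispatch function
 * in the skb control block and have the transport header aligned with
 * the network header; anything else is a regular TCP segment and is
 * passed on to tcp_v4_do_rcv().
 */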
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return 0;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}
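/* tls_toe "feature" callback: report whether any port on this adapter
 * advertises inline TLS record offload (NETIF_F_HW_TLS_RECORD), which
 * tells the TLS TOE core the device can take over a socket.
 */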
static int chtls_inline_feature(struct tls_toe_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_toe_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct tls_toe_device *dev;
	struct chtls_dev *cdev;
	struct adapter *adap;

	dev = container_of(kref, struct tls_toe_device, kref);
	cdev = to_chtls_dev(dev);

	/* Reset tls rx/tx stats */
	adap = pci_get_drvdata(cdev->pdev);
	atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
	atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);

	chtls_free_uld(cdev);
}

static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_toe_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_TOE_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_toe_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}
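/* Pre-allocate an skb holding a zeroed TCP header and keep it on the
 * device for later reuse.
 */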
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}
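/* Module unload path: drop the final reference on every device that
 * completed registration; chtls_dev_release() then tears it down.
 */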
static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}

static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req, which will be synthesized
	 * by the driver. Once the driver synthesizes cpl_pass_accept_req, the
	 * skb will go through the regular cpl_pass_accept_req processing in
	 * TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}
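/* Deliver a bare CPL response (no packet payload). A small skb is kept
 * per response-queue hash bin; if we are its sole user it is recycled in
 * place, otherwise a fresh skb is allocated.
 */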
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}
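/* getsockopt(SOL_TLS) on an offloaded socket: the offload path only
 * handles TLS 1.2, so report that version back to userspace.
 */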
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}
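/* setsockopt(SOL_TLS): validate the TLS 1.2 crypto parameters passed in
 * from userspace, copy the full cipher-specific struct, and hand the key
 * material to the hardware via chtls_setkey().
 */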
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int cipher_type;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * copy keys from user based on GCM cipher type.
	 */
	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_sockptr_offset((char *)crypto_info +
					      sizeof(*crypto_info),
					      optval, sizeof(*crypto_info),
					      sizeof(struct tls12_crypto_info_aes_gcm_128)
					      - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		crypto_info[0] = tmp_crypto_info;
		rc = copy_from_sockptr_offset((char *)crypto_info +
					      sizeof(*crypto_info),
					      optval, sizeof(*crypto_info),
					      sizeof(struct tls12_crypto_info_aes_gcm_256)
					      - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_256;
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
	rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}
static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
	if (sk->sk_family == AF_INET)
		sk->sk_prot = &chtls_cpl_prot;
	else
		sk->sk_prot = &chtls_cpl_protv6;
}

static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
#if IS_ENABLED(CONFIG_IPV6)
	chtls_cpl_protv6 = chtls_cpl_prot;
	chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
			   &tcpv6_prot, PF_INET6);
#endif
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHTLS_DRV_VERSION);