Lines matching full:pf (references to the struct otx2_nic *pf handle in the CN10K inline IPsec / CPT offload code; identifier search results)
22 static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf) in cn10k_cpt_device_set_inuse() argument
27 state = atomic_cmpxchg(&pf->ipsec.cpt_state, in cn10k_cpt_device_set_inuse()
39 static void cn10k_cpt_device_set_available(struct otx2_nic *pf) in cn10k_cpt_device_set_available() argument
41 atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE); in cn10k_cpt_device_set_available()
44 static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf) in cn10k_cpt_device_set_unavailable() argument
46 atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE); in cn10k_cpt_device_set_unavailable()
49 static int cn10k_outb_cptlf_attach(struct otx2_nic *pf) in cn10k_outb_cptlf_attach() argument
54 mutex_lock(&pf->mbox.lock); in cn10k_outb_cptlf_attach()
56 attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox); in cn10k_outb_cptlf_attach()
64 ret = otx2_sync_mbox_msg(&pf->mbox); in cn10k_outb_cptlf_attach()
67 mutex_unlock(&pf->mbox.lock); in cn10k_outb_cptlf_attach()
71 static int cn10k_outb_cptlf_detach(struct otx2_nic *pf) in cn10k_outb_cptlf_detach() argument
76 mutex_lock(&pf->mbox.lock); in cn10k_outb_cptlf_detach()
77 detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox); in cn10k_outb_cptlf_detach()
85 ret = otx2_sync_mbox_msg(&pf->mbox); in cn10k_outb_cptlf_detach()
88 mutex_unlock(&pf->mbox.lock); in cn10k_outb_cptlf_detach()
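Both attach and detach follow the standard otx2 mailbox pattern visible above: take pf->mbox.lock, allocate a request message, fill it in, and send it synchronously to the AF. A sketch of the attach side; the rsrc_attach field names (modify, cptlfs) are taken from the shared octeontx2 mailbox definitions and are assumptions as far as this listing is concerned:

    static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
    {
            struct rsrc_attach *attach;
            int ret = -ENOMEM;

            mutex_lock(&pf->mbox.lock);
            /* Allocate the attach request in the mailbox region */
            attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
            if (!attach)
                    goto unlock;

            attach->cptlfs = true;      /* ask the AF for one CPT LF */
            attach->modify = true;      /* keep resources already attached */

            /* Synchronous request/response with the AF */
            ret = otx2_sync_mbox_msg(&pf->mbox);

    unlock:
            mutex_unlock(&pf->mbox.lock);
            return ret;
    }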
92 static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf) in cn10k_outb_cptlf_alloc() argument
97 mutex_lock(&pf->mbox.lock); in cn10k_outb_cptlf_alloc()
98 req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox); in cn10k_outb_cptlf_alloc()
102 /* PF function */ in cn10k_outb_cptlf_alloc()
103 req->nix_pf_func = pf->pcifunc; in cn10k_outb_cptlf_alloc()
107 ret = otx2_sync_mbox_msg(&pf->mbox); in cn10k_outb_cptlf_alloc()
110 mutex_unlock(&pf->mbox.lock); in cn10k_outb_cptlf_alloc()
114 static void cn10k_outb_cptlf_free(struct otx2_nic *pf) in cn10k_outb_cptlf_free() argument
116 mutex_lock(&pf->mbox.lock); in cn10k_outb_cptlf_free()
117 otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox); in cn10k_outb_cptlf_free()
118 otx2_sync_mbox_msg(&pf->mbox); in cn10k_outb_cptlf_free()
119 mutex_unlock(&pf->mbox.lock); in cn10k_outb_cptlf_free()
122 static int cn10k_outb_cptlf_config(struct otx2_nic *pf) in cn10k_outb_cptlf_config() argument
127 mutex_lock(&pf->mbox.lock); in cn10k_outb_cptlf_config()
128 req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox); in cn10k_outb_cptlf_config()
134 req->nix_pf_func = pf->pcifunc; in cn10k_outb_cptlf_config()
135 ret = otx2_sync_mbox_msg(&pf->mbox); in cn10k_outb_cptlf_config()
137 mutex_unlock(&pf->mbox.lock); in cn10k_outb_cptlf_config()
141 static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf) in cn10k_outb_cptlf_iq_enable() argument
146 reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); in cn10k_outb_cptlf_iq_enable()
148 otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); in cn10k_outb_cptlf_iq_enable()
151 reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL); in cn10k_outb_cptlf_iq_enable()
153 otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val); in cn10k_outb_cptlf_iq_enable()
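Enabling the instruction queue is two read-modify-write register updates: execution-enable in CPT_LF_INPROG, then enqueue-enable in CPT_LF_CTL. A sketch; only the register names appear above, so the exact bit positions (EENA at bit 16, ENA at bit 0) are assumptions:

    static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
    {
            u64 reg_val;

            /* Let the LF start executing queued instructions (EENA) */
            reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
            reg_val |= BIT_ULL(16);              /* assumed EENA position */
            otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);

            /* Let software enqueue new instructions (ENA) */
            reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
            reg_val |= BIT_ULL(0);               /* assumed ENA position */
            otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
    }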
156 static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf) in cn10k_outb_cptlf_iq_disable() argument
165 otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull); in cn10k_outb_cptlf_iq_disable()
171 reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); in cn10k_outb_cptlf_iq_disable()
178 netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n"); in cn10k_outb_cptlf_iq_disable()
187 otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); in cn10k_outb_cptlf_iq_disable()
192 reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); in cn10k_outb_cptlf_iq_disable()
197 reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR); in cn10k_outb_cptlf_iq_disable()
204 reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); in cn10k_outb_cptlf_iq_disable()
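Disable is the mirror image with a drain step in between: clear CPT_LF_CTL to stop new enqueues, then poll CPT_LF_INPROG until the in-flight count reaches zero, printing the timeout error shown above if it does not. A condensed sketch that omits the later Q_GRP_PTR/INPROG pointer checks visible in the listing; the INFLIGHT field mask is an assumption:

    /* Assumed layout: in-flight instruction count in the low bits of INPROG */
    #define CPT_LF_INPROG_INFLIGHT  GENMASK_ULL(8, 0)

    static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
    {
            int timeout = 20;
            u64 reg_val;

            /* Stop accepting new instructions */
            otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);

            /* Let instructions already in the queue finish */
            do {
                    reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
                    if (!FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val))
                            break;
                    usleep_range(10000, 20000);
            } while (--timeout);

            if (!timeout)
                    netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
    }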
233 static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf) in cn10k_outb_cptlf_iq_alloc() argument
235 struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; in cn10k_outb_cptlf_iq_alloc()
240 iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size, in cn10k_outb_cptlf_iq_alloc()
255 static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf) in cn10k_outb_cptlf_iq_free() argument
257 struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; in cn10k_outb_cptlf_iq_free()
260 dma_free_coherent(pf->dev, iq->size, iq->real_vaddr, in cn10k_outb_cptlf_iq_free()
267 static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf) in cn10k_outb_cptlf_iq_init() argument
273 ret = cn10k_outb_cptlf_iq_alloc(pf); in cn10k_outb_cptlf_iq_init()
278 cn10k_outb_cptlf_iq_disable(pf); in cn10k_outb_cptlf_iq_init()
281 otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr); in cn10k_outb_cptlf_iq_init()
286 otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val); in cn10k_outb_cptlf_iq_init()
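The instruction queue itself is a single coherent DMA buffer, over-allocated so both the CPU pointer and the IOVA can be aligned to OTX2_ALIGN before being programmed into CPT_LF_Q_BASE/Q_SIZE. A sketch with alloc and init merged for brevity; the size constants, the extra iq fields (real_dma_addr, vaddr) and the Q_SIZE encoding are all assumptions beyond what the listing shows:

    static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
    {
            struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;

            /* Over-allocate so the buffer can be aligned below */
            iq->size = CN10K_CPT_INST_QLEN_BYTES + OTX2_ALIGN;    /* assumed */
            iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
                                                &iq->real_dma_addr, GFP_KERNEL);
            if (!iq->real_vaddr)
                    return -ENOMEM;

            /* Aligned view actually handed to the hardware */
            iq->vaddr = PTR_ALIGN(iq->real_vaddr, OTX2_ALIGN);    /* assumed field */
            iq->dma_addr = ALIGN(iq->real_dma_addr, OTX2_ALIGN);

            /* Quiesce the LF before reprogramming it */
            cn10k_outb_cptlf_iq_disable(pf);

            /* Point the LF at the queue and set its size; the exact
             * Q_SIZE encoding is not visible in the listing.
             */
            otx2_write64(pf, CN10K_CPT_LF_Q_BASE, iq->dma_addr);
            otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, CN10K_CPT_SIZE_DIV40); /* assumed */

            return 0;
    }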
291 static int cn10k_outb_cptlf_init(struct otx2_nic *pf) in cn10k_outb_cptlf_init() argument
296 ret = cn10k_outb_cptlf_iq_init(pf); in cn10k_outb_cptlf_init()
301 ret = cn10k_outb_cptlf_config(pf); in cn10k_outb_cptlf_init()
306 cn10k_outb_cptlf_iq_enable(pf); in cn10k_outb_cptlf_init()
309 cn10k_outb_cptlf_iq_free(pf); in cn10k_outb_cptlf_init()
315 struct otx2_nic *pf = netdev_priv(netdev); in cn10k_outb_cpt_init() local
319 ret = cn10k_outb_cptlf_attach(pf); in cn10k_outb_cpt_init()
324 ret = cn10k_outb_cptlf_alloc(pf); in cn10k_outb_cpt_init()
329 ret = cn10k_outb_cptlf_init(pf); in cn10k_outb_cpt_init()
333 pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf, in cn10k_outb_cpt_init()
337 pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; in cn10k_outb_cpt_init()
339 cn10k_cpt_device_set_available(pf); in cn10k_outb_cpt_init()
343 cn10k_outb_cptlf_free(pf); in cn10k_outb_cpt_init()
345 cn10k_outb_cptlf_detach(pf); in cn10k_outb_cpt_init()
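Taken together, the lines above give the bring-up order for outbound offload: attach the CPT LF through the AF, allocate it against our NIX pcifunc, initialize the instruction queue and inline-IPsec config, cache the LF's submission register address, and only then flip the offload flag and mark the device available. A sketch of that ordering with its unwind path; the otx2_get_regaddr() argument (the LF NQ register) is not visible in the listing and is named here only as a placeholder:

    static int cn10k_outb_cpt_init(struct net_device *netdev)
    {
            struct otx2_nic *pf = netdev_priv(netdev);
            int ret;

            /* 1. AF attaches a CPT LF to this PF/VF */
            ret = cn10k_outb_cptlf_attach(pf);
            if (ret)
                    return ret;

            /* 2. Allocate the LF and bind it to our NIX pcifunc */
            ret = cn10k_outb_cptlf_alloc(pf);
            if (ret)
                    goto detach;

            /* 3. Instruction queue + inline-IPsec config + enable */
            ret = cn10k_outb_cptlf_init(pf);
            if (ret)
                    goto lf_free;

            /* 4. Cache the LF's instruction-submission register address
             *    for later LMTST flushes (register name assumed).
             */
            pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
                                                    CN10K_CPT_LF_NQX(0));

            /* 5. Advertise the offload and open the device for users */
            pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
            cn10k_cpt_device_set_available(pf);
            return 0;

    lf_free:
            cn10k_outb_cptlf_free(pf);
    detach:
            cn10k_outb_cptlf_detach(pf);
            return ret;
    }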
349 static int cn10k_outb_cpt_clean(struct otx2_nic *pf) in cn10k_outb_cpt_clean() argument
353 if (!cn10k_cpt_device_set_inuse(pf)) { in cn10k_outb_cpt_clean()
354 netdev_err(pf->netdev, "CPT LF device unavailable\n"); in cn10k_outb_cpt_clean()
359 pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; in cn10k_outb_cpt_clean()
362 cn10k_outb_cptlf_iq_disable(pf); in cn10k_outb_cpt_clean()
365 otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0); in cn10k_outb_cpt_clean()
366 otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0); in cn10k_outb_cpt_clean()
369 cn10k_outb_cptlf_iq_free(pf); in cn10k_outb_cpt_clean()
372 cn10k_outb_cptlf_free(pf); in cn10k_outb_cpt_clean()
373 ret = cn10k_outb_cptlf_detach(pf); in cn10k_outb_cpt_clean()
375 netdev_err(pf->netdev, "Failed to detach CPT LF\n"); in cn10k_outb_cpt_clean()
377 cn10k_cpt_device_set_unavailable(pf); in cn10k_outb_cpt_clean()
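Teardown runs the same steps in reverse, and the guard at the top is what ties the state machine together: cleanup first claims exclusive ownership so it cannot race an in-flight SA write, then clears the feature flag before touching hardware. A sketch assembled from the lines above (the error codes are assumptions):

    static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
    {
            int ret;

            /* Take exclusive ownership so SA writes cannot race teardown */
            if (!cn10k_cpt_device_set_inuse(pf)) {
                    netdev_err(pf->netdev, "CPT LF device unavailable\n");
                    return -ENODEV;
            }

            /* Stop advertising the offload before dismantling it */
            pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;

            /* Quiesce, unmap and free the instruction queue */
            cn10k_outb_cptlf_iq_disable(pf);
            otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
            otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
            cn10k_outb_cptlf_iq_free(pf);

            /* Give the LF back to the AF */
            cn10k_outb_cptlf_free(pf);
            ret = cn10k_outb_cptlf_detach(pf);
            if (ret)
                    netdev_err(pf->netdev, "Failed to detach CPT LF\n");

            cn10k_cpt_device_set_unavailable(pf);
            return ret;
    }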
381 static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst, in cn10k_cpt_inst_flush() argument
387 lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); in cn10k_cpt_inst_flush()
397 tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4; in cn10k_cpt_inst_flush()
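Instructions are not written to the queue with plain MMIO stores; they are submitted through a per-CPU LMTST line, with the target address combining the cached io_addr and the number of 128-bit words being flushed (the (((size / 16) - 1) & 0x7) << 4 term above). A sketch of that submission; the otx2_lmt_info field names (lmt_addr, lmt_id) and cn10k_lmt_flush() are taken from other parts of the otx2 driver and are assumptions here:

    static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
                                     u64 size)
    {
            struct otx2_lmt_info *lmt_info;
            u64 val = 0, tar_addr = 0;

            /* Each CPU has its own LMT line, so no locking is needed here */
            lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());

            /* LMT ID in the low bits of the store data (layout assumed) */
            val = lmt_info->lmt_id & 0x7FF;

            /* Target address = LF submission register + number of 128-bit
             * words in this LMTST burst, as shown in the listing above.
             */
            tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;

            /* Copy the instruction into the LMT line, then trigger the flush */
            dma_wmb();
            memcpy((void *)lmt_info->lmt_addr, inst, size);
            cn10k_lmt_flush(val, tar_addr);
    }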
403 static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf, in cn10k_wait_for_cpt_respose() argument
411 netdev_err(pf->netdev, "CPT response timeout\n"); in cn10k_wait_for_cpt_respose()
419 netdev_err(pf->netdev, "compcode=%x doneint=%x\n", in cn10k_wait_for_cpt_respose()
421 netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n", in cn10k_wait_for_cpt_respose()
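After a flush, the driver busy-waits for the CPT engine to DMA its result structure back and then checks the completion codes, printing the diagnostics shown above on failure. A simplified sketch; treating res->doneint as the completion indicator, the 100 ms budget, and the success criteria are all assumptions:

    static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf,
                                          struct cpt_res_s *res)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(100);

            /* The engine writes the result structure via DMA on completion */
            do {
                    if (time_after(jiffies, timeout)) {
                            netdev_err(pf->netdev, "CPT response timeout\n");
                            return -EBUSY;
                    }
                    dma_rmb();
            } while (!res->doneint);        /* assumed completion indicator */

            if (res->uc_compcode) {         /* microcode reported an error */
                    netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
                               res->compcode, res->doneint);
                    netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
                               res->uc_compcode, res->uc_info, res->esn);
                    return -EINVAL;
            }

            return 0;
    }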
427 static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) in cn10k_outb_write_sa() argument
442 res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s), in cn10k_outb_write_sa()
448 sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC); in cn10k_outb_write_sa()
450 dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, in cn10k_outb_write_sa()
472 if (!cn10k_cpt_device_set_inuse(pf)) { in cn10k_outb_write_sa()
477 cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); in cn10k_outb_write_sa()
479 ret = cn10k_wait_for_cpt_respose(pf, res); in cn10k_outb_write_sa()
485 otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); in cn10k_outb_write_sa()
488 cn10k_cpt_device_set_available(pf); in cn10k_outb_write_sa()
490 dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova); in cn10k_outb_write_sa()
491 dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova); in cn10k_outb_write_sa()
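Writing an SA to the hardware is itself a CPT instruction, so the function above juggles three DMA regions: a cpt_res_s result buffer, a staging copy of the SA used as the instruction's data pointer, and the qmem-backed SA context that the engine ultimately caches. A condensed sketch of that lifecycle; the instruction-building step and the CTX_FLUSH value encoding are not visible in the listing and are only placeholders here:

    static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
    {
            u32 sa_size = pf->ipsec.sa_size;
            dma_addr_t res_iova, dptr_iova;
            struct cpt_inst_s inst = {};
            struct cpt_res_s *res;
            void *sa_dptr;
            int ret;

            /* Result buffer the engine writes back into */
            res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
                                     &res_iova, GFP_ATOMIC);
            if (!res)
                    return -ENOMEM;

            /* Staging copy of the SA handed to the engine as DPTR */
            sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
            if (!sa_dptr) {
                    dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
                                      res_iova);
                    return -ENOMEM;
            }
            memcpy(sa_dptr, sa_info->base, sa_size);

            /* ... fill 'inst' with the SA-write opcode, dptr_iova, res_iova
             *     and the SA context address (fields not in the listing) ...
             */

            ret = -ENODEV;
            if (!cn10k_cpt_device_set_inuse(pf))
                    goto free;

            cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
            ret = cn10k_wait_for_cpt_respose(pf, res);
            if (ret)
                    goto release;

            /* Flush the engine's cached copy of this SA context; the exact
             * register value layout is assumed.
             */
            otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, sa_info->iova >> 7);

    release:
            cn10k_cpt_device_set_available(pf);
    free:
            dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
            dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
            return ret;
    }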
526 struct otx2_nic *pf; in cn10k_outb_prepare_sa() local
534 pf = netdev_priv(netdev); in cn10k_outb_prepare_sa()
535 sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; in cn10k_outb_prepare_sa()
672 struct otx2_nic *pf; in cn10k_ipsec_outb_add_state() local
679 pf = netdev_priv(netdev); in cn10k_ipsec_outb_add_state()
681 err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); in cn10k_ipsec_outb_add_state()
688 err = cn10k_outb_write_sa(pf, sa_info); in cn10k_ipsec_outb_add_state()
691 qmem_free(pf->dev, sa_info); in cn10k_ipsec_outb_add_state()
697 if (!pf->ipsec.outb_sa_count) in cn10k_ipsec_outb_add_state()
699 pf->ipsec.outb_sa_count++; in cn10k_ipsec_outb_add_state()
717 struct otx2_nic *pf; in cn10k_ipsec_del_state() local
723 pf = netdev_priv(netdev); in cn10k_ipsec_del_state()
730 sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; in cn10k_ipsec_del_state()
733 err = cn10k_outb_write_sa(pf, sa_info); in cn10k_ipsec_del_state()
738 qmem_free(pf->dev, sa_info); in cn10k_ipsec_del_state()
743 if (!--pf->ipsec.outb_sa_count) in cn10k_ipsec_del_state()
744 queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); in cn10k_ipsec_del_state()
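On the xfrm side, each outbound state gets a qmem-backed SA context that is prepared in software, pushed to the CPT with cn10k_outb_write_sa(), and reference-counted so the last delete can queue the cleanup work shown above. A condensed sketch of the add path; the callback and helper signatures, the cn10k_tx_sa_s struct name and the first-SA step are assumptions or placeholders:

    static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
                                          struct netlink_ext_ack *extack)
    {
            struct net_device *netdev = x->xso.dev;
            struct cn10k_tx_sa_s *sa_entry;
            struct qmem *sa_info;
            struct otx2_nic *pf;
            int err;

            pf = netdev_priv(netdev);

            /* DMA-able backing store for the hardware SA context */
            err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
            if (err)
                    return err;

            sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
            cn10k_outb_prepare_sa(x, sa_entry);     /* fill SA fields from 'x' */

            /* Push the prepared SA to the CPT engine */
            err = cn10k_outb_write_sa(pf, sa_info);
            if (err) {
                    qmem_free(pf->dev, sa_info);
                    return err;
            }

            x->xso.offload_handle = (unsigned long)sa_info;

            if (!pf->ipsec.outb_sa_count) {
                    /* first-SA setup shown in the listing; body omitted here */
            }
            pf->ipsec.outb_sa_count++;

            return 0;
    }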
756 struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); in cn10k_ipsec_sa_wq_handler() local
761 netdev_update_features(pf->netdev); in cn10k_ipsec_sa_wq_handler()
767 struct otx2_nic *pf = netdev_priv(netdev); in cn10k_ipsec_ethtool_init() local
770 if (!is_dev_support_ipsec_offload(pf->pdev)) in cn10k_ipsec_ethtool_init()
778 if (pf->ipsec.outb_sa_count) { in cn10k_ipsec_ethtool_init()
779 netdev_err(pf->netdev, "SA installed on this device\n"); in cn10k_ipsec_ethtool_init()
783 return cn10k_outb_cpt_clean(pf); in cn10k_ipsec_ethtool_init()
788 struct otx2_nic *pf = netdev_priv(netdev); in cn10k_ipsec_init() local
791 if (!is_dev_support_ipsec_offload(pf->pdev)) in cn10k_ipsec_init()
798 pf->ipsec.sa_size = sa_size; in cn10k_ipsec_init()
800 INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler); in cn10k_ipsec_init()
801 pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0); in cn10k_ipsec_init()
802 if (!pf->ipsec.sa_workq) { in cn10k_ipsec_init()
803 netdev_err(pf->netdev, "SA alloc workqueue failed\n"); in cn10k_ipsec_init()
812 cn10k_cpt_device_set_unavailable(pf); in cn10k_ipsec_init()
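Initialization is deliberately lazy: cn10k_ipsec_init() only sizes the SA context, sets up the workqueue used by the SA work handler above, and leaves the CPT device marked unavailable; the LF itself is brought up later, once the offload feature is enabled through the ethtool hook. A sketch of that ordering; the SA-size rounding expression and the omitted feature/ops registration are assumptions beyond the lines in the listing:

    static int cn10k_ipsec_init(struct net_device *netdev)
    {
            struct otx2_nic *pf = netdev_priv(netdev);
            u32 sa_size;

            if (!is_dev_support_ipsec_offload(pf->pdev))
                    return 0;

            /* Round the SA context up to the OTX2_ALIGN (128-byte) boundary
             * (rounding expression assumed).
             */
            sa_size = ALIGN(sizeof(struct cn10k_tx_sa_s), OTX2_ALIGN);
            pf->ipsec.sa_size = sa_size;

            INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
            pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
            if (!pf->ipsec.sa_workq) {
                    netdev_err(pf->netdev, "SA alloc workqueue failed\n");
                    return -ENOMEM;
            }

            /* ... xfrmdev ops / ESP feature registration not in the listing ... */

            /* The CPT LF is not attached yet; keep the device unavailable
             * until the offload is actually switched on.
             */
            cn10k_cpt_device_set_unavailable(pf);
            return 0;
    }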
817 void cn10k_ipsec_clean(struct otx2_nic *pf) in cn10k_ipsec_clean() argument
819 if (!is_dev_support_ipsec_offload(pf->pdev)) in cn10k_ipsec_clean()
822 if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) in cn10k_ipsec_clean()
825 if (pf->ipsec.sa_workq) { in cn10k_ipsec_clean()
826 destroy_workqueue(pf->ipsec.sa_workq); in cn10k_ipsec_clean()
827 pf->ipsec.sa_workq = NULL; in cn10k_ipsec_clean()
830 cn10k_outb_cpt_clean(pf); in cn10k_ipsec_clean()
930 bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, in cn10k_ipsec_transmit() argument
947 if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) in cn10k_ipsec_transmit()
963 if (dlen == 0 && netif_msg_tx_err(pf)) { in cn10k_ipsec_transmit()
964 netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); in cn10k_ipsec_transmit()
992 inst.rvu_pf_func = pf->pcifunc; in cn10k_ipsec_transmit()
1036 cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); in cn10k_ipsec_transmit()