xref: /linux/drivers/crypto/marvell/octeontx2/cn10k_cpt.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021 Marvell. */
3 
4 #include <linux/soc/marvell/octeontx2/asm.h>
5 #include "otx2_cptpf.h"
6 #include "otx2_cptvf.h"
7 #include "otx2_cptlf.h"
8 #include "cn10k_cpt.h"
9 #include "otx2_cpt_common.h"
10 
11 static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
12 			       struct otx2_cptlf_info *lf);
13 
/*
 * Hardware ops used when the device does not advertise CN10K LMTST support
 * (see the CN10K_LMTST cap_flag checks below): commands are issued through
 * the otx2 send path and completion codes decoded with the otx2 layouts.
 */
static struct cpt_hw_ops otx2_hw_ops = {
	.send_cmd = otx2_cpt_send_cmd,
	.cpt_get_compcode = otx2_cpt_get_compcode,
	.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};
20 
/*
 * Hardware ops used on CN10K parts (CN10K_LMTST cap set): commands are
 * issued via LMTST (cn10k_cpt_send_cmd()) and completion codes decoded
 * with the CN10K layouts. SG info creation is shared with otx2.
 */
static struct cpt_hw_ops cn10k_hw_ops = {
	.send_cmd = cn10k_cpt_send_cmd,
	.cpt_get_compcode = cn10k_cpt_get_compcode,
	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};
27 
/*
 * cn10k_cpt_send_cmd() - submit CPT instruction(s) using a CN10K LMTST.
 * @cptinst:   instruction(s) to submit
 * @insts_num: number of instructions in @cptinst
 * @lf:        LF whose LMTLINE slot and I/O register are used
 *
 * Copies the instructions into the LF's LMTLINE and triggers the store
 * with cn10k_lmt_flush(). Statement order matters: the dma_wmb() must
 * complete before the copy/flush so the device observes coherent data.
 */
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf)
{
	/* Each slot owns a LMTLINE_SIZE window inside the shared LMT region */
	void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE);
	/* LMT ID: low 11 bits of the slot number */
	u64 val = (lf->slot & 0x7FF);
	u64 tar_addr = 0;

	/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
	tar_addr |= (__force u64)lf->ioreg |
		    (((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
	/*
	 * Make sure memory areas pointed in CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	/* Copy CPT command to LMTLINE */
	memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
	cn10k_lmt_flush(val, tar_addr);
}
48 
cn10k_cpt_lmtst_free(struct pci_dev * pdev,struct otx2_cptlfs_info * lfs)49 void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs)
50 {
51 	struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
52 
53 	if (!lmt_info->base)
54 		return;
55 
56 	dma_free_attrs(&pdev->dev, lmt_info->size,
57 		       lmt_info->base - lmt_info->align,
58 		       lmt_info->iova - lmt_info->align,
59 		       DMA_ATTR_FORCE_CONTIGUOUS);
60 }
61 EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT");
62 
/*
 * cn10k_cpt_lmtst_alloc() - allocate a physically contiguous DMA region
 * for the LMTLINEs.
 * @pdev: PCI device to allocate against
 * @lfs:  LFs info whose lmt_info bookkeeping is filled in
 * @size: total bytes to allocate (caller includes LMTLINE_ALIGN slack)
 *
 * The raw IOVA is rounded up to LMTLINE_ALIGN; both the stored iova and
 * the CPU base pointer are advanced by the same offset, which is kept in
 * lmt_info->align so cn10k_cpt_lmtst_free() can undo it.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev,
				 struct otx2_cptlfs_info *lfs, u32 size)
{
	struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
	dma_addr_t raw_iova;
	dma_addr_t aligned_iova;

	lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &raw_iova,
					 GFP_KERNEL,
					 DMA_ATTR_FORCE_CONTIGUOUS);
	if (!lmt_info->base)
		return -ENOMEM;

	aligned_iova = ALIGN((u64)raw_iova, LMTLINE_ALIGN);
	lmt_info->align = aligned_iova - raw_iova;
	lmt_info->iova = aligned_iova;
	lmt_info->size = size;
	lmt_info->base += lmt_info->align;

	return 0;
}
82 
cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev * cptpf)83 int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
84 {
85 	struct pci_dev *pdev = cptpf->pdev;
86 	u32 size;
87 	int ret;
88 
89 	if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
90 		cptpf->lfs.ops = &otx2_hw_ops;
91 		return 0;
92 	}
93 
94 	cptpf->lfs.ops = &cn10k_hw_ops;
95 	size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN;
96 	ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size);
97 	if (ret) {
98 		dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n",
99 			cptpf->pf_id);
100 		return ret;
101 	}
102 
103 	ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs);
104 	if (ret) {
105 		dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n",
106 		cptpf->pf_id);
107 		cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
108 	}
109 
110 	return 0;
111 }
112 EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
113 
cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev * cptvf)114 int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
115 {
116 	struct pci_dev *pdev = cptvf->pdev;
117 	u32 size;
118 	int ret;
119 
120 	if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
121 		return 0;
122 
123 	size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN;
124 	ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size);
125 	if (ret) {
126 		dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n",
127 			cptvf->vf_id);
128 		return ret;
129 	}
130 
131 	ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs);
132 	if (ret) {
133 		dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n",
134 			cptvf->vf_id);
135 		cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
136 	}
137 
138 	return 0;
139 }
140 EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
141 
cn10k_cpt_hw_ctx_clear(struct pci_dev * pdev,struct cn10k_cpt_errata_ctx * er_ctx)142 void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
143 			    struct cn10k_cpt_errata_ctx *er_ctx)
144 {
145 	u64 cptr_dma;
146 
147 	if (!is_dev_cn10ka_ax(pdev))
148 		return;
149 
150 	cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60));
151 	cn10k_cpt_ctx_flush(pdev, cptr_dma, true);
152 	dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE,
153 			 DMA_BIDIRECTIONAL);
154 	kfree(er_ctx->hw_ctx);
155 }
156 EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, "CRYPTO_DEV_OCTEONTX2_CPT");
157 
cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx * hctx,u16 ctx_sz)158 void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
159 {
160 	hctx->w0.aop_valid = 1;
161 	hctx->w0.ctx_hdr_sz = 0;
162 	hctx->w0.ctx_sz = ctx_sz;
163 	hctx->w0.ctx_push_sz = 1;
164 }
165 EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, "CRYPTO_DEV_OCTEONTX2_CPT");
166 
cn10k_cpt_hw_ctx_init(struct pci_dev * pdev,struct cn10k_cpt_errata_ctx * er_ctx)167 int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
168 			  struct cn10k_cpt_errata_ctx *er_ctx)
169 {
170 	union cn10k_cpt_hw_ctx *hctx;
171 	u64 cptr_dma;
172 
173 	er_ctx->cptr_dma = 0;
174 	er_ctx->hw_ctx = NULL;
175 
176 	if (!is_dev_cn10ka_ax(pdev))
177 		return 0;
178 
179 	hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
180 	if (unlikely(!hctx))
181 		return -ENOMEM;
182 	cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
183 				  DMA_BIDIRECTIONAL);
184 	if (dma_mapping_error(&pdev->dev, cptr_dma)) {
185 		kfree(hctx);
186 		return -ENOMEM;
187 	}
188 
189 	cn10k_cpt_hw_ctx_set(hctx, 1);
190 	er_ctx->hw_ctx = hctx;
191 	er_ctx->cptr_dma = cptr_dma | BIT_ULL(60);
192 
193 	return 0;
194 }
195 EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, "CRYPTO_DEV_OCTEONTX2_CPT");
196 
/*
 * cn10k_cpt_ctx_flush() - flush (and optionally invalidate) a hardware
 * context cache entry.
 * @pdev:  PCI device whose driver data holds the VF device
 * @cptr:  DMA address of the context (consumed as address >> 7)
 * @inval: also invalidate the entry when true (sets BIT(46))
 *
 * Writes CTX_FLUSH on the first LF slot, then reads CTX_ERR back after a
 * wmb() so the flush is observed complete before returning. The write,
 * barrier and readback order is deliberate — do not reorder.
 */
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	u64 reg;

	/* Register takes the context address in units of 128 bytes. */
	reg = (uintptr_t)cptr >> 7;
	if (inval)
		reg = reg | BIT_ULL(46);

	otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			 OTX2_CPT_LF_CTX_FLUSH, reg);
	/* Make sure that the FLUSH operation is complete */
	wmb();
	otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			OTX2_CPT_LF_CTX_ERR);
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, "CRYPTO_DEV_OCTEONTX2_CPT");
215 
cptvf_hw_ops_get(struct otx2_cptvf_dev * cptvf)216 void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
217 {
218 	if (test_bit(CN10K_LMTST, &cptvf->cap_flag))
219 		cptvf->lfs.ops = &cn10k_hw_ops;
220 	else
221 		cptvf->lfs.ops = &otx2_hw_ops;
222 }
223 EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, "CRYPTO_DEV_OCTEONTX2_CPT");
224