xref: /linux/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_dc.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

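/*
 * Registration refcount: the acomp algorithms below are registered with the
 * crypto API when the first device calls qat_comp_algs_register() and
 * unregistered when the last one calls qat_comp_algs_unregister();
 * algs_lock serializes updates to active_devs.
 */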
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;

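/*
 * Per-transform state: the opaque firmware context built by
 * qat_comp_build_ctx(), the compression instance servicing the tfm and an
 * optional hook that lets an algorithm post-process the firmware response.
 */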
struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];
	struct qat_compression_instance *inst;
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

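/*
 * Per-request state: the firmware descriptor (req), back-pointers to the
 * tfm context and the acomp request, the DMA-mapped buffer lists, the
 * request direction, and the usable destination length before the overflow
 * buffer (actual_dlen). alg_req wraps the descriptor for ring submission
 * and backlogging.
 */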
struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;
	enum direction dir;
	int actual_dlen;
	struct qat_alg_req alg_req;
};

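/*
 * Wrap the firmware descriptor in a qat_alg_req and hand it to the common
 * send path, which either places it on the instance's DC TX ring or, for
 * requests that allow backlogging, queues it on the instance backlog when
 * the ring is full.
 */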
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

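/*
 * Common response handler: decode the firmware status, consumed/produced
 * counters and error codes, reject compressions that were not verified
 * (CNV) or that overflowed into the shared overflow buffer, then report
 * the produced length and complete the acomp request. The DMA buffer
 * lists are always released here.
 */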
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp  " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}

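/*
 * Response-ring entry point: recover the originating request from the
 * opaque field of the firmware response, run the generic handler and then
 * try to resubmit any backlogged requests for this instance.
 */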
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
			(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}

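/*
 * Transform init: pick a compression instance close to the tfm's NUMA node
 * and build the deflate firmware context for it.
 */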
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
}

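/* Transform exit: release the instance taken in init and scrub the context. */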
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}

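/*
 * Common request path for both directions: validate the source and
 * destination lengths (minus any header/footer bytes to skip), map the
 * scatterlists into firmware buffer lists - appending the per-instance
 * overflow buffer to the destination on compression - then build the
 * firmware descriptor and submit it. On -ENOSPC the buffer lists are
 * unmapped here so the caller can simply propagate the error.
 */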
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (!areq->dst || !dlen)
		return -EINVAL;

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

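/*
 * Illustrative sketch (not part of this driver): a kernel user reaches
 * "qat_deflate" through the generic acomp API; names such as done_cb and
 * my_ctx below are placeholders.
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, my_ctx);
 *	acomp_request_set_params(req, src_sgl, dst_sgl, slen, dlen);
 *	err = crypto_acomp_compress(req);
 *
 * The crypto core selects the highest-priority "deflate" provider, so this
 * driver handles the request only when its cra_priority wins.
 */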
static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_reqsize = sizeof(struct qat_compression_req),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
}};

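/*
 * Called once per device bring-up and tear-down; the algorithms are
 * registered with the crypto API only for the first device and
 * unregistered when the last device goes away.
 */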
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}