// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

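/* Direction of a request as submitted to the QAT compression service */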
enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;

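/*
 * Per-tfm context: firmware compression context, the QAT instance bound to
 * this tfm and an optional callback invoked on successful completion.
 */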
struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];
	struct qat_compression_instance *inst;
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

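/*
 * Per-request context: the firmware request descriptor, back-pointers to the
 * tfm context and the acomp request, the DMA buffer lists and the destination
 * length originally requested (used to detect overflow on compression).
 */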
struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;
	enum direction dir;
	int actual_dlen;
	struct qat_alg_req alg_req;
};

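/* Enqueue a request on the instance's compression TX ring, backlogging if needed */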
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

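/*
 * Common completion handler: check the firmware status, validate the CNV
 * (compress-and-verify) flag and overflow conditions on compression, set the
 * produced length, then free the buffer lists and complete the acomp request.
 */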
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}

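/* Response ring callback: recover the request from the opaque field and resubmit any backlog */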
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
			(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}

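/* Bind the tfm to a compression instance on the local NUMA node and build the deflate context */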
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	ctx->inst->build_deflate_ctx(ctx->comp_ctx);

	return 0;
}

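/* Release the compression instance and clear the tfm context */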
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}

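/*
 * Map the source/destination scatterlists, build the firmware (de)compression
 * request and submit it. For compression, the instance's overflow buffer is
 * appended to the destination so that overflows can be detected in the
 * callback. The shdr/sftr/dhdr/dftr parameters let callers skip headers and
 * footers in the source and destination buffers.
 */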
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (!areq->dst || !dlen)
		return -EINVAL;

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

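/* acomp "deflate" algorithm backed by the QAT compression service */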
static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
	.reqsize = sizeof(struct qat_compression_req),
}};

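/* Register the acomp algorithms when the first device comes up */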
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}

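/* Unregister the acomp algorithms when the last device goes away */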
void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}