// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#include "qat_bl.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
	 ICP_ACCEL_CAPABILITIES_AES_V2)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	unsigned int hash_digestsize;
	unsigned int hash_blocksize;
	struct qat_crypto_instance *inst;
};

struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	struct crypto_cipher *tweak;
	bool fallback;
	int mode;
};

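/*
 * Precompute the HMAC inner and outer hash states for the given key and
 * store them big-endian in the content descriptor's state1 buffer, inner
 * state first (the SHA1 state is padded out to an 8-byte multiple). The
 * firmware resumes from these midstates instead of rehashing the key's
 * ipad/opad blocks on every request.
 */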
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1: {
		struct hmac_sha1_key key;
		__be32 *istate = (__be32 *)hash->sha.state1;
		__be32 *ostate = (__be32 *)(hash->sha.state1 +
					    round_up(sizeof(key.istate.h), 8));

		hmac_sha1_preparekey(&key, auth_key, auth_keylen);
		for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
			istate[i] = cpu_to_be32(key.istate.h[i]);
			ostate[i] = cpu_to_be32(key.ostate.h[i]);
		}
		memzero_explicit(&key, sizeof(key));
		return 0;
	}
	case ICP_QAT_HW_AUTH_ALGO_SHA256: {
		struct hmac_sha256_key key;
		__be32 *istate = (__be32 *)hash->sha.state1;
		__be32 *ostate = (__be32 *)(hash->sha.state1 +
					    sizeof(key.key.istate.h));

		hmac_sha256_preparekey(&key, auth_key, auth_keylen);
		for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
			istate[i] = cpu_to_be32(key.key.istate.h[i]);
			ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
		}
		memzero_explicit(&key, sizeof(key));
		return 0;
	}
	case ICP_QAT_HW_AUTH_ALGO_SHA512: {
		struct hmac_sha512_key key;
		__be64 *istate = (__be64 *)hash->sha.state1;
		__be64 *ostate = (__be64 *)(hash->sha.state1 +
					    sizeof(key.key.istate.h));

		hmac_sha512_preparekey(&key, auth_key, auth_keylen);
		for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
			istate[i] = cpu_to_be64(key.key.istate.h[i]);
			ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
		}
		memzero_explicit(&key, sizeof(key));
		return 0;
	}
	default:
		return -EFAULT;
	}
}

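/* Initialize the header fields common to all QAT lookaside (LA) requests */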
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

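/*
 * Set up the encrypt (cipher-then-hash) session: the content descriptor is
 * laid out as the cipher config and key followed by the hash setup and the
 * precomputed HMAC states, and the request template chains the cipher slice
 * into the auth slice.
 */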
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(ctx->hash_blocksize);

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

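/*
 * Set up the decrypt (hash-then-cipher) session: here the content descriptor
 * puts the hash setup and HMAC states first and the cipher config and key
 * after them, and the request template chains the auth slice into the
 * cipher slice.
 */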
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(ctx->hash_digestsize, 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(ctx->hash_blocksize);

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(ctx->hash_digestsize, 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

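/*
 * Fill the request template and content descriptor fields shared by the
 * encrypt and decrypt halves of an skcipher session. On AES-V2 capable
 * devices the UCS cipher slice is selected for XTS and CTR, which changes
 * how the key is laid out in the descriptor.
 */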
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	int mode = ctx->mode;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

		/* Store both XTS keys in CD, only the first key is sent
		 * to the HW, the second key is used for tweak calculation
		 */
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = keylen / 2;
	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = round_up(keylen, 16);
	} else {
		memcpy(cd->aes.key, key, keylen);
	}

	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

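/*
 * Derive the AES decryption round keys for XTS from the forward key by
 * expanding it and copying out the final round key(s). This is used on
 * devices where the UCS slice cannot convert ("reverse") the key itself,
 * see the DEC_NO_CONV configuration below.
 */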
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
				    u8 *key_reverse)
{
	struct crypto_aes_ctx aes_expanded;
	int nrounds;
	u8 *key;

	aes_expandkey(&aes_expanded, key_forward, keylen);
	if (keylen == AES_KEYSIZE_128) {
		nrounds = 10;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
	} else {
		/* AES_KEYSIZE_256 */
		nrounds = 14;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
	}
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		/* Key reversing not supported, set no convert */
		dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

		/* In-place key reversal */
		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
					dec_cd->ucs_aes.key);
	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	} else {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
	}
}

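/*
 * Map the AES key length to the QAT algorithm id. XTS keys carry two AES
 * keys back to back, so the accepted lengths double; 192-bit XTS keys are
 * not supported here and are routed to the software fallback by
 * qat_alg_skcipher_xts_setkey().
 */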
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = numa_node_id();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	aead_request_complete(areq, res);
}

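/*
 * Advance the IV as a 128-bit big-endian block counter: add the number of
 * AES blocks just processed to the low 64 bits and carry into the high
 * 64 bits on wraparound, so a follow-on request continues the keystream.
 */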
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u64 iv_lo_prev;
	u64 iv_lo;
	u64 iv_hi;

	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

	iv_lo = be64_to_cpu(qat_req->iv_lo);
	iv_hi = be64_to_cpu(qat_req->iv_hi);

	iv_lo_prev = iv_lo;
	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
	if (iv_lo < iv_lo_prev)
		iv_hi++;

	qat_req->iv_lo = cpu_to_be64(iv_lo);
	qat_req->iv_hi = cpu_to_be64(iv_hi);
}

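/*
 * For CBC the next IV is the last ciphertext block: read it from the
 * destination buffer on encryption and from the source buffer on
 * decryption.
 */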
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
	struct scatterlist *sgl;

	if (qat_req->encryption)
		sgl = sreq->dst;
	else
		sgl = sreq->src;

	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	switch (ctx->mode) {
	case ICP_QAT_HW_CIPHER_CTR_MODE:
		qat_alg_update_iv_ctr_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_CBC_MODE:
		qat_alg_update_iv_cbc_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_XTS_MODE:
		break;
	default:
		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
			 ctx->mode);
	}
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	if (qat_req->encryption)
		qat_alg_update_iv(qat_req);

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

	skcipher_request_complete(sreq, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_req->cb(qat_resp, qat_req);

	qat_alg_send_backlog(backlog);
}

static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
				    struct qat_crypto_instance *inst,
				    struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->sym_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

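/*
 * AEAD decrypt path: the cipher text excludes the trailing digest, and the
 * firmware is asked to compare the computed tag itself (CMP_AUTH); a
 * mismatch surfaces as a bad response status and is reported as -EBADMSG
 * by the callback.
 */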
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	int ret;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

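/*
 * AEAD encrypt path: cipher-then-hash, with the computed digest written to
 * the output buffer directly after the ciphertext (DIGEST_IN_BUFFER).
 */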
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = numa_node_id();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->mode = mode;

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
					const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

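	/*
	 * The QAT cipher slice does not support XTS with 192-bit AES keys,
	 * so hand those keys to the software fallback tfm.
	 */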
	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
				      ICP_QAT_HW_CIPHER_XTS_MODE);
	if (ret)
		return ret;

	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
					   keylen / 2);

	return ret;
}

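/*
 * Load the request IV into the firmware request. On the UCS slice the XTS
 * tweak must be pre-encrypted with the second key half, which is done here
 * with the tweak cipher; in every other case the IV is passed through
 * unchanged.
 */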
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	u8 *iv = qat_req->skcipher_req->iv;

	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
		crypto_cipher_encrypt_one(ctx->tweak,
					  (u8 *)cipher_param->u.cipher_IV_array,
					  iv);
	else
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = true;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = false;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

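	/*
	 * Compute the IV for the next request now, before submission: for
	 * CBC decryption it comes from the source buffer, which an in-place
	 * operation will have overwritten by completion time.
	 */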
	qat_alg_set_req_iv(qat_req);
	qat_alg_update_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash_alg,
			     unsigned int hash_digestsize,
			     unsigned int hash_blocksize)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->qat_hash_alg = hash_alg;
	ctx->hash_digestsize = hash_digestsize;
	ctx->hash_blocksize = hash_blocksize;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
				 SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
				 SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
				 SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

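/*
 * XTS needs two helper tfms: a software xts(aes) fallback for key sizes the
 * hardware cannot handle, and a single-block AES cipher for pre-encrypting
 * the tweak on AES-V2 devices. Size the request context for whichever user
 * is larger.
 */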
static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tweak)) {
		crypto_free_skcipher(ctx->ftfm);
		return PTR_ERR(ctx->tweak);
	}

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	if (ctx->tweak)
		crypto_free_cipher(ctx->tweak);

	qat_alg_skcipher_exit_tfm(tfm);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 100,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 100,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 100,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };

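/*
 * The algorithms are registered once, when the first device comes up, and
 * unregistered when the last one goes away; active_devs keeps the count
 * under algs_lock.
 */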
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}