// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128/192/256-bit keys in
 * CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

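/*
 * sun8i_ce_cipher_need_fallback() - check whether the request must be
 * handled by the software fallback instead of the CE hardware.
 * The hardware cannot handle requests with too many SG entries, a
 * length shorter than the IV, an empty or non-16-byte-multiple length,
 * or SG entries whose offset or length is not 32-bit word aligned.
 */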
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	unsigned int todo, len;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_maxsg++;

		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_leniv++;

		return true;
	}

	if (areq->cryptlen == 0) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_len0++;

		return true;
	}

	if (areq->cryptlen % 16) {
		if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
			algt->stat_fb_mod16++;

		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srcali++;

			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_srclen++;

			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_dstali++;

			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
				algt->stat_fb_dstlen++;

			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
}

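/*
 * sun8i_ce_cipher_fallback() - process the request with the allocated
 * software fallback skcipher, reusing the original request's callback,
 * scatterlists and IV.
 */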
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
		struct sun8i_ce_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ce_alg_template,
				    alg.skcipher.base);

		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

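/*
 * sun8i_ce_cipher_prepare() - build the CE task descriptor for the
 * request on its assigned flow: set algorithm, direction, key size and
 * data length, DMA-map the key, the (bounced) IV and the src/dst
 * scatterlists, then fill the descriptor address/length slots.
 * All mappings are unwound on error.
 */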
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		algt->stat_req++;

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes; older CEs use 32-bit words */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir & CE_DECRYPTION) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = desc_addr_val_le32(ce, rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = ns;
	rctx->nr_sgd = nd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);

		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (!dma_mapping_error(ce->dev, rctx->addr_iv))
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);

		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

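/*
 * sun8i_ce_cipher_unprepare() - undo the DMA work done in
 * sun8i_ce_cipher_prepare(): unmap the scatterlists, IV and key, and
 * store the next IV in areq->iv (taken from the backup of the last
 * ciphertext block for decryption, from the written destination for
 * encryption).
 */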
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
				      void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
					 DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}

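/*
 * sun8i_ce_cipher_run() - run the prepared task on the selected flow,
 * then clean up and complete the request. BHs are disabled around the
 * completion since the callback may expect softirq context.
 */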
static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	sun8i_ce_cipher_unprepare(engine, areq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
}

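/* Entry point called by the crypto engine: prepare, then run the request. */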
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
	int err = sun8i_ce_cipher_prepare(engine, areq);

	if (err)
		return err;

	sun8i_ce_cipher_run(engine, areq);
	return 0;
}

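/*
 * sun8i_ce_skdecrypt() - queue a decryption request, falling back to
 * software when the hardware cannot handle it.
 */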
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

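/* Same as sun8i_ce_skdecrypt(), but for the encryption direction. */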
int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

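/*
 * sun8i_ce_cipher_init() - allocate the software fallback, size the
 * request context to also hold the fallback request, and keep the CE
 * device powered via runtime PM for the lifetime of the transform.
 */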
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		memcpy(algt->fbname,
		       crypto_skcipher_driver_name(op->fallback_tfm),
		       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_resume_and_get(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

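/* Release the key, the fallback transform and the runtime-PM reference. */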
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

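/*
 * sun8i_ce_aes_setkey() - check the AES key length (128/192/256 bits),
 * keep a DMA-able copy of the key and propagate it to the fallback.
 */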
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

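/*
 * sun8i_ce_des3_setkey() - verify the 3DES key with
 * verify_skcipher_des3_key(), keep a DMA-able copy and propagate it to
 * the fallback.
 */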
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}