1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11 #include <linux/clk.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dmapool.h>
14 #include <linux/module.h>
15 #include <linux/of_device.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18
19 #include <crypto/aes.h>
20 #include <crypto/authenc.h>
21 #include <crypto/des.h>
22 #include <crypto/internal/aead.h>
23 #include <crypto/internal/hash.h>
24 #include <crypto/internal/skcipher.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/sha.h>
27
28 #include "sa2ul.h"
29
30 /* Byte offset for key in encryption security context */
31 #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
32 /* Byte offset for Aux-1 in encryption security context */
33 #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
34
35 #define SA_CMDL_UPD_ENC 0x0001
36 #define SA_CMDL_UPD_AUTH 0x0002
37 #define SA_CMDL_UPD_ENC_IV 0x0004
38 #define SA_CMDL_UPD_AUTH_IV 0x0008
39 #define SA_CMDL_UPD_AUX_KEY 0x0010
40
41 #define SA_AUTH_SUBKEY_LEN 16
42 #define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
43 #define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
44
45 #define MODE_CONTROL_BYTES 27
46 #define SA_HASH_PROCESSING 0
47 #define SA_CRYPTO_PROCESSING 0
48 #define SA_UPLOAD_HASH_TO_TLR BIT(6)
49
50 #define SA_SW0_FLAGS_MASK 0xF0000
51 #define SA_SW0_CMDL_INFO_MASK 0x1F00000
52 #define SA_SW0_CMDL_PRESENT BIT(4)
53 #define SA_SW0_ENG_ID_MASK 0x3E000000
54 #define SA_SW0_DEST_INFO_PRESENT BIT(30)
55 #define SA_SW2_EGRESS_LENGTH 0xFF000000
56 #define SA_BASIC_HASH 0x10
57
58 #define SHA256_DIGEST_WORDS 8
59 /* Make 32-bit word from 4 bytes */
60 #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
61 ((b2) << 8) | (b3))
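
/*
 * For illustration: SA_MK_U32(0x61, 0x00, 0x00, 0x18) evaluates to
 * 0x61000018, i.e. b0 ends up as the most significant byte of the word.
 */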
62
63 /* size of SCCTL structure in bytes */
64 #define SA_SCCTL_SZ 16
65
66 /* Max Authentication tag size */
67 #define SA_MAX_AUTH_TAG_SZ 64
68
69 #define PRIV_ID 0x1
70 #define PRIV 0x1
71
72 static struct device *sa_k3_dev;
73
74 /**
75 * struct sa_cmdl_cfg - Command label configuration descriptor
76 * @aalg: authentication algorithm ID
77 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
78 * @auth_eng_id: Authentication Engine ID
79 * @iv_size: Initialization Vector size
80 * @akey: Authentication key
81 * @akey_len: Authentication key length
82 * @enc: True, if this is an encode request
83 */
84 struct sa_cmdl_cfg {
85 int aalg;
86 u8 enc_eng_id;
87 u8 auth_eng_id;
88 u8 iv_size;
89 const u8 *akey;
90 u16 akey_len;
91 bool enc;
92 };
93
94 /**
95 * struct algo_data - Crypto algorithm specific data
96 * @enc_eng: Encryption engine info structure
97 * @auth_eng: Authentication engine info structure
98 * @auth_ctrl: Authentication control word
99 * @hash_size: Size of digest
100 * @iv_idx: iv index in psdata
101 * @iv_out_size: iv out size
102 * @ealg_id: Encryption Algorithm ID
103 * @aalg_id: Authentication algorithm ID
104 * @mci_enc: Mode Control Instruction for Encryption algorithm
105 * @mci_dec: Mode Control Instruction for Decryption
106 * @inv_key: Whether the encryption algorithm demands key inversion
107 * @ctx: Pointer to the algorithm context
108 * @keyed_mac: Whether the authentication algorithm has key
109 * @prep_iopad: Function pointer to generate intermediate ipad/opad
110 */
111 struct algo_data {
112 struct sa_eng_info enc_eng;
113 struct sa_eng_info auth_eng;
114 u8 auth_ctrl;
115 u8 hash_size;
116 u8 iv_idx;
117 u8 iv_out_size;
118 u8 ealg_id;
119 u8 aalg_id;
120 u8 *mci_enc;
121 u8 *mci_dec;
122 bool inv_key;
123 struct sa_tfm_ctx *ctx;
124 bool keyed_mac;
125 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
126 u16 key_sz, __be32 *ipad, __be32 *opad);
127 };
128
129 /**
130 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
131 * @type: Type of the crypto algorithm.
132 * @alg: Union of crypto algorithm definitions.
133 * @registered: Flag indicating if the crypto algorithm is already registered
134 */
135 struct sa_alg_tmpl {
136 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
137 union {
138 struct skcipher_alg skcipher;
139 struct ahash_alg ahash;
140 struct aead_alg aead;
141 } alg;
142 bool registered;
143 };
144
145 /**
146 * struct sa_mapped_sg: scatterlist information for tx and rx
147 * @mapped: Set to true if the @sgt is mapped
148 * @dir: mapping direction used for @sgt
149 * @split_sg: Set if the sg is split and needs to be freed up
150 * @static_sg: Static scatterlist entry for overriding data
151 * @sgt: scatterlist table for DMA API use
152 */
153 struct sa_mapped_sg {
154 bool mapped;
155 enum dma_data_direction dir;
156 struct scatterlist static_sg;
157 struct scatterlist *split_sg;
158 struct sg_table sgt;
159 };
160 /**
161 * struct sa_rx_data: RX Packet miscellaneous data place holder
162 * @req: crypto request data pointer
163 * @ddev: pointer to the DMA device
164 * @tx_in: dma_async_tx_descriptor pointer for rx channel
165 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
166 * @enc: Flag indicating either encryption or decryption
167 * @enc_iv_size: Initialisation vector size
168 * @iv_idx: Initialisation vector index
169 */
170 struct sa_rx_data {
171 void *req;
172 struct device *ddev;
173 struct dma_async_tx_descriptor *tx_in;
174 struct sa_mapped_sg mapped_sg[2];
175 u8 enc;
176 u8 enc_iv_size;
177 u8 iv_idx;
178 };
179
180 /**
181 * struct sa_req: SA request definition
182 * @dev: device for the request
183 * @size: total data to be transmitted via DMA
184 * @enc_offset: offset of cipher data
185 * @enc_size: data to be passed to cipher engine
186 * @enc_iv: cipher IV
187 * @auth_offset: offset of the authentication data
188 * @auth_size: size of the authentication data
189 * @auth_iv: authentication IV
190 * @type: algorithm type for the request
191 * @cmdl: command label pointer
192 * @base: pointer to the base request
193 * @ctx: pointer to the algorithm context data
194 * @enc: true if this is an encode request
195 * @src: source data
196 * @dst: destination data
197 * @callback: DMA callback for the request
198 * @mdata_size: metadata size passed to DMA
199 */
200 struct sa_req {
201 struct device *dev;
202 u16 size;
203 u8 enc_offset;
204 u16 enc_size;
205 u8 *enc_iv;
206 u8 auth_offset;
207 u16 auth_size;
208 u8 *auth_iv;
209 u32 type;
210 u32 *cmdl;
211 struct crypto_async_request *base;
212 struct sa_tfm_ctx *ctx;
213 bool enc;
214 struct scatterlist *src;
215 struct scatterlist *dst;
216 dma_async_tx_callback callback;
217 u16 mdata_size;
218 };
219
220 /*
221 * Mode Control Instructions for various Key lengths 128, 192, 256
222 * For CBC (Cipher Block Chaining) mode for encryption
223 */
224 static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
225 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
228 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
231 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
234 };
235
236 /*
237 * Mode Control Instructions for various Key lengths 128, 192, 256
238 * For CBC (Cipher Block Chaining) mode for decryption
239 */
240 static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
241 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
244 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
247 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
250 };
251
252 /*
253 * Mode Control Instructions for various Key lengths 128, 192, 256
254 * For CBC (Cipher Block Chaining) mode for encryption
255 */
256 static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
257 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
260 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
263 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
266 };
267
268 /*
269 * Mode Control Instructions for various Key lengths 128, 192, 256
270 * For CBC (Cipher Block Chaining) mode for decryption
271 */
272 static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
273 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
274 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
276 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
279 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
282 };
283
284 /*
285 * Mode Control Instructions for various Key lengths 128, 192, 256
286 * For ECB (Electronic Code Book) mode for encryption
287 */
288 static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
289 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
292 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
295 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
298 };
299
300 /*
301 * Mode Control Instructions for various Key lengths 128, 192, 256
302 * For ECB (Electronic Code Book) mode for decryption
303 */
304 static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
305 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
308 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
311 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
314 };
315
316 /*
317 * Mode Control Instructions for the 3DES algorithm,
318 * for CBC (Cipher Block Chaining) and ECB modes,
319 * encryption and decryption respectively
320 */
321 static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
322 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00,
325 };
326
327 static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
328 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00,
331 };
332
333 static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
334 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00,
337 };
338
339 static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
340 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00,
343 };
344
345 /*
346 * Perform 16-byte (128-bit) swizzling.
347 * The SA2UL expects the security context in little-endian order and its
348 * bus width is 128 bits (16 bytes), so swap 16 bytes at a time from the
349 * higher to the lower address.
350 */
351 static void sa_swiz_128(u8 *in, u16 len)
352 {
353 u8 data[16];
354 int i, j;
355
356 for (i = 0; i < len; i += 16) {
357 memcpy(data, &in[i], 16);
358 for (j = 0; j < 16; j++)
359 in[i + j] = data[15 - j];
360 }
361 }
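
/*
 * A small sketch of the transform above: each aligned 16-byte block is
 * reversed in place, e.g. in[0..15] = { 0x00, 0x01, ..., 0x0f } becomes
 * { 0x0f, 0x0e, ..., 0x00 }, and the same is repeated for every following
 * 16-byte block of the context.
 */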
362
363 /* Prepare the ipad and opad from the key, per HMAC (RFC 2104) step 1 */
364 static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
365 {
366 int i;
367
368 for (i = 0; i < key_sz; i++) {
369 k_ipad[i] = key[i] ^ 0x36;
370 k_opad[i] = key[i] ^ 0x5c;
371 }
372
373 /* Instead of XOR with 0 */
374 for (; i < SHA1_BLOCK_SIZE; i++) {
375 k_ipad[i] = 0x36;
376 k_opad[i] = 0x5c;
377 }
378 }
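
/*
 * Worked example of the padding rule above (RFC 2104): for a key byte of
 * 0x0b, k_ipad gets 0x0b ^ 0x36 = 0x3d and k_opad gets 0x0b ^ 0x5c = 0x57;
 * positions beyond the key length hold plain 0x36/0x5c, since XORing the
 * constants with the zero-padded key changes nothing.
 */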
379
380 static void sa_export_shash(struct shash_desc *hash, int block_size,
381 int digest_size, __be32 *out)
382 {
383 union {
384 struct sha1_state sha1;
385 struct sha256_state sha256;
386 struct sha512_state sha512;
387 } sha;
388 void *state;
389 u32 *result;
390 int i;
391
392 switch (digest_size) {
393 case SHA1_DIGEST_SIZE:
394 state = &sha.sha1;
395 result = sha.sha1.state;
396 break;
397 case SHA256_DIGEST_SIZE:
398 state = &sha.sha256;
399 result = sha.sha256.state;
400 break;
401 default:
402 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
403 digest_size);
404 return;
405 }
406
407 crypto_shash_export(hash, state);
408
409 for (i = 0; i < digest_size >> 2; i++)
410 out[i] = cpu_to_be32(result[i]);
411 }
412
413 static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
414 u16 key_sz, __be32 *ipad, __be32 *opad)
415 {
416 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
417 int block_size = crypto_shash_blocksize(data->ctx->shash);
418 int digest_size = crypto_shash_digestsize(data->ctx->shash);
419 u8 k_ipad[SHA1_BLOCK_SIZE];
420 u8 k_opad[SHA1_BLOCK_SIZE];
421
422 shash->tfm = data->ctx->shash;
423
424 prepare_kiopad(k_ipad, k_opad, key, key_sz);
425
426 memzero_explicit(ipad, block_size);
427 memzero_explicit(opad, block_size);
428
429 crypto_shash_init(shash);
430 crypto_shash_update(shash, k_ipad, block_size);
431 sa_export_shash(shash, block_size, digest_size, ipad);
432
433 crypto_shash_init(shash);
434 crypto_shash_update(shash, k_opad, block_size);
435
436 sa_export_shash(shash, block_size, digest_size, opad);
437 }
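
/*
 * Note on the above: what is exported is the partial hash state after one
 * block of k_ipad/k_opad, i.e. the usual HMAC precomputation. The engine
 * can then resume the inner and outer hashes from these intermediate states
 * instead of re-hashing the pads for every request.
 */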
438
439 /* Derive the inverse key used in AES-CBC decryption operation */
440 static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
441 {
442 struct crypto_aes_ctx ctx;
443 int key_pos;
444
445 if (aes_expandkey(&ctx, key, key_sz)) {
446 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
447 return -EINVAL;
448 }
449
450 /* Workaround to get the right inverse for AES_KEYSIZE_192 sized keys */
451 if (key_sz == AES_KEYSIZE_192) {
452 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
453 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
454 }
455
456 /* Based on the crypto_aes_expand_key() logic */
457 switch (key_sz) {
458 case AES_KEYSIZE_128:
459 case AES_KEYSIZE_192:
460 key_pos = key_sz + 24;
461 break;
462
463 case AES_KEYSIZE_256:
464 key_pos = key_sz + 24 - 4;
465 break;
466
467 default:
468 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
469 return -EINVAL;
470 }
471
472 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
473 return 0;
474 }
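
/*
 * Background note (derived from the indexing above, not from TI
 * documentation): key_pos indexes key_enc[] in 32-bit words so that the
 * copied key_sz bytes form the tail of the aes_expandkey() schedule, i.e.
 * the last round keys, which is where the hardware's decryption starts.
 * The AES-192 schedule (52 words) stops two words short of a 24-byte
 * aligned tail, hence the fix-up computing key_enc[52] and key_enc[53]
 * with the normal key-schedule recurrence.
 */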
475
476 /* Set Security context for the encryption engine */
477 static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
478 u8 enc, u8 *sc_buf)
479 {
480 const u8 *mci = NULL;
481
482 /* Set Encryption mode selector to crypto processing */
483 sc_buf[0] = SA_CRYPTO_PROCESSING;
484
485 if (enc)
486 mci = ad->mci_enc;
487 else
488 mci = ad->mci_dec;
489 /* Set the mode control instructions in security context */
490 if (mci)
491 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
492
493 /* For AES-CBC decryption get the inverse key */
494 if (ad->inv_key && !enc) {
495 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
496 return -EINVAL;
497 /* For all other cases: key is used */
498 } else {
499 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
500 }
501
502 return 0;
503 }
504
505 /* Set Security context for the authentication engine */
506 static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
507 u8 *sc_buf)
508 {
509 __be32 ipad[64], opad[64];
510
511 /* Set Authentication mode selector to hash processing */
512 sc_buf[0] = SA_HASH_PROCESSING;
513 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
514 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
515 sc_buf[1] |= ad->auth_ctrl;
516
517 /* Copy the keys or ipad/opad */
518 if (ad->keyed_mac) {
519 ad->prep_iopad(ad, key, key_sz, ipad, opad);
520
521 /* Copy ipad to AuthKey */
522 memcpy(&sc_buf[32], ipad, ad->hash_size);
523 /* Copy opad to Aux-1 */
524 memcpy(&sc_buf[64], opad, ad->hash_size);
525 } else {
526 /* basic hash */
527 sc_buf[1] |= SA_BASIC_HASH;
528 }
529 }
530
531 static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
532 {
533 int j;
534
535 for (j = 0; j < ((size16) ? 4 : 2); j++) {
536 *out = cpu_to_be32(*((u32 *)iv));
537 iv += 4;
538 out++;
539 }
540 }
541
542 /* Format general command label */
543 static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
544 struct sa_cmdl_upd_info *upd_info)
545 {
546 u8 enc_offset = 0, auth_offset = 0, total = 0;
547 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
548 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
549 u32 *word_ptr = (u32 *)cmdl;
550 int i;
551
552 /* Clear the command label */
553 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
554
555 /* Initialize the command update structure */
556 memzero_explicit(upd_info, sizeof(*upd_info));
557
558 if (cfg->enc_eng_id && cfg->auth_eng_id) {
559 if (cfg->enc) {
560 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
561 enc_next_eng = cfg->auth_eng_id;
562
563 if (cfg->iv_size)
564 auth_offset += cfg->iv_size;
565 } else {
566 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
567 auth_next_eng = cfg->enc_eng_id;
568 }
569 }
570
571 if (cfg->enc_eng_id) {
572 upd_info->flags |= SA_CMDL_UPD_ENC;
573 upd_info->enc_size.index = enc_offset >> 2;
574 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
575 /* Encryption command label */
576 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
577
578 /* Encryption modes requiring IV */
579 if (cfg->iv_size) {
580 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
581 upd_info->enc_iv.index =
582 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
583 upd_info->enc_iv.size = cfg->iv_size;
584
585 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
586 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
587
588 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
589 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
590 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
591 } else {
592 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
593 SA_CMDL_HEADER_SIZE_BYTES;
594 total += SA_CMDL_HEADER_SIZE_BYTES;
595 }
596 }
597
598 if (cfg->auth_eng_id) {
599 upd_info->flags |= SA_CMDL_UPD_AUTH;
600 upd_info->auth_size.index = auth_offset >> 2;
601 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
602 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
603 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
604 SA_CMDL_HEADER_SIZE_BYTES;
605 total += SA_CMDL_HEADER_SIZE_BYTES;
606 }
607
608 total = roundup(total, 8);
609
610 for (i = 0; i < total / 4; i++)
611 word_ptr[i] = swab32(word_ptr[i]);
612
613 return total;
614 }
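
/*
 * Rough shape of the label built above (field positions come from the
 * SA_CMDL_* constants in sa2ul.h): each engine gets a small header holding
 * the next-engine ID, the label length and the payload length/offset words
 * that sa_update_cmdl() patches per request; for IV'd ciphers the IV slot
 * follows the encryption header, and for combined encrypt+auth cases a
 * second header for the authentication engine is appended. The finished
 * label is then word-swapped into the byte order the engine expects.
 */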
615
616 /* Update Command label */
617 static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
618 struct sa_cmdl_upd_info *upd_info)
619 {
620 int i = 0, j;
621
622 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
623 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
624 cmdl[upd_info->enc_size.index] |= req->enc_size;
625 cmdl[upd_info->enc_offset.index] &=
626 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
627 cmdl[upd_info->enc_offset.index] |=
628 ((u32)req->enc_offset <<
629 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
630
631 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
632 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
633 u32 *enc_iv = (u32 *)req->enc_iv;
634
635 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
636 data[j] = cpu_to_be32(*enc_iv);
637 enc_iv++;
638 }
639 }
640 }
641
642 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
643 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
644 cmdl[upd_info->auth_size.index] |= req->auth_size;
645 cmdl[upd_info->auth_offset.index] &=
646 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
647 cmdl[upd_info->auth_offset.index] |=
648 ((u32)req->auth_offset <<
649 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
650 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
651 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
652 req->auth_iv,
653 (upd_info->auth_iv.size > 8));
654 }
655 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
656 int offset = (req->auth_size & 0xF) ? 4 : 0;
657
658 memcpy(&cmdl[upd_info->aux_key_info.index],
659 &upd_info->aux_key[offset], 16);
660 }
661 }
662 }
663
664 /* Format SWINFO words to be sent to SA */
665 static
666 void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
667 u8 cmdl_present, u8 cmdl_offset, u8 flags,
668 u8 hash_size, u32 *swinfo)
669 {
670 swinfo[0] = sc_id;
671 swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
672 if (likely(cmdl_present))
673 swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
674 __ffs(SA_SW0_CMDL_INFO_MASK));
675 swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
676
677 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
678 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
679 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
680 swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
681 }
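
/*
 * Packing implied by the masks above: SW word 0 carries the security
 * context ID in bits 0-15, the flags at bit 16, the command label
 * offset/present info at bit 20, the engine ID at bit 25 and the
 * "destination info present" bit 30; SW words 1 and 2 hold the 64-bit
 * context address, with the egress (hash) length in the top byte of word 2.
 */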
682
683 /* Dump the security context */
684 static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
685 {
686 #ifdef DEBUG
687 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
688 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
689 16, 1, buf, SA_CTX_MAX_SZ, false);
690 #endif
691 }
692
693 static
694 int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
695 u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
696 struct algo_data *ad, u8 enc, u32 *swinfo)
697 {
698 int enc_sc_offset = 0;
699 int auth_sc_offset = 0;
700 u8 *sc_buf = ctx->sc;
701 u16 sc_id = ctx->sc_id;
702 u8 first_engine = 0;
703
704 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
705
706 if (ad->auth_eng.eng_id) {
707 if (enc)
708 first_engine = ad->enc_eng.eng_id;
709 else
710 first_engine = ad->auth_eng.eng_id;
711
712 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
713 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
714 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
715 if (!ad->hash_size)
716 return -EINVAL;
717 ad->hash_size = roundup(ad->hash_size, 8);
718
719 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
720 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
721 first_engine = ad->enc_eng.eng_id;
722 sc_buf[1] = SA_SCCTL_FE_ENC;
723 ad->hash_size = ad->iv_out_size;
724 }
725
726 /* SCCTL Owner info: 0=host, 1=CP_ACE */
727 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
728 memcpy(&sc_buf[2], &sc_id, 2);
729 sc_buf[4] = 0x0;
730 sc_buf[5] = PRIV_ID;
731 sc_buf[6] = PRIV;
732 sc_buf[7] = 0x0;
733
734 /* Prepare context for encryption engine */
735 if (ad->enc_eng.sc_size) {
736 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
737 &sc_buf[enc_sc_offset]))
738 return -EINVAL;
739 }
740
741 /* Prepare context for authentication engine */
742 if (ad->auth_eng.sc_size)
743 sa_set_sc_auth(ad, auth_key, auth_key_sz,
744 &sc_buf[auth_sc_offset]);
745
746 /* Set the ownership of context to CP_ACE */
747 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
748
749 /* swizzle the security context */
750 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
751
752 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
753 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
754
755 sa_dump_sc(sc_buf, ctx->sc_phys);
756
757 return 0;
758 }
759
760 /* Free the per direction context memory */
761 static void sa_free_ctx_info(struct sa_ctx_info *ctx,
762 struct sa_crypto_data *data)
763 {
764 unsigned long bn;
765
766 bn = ctx->sc_id - data->sc_id_start;
767 spin_lock(&data->scid_lock);
768 __clear_bit(bn, data->ctx_bm);
769 data->sc_id--;
770 spin_unlock(&data->scid_lock);
771
772 if (ctx->sc) {
773 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
774 ctx->sc = NULL;
775 }
776 }
777
778 static int sa_init_ctx_info(struct sa_ctx_info *ctx,
779 struct sa_crypto_data *data)
780 {
781 unsigned long bn;
782 int err;
783
784 spin_lock(&data->scid_lock);
785 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
786 __set_bit(bn, data->ctx_bm);
787 data->sc_id++;
788 spin_unlock(&data->scid_lock);
789
790 ctx->sc_id = (u16)(data->sc_id_start + bn);
791
792 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
793 if (!ctx->sc) {
794 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
795 err = -ENOMEM;
796 goto scid_rollback;
797 }
798
799 return 0;
800
801 scid_rollback:
802 spin_lock(&data->scid_lock);
803 __clear_bit(bn, data->ctx_bm);
804 data->sc_id--;
805 spin_unlock(&data->scid_lock);
806
807 return err;
808 }
809
810 static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
811 {
812 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
813 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
814
815 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
816 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
817 ctx->dec.sc_id, &ctx->dec.sc_phys);
818
819 sa_free_ctx_info(&ctx->enc, data);
820 sa_free_ctx_info(&ctx->dec, data);
821
822 crypto_free_sync_skcipher(ctx->fallback.skcipher);
823 }
824
825 static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
826 {
827 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
828 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
829 const char *name = crypto_tfm_alg_name(&tfm->base);
830 int ret;
831
832 memzero_explicit(ctx, sizeof(*ctx));
833 ctx->dev_data = data;
834
835 ret = sa_init_ctx_info(&ctx->enc, data);
836 if (ret)
837 return ret;
838 ret = sa_init_ctx_info(&ctx->dec, data);
839 if (ret) {
840 sa_free_ctx_info(&ctx->enc, data);
841 return ret;
842 }
843
844 ctx->fallback.skcipher =
845 crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
846
847 if (IS_ERR(ctx->fallback.skcipher)) {
848 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
849 return PTR_ERR(ctx->fallback.skcipher);
850 }
851
852 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
853 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
854 ctx->dec.sc_id, &ctx->dec.sc_phys);
855 return 0;
856 }
857
858 static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
859 unsigned int keylen, struct algo_data *ad)
860 {
861 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
862 int cmdl_len;
863 struct sa_cmdl_cfg cfg;
864 int ret;
865
866 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
867 keylen != AES_KEYSIZE_256)
868 return -EINVAL;
869
870 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
871 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
872
873 memzero_explicit(&cfg, sizeof(cfg));
874 cfg.enc_eng_id = ad->enc_eng.eng_id;
875 cfg.iv_size = crypto_skcipher_ivsize(tfm);
876
877 crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
878 CRYPTO_TFM_REQ_MASK);
879 crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
880 tfm->base.crt_flags &
881 CRYPTO_TFM_REQ_MASK);
882 ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
883 if (ret)
884 return ret;
885
886 /* Setup Encryption Security Context & Command label template */
887 if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
888 &ctx->enc.epib[1]))
889 goto badkey;
890
891 cmdl_len = sa_format_cmdl_gen(&cfg,
892 (u8 *)ctx->enc.cmdl,
893 &ctx->enc.cmdl_upd_info);
894 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
895 goto badkey;
896
897 ctx->enc.cmdl_size = cmdl_len;
898
899 /* Setup Decryption Security Context & Command label template */
900 if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
901 &ctx->dec.epib[1]))
902 goto badkey;
903
904 cfg.enc_eng_id = ad->enc_eng.eng_id;
905 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
906 &ctx->dec.cmdl_upd_info);
907
908 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
909 goto badkey;
910
911 ctx->dec.cmdl_size = cmdl_len;
912 ctx->iv_idx = ad->iv_idx;
913
914 return 0;
915
916 badkey:
917 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
918 return -EINVAL;
919 }
920
921 static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
922 unsigned int keylen)
923 {
924 struct algo_data ad = { 0 };
925 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
926 int key_idx = (keylen >> 3) - 2;
927
928 if (key_idx >= 3)
929 return -EINVAL;
930
931 ad.mci_enc = mci_cbc_enc_array[key_idx];
932 ad.mci_dec = mci_cbc_dec_array[key_idx];
933 ad.inv_key = true;
934 ad.ealg_id = SA_EALG_ID_AES_CBC;
935 ad.iv_idx = 4;
936 ad.iv_out_size = 16;
937
938 return sa_cipher_setkey(tfm, key, keylen, &ad);
939 }
940
941 static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
942 unsigned int keylen)
943 {
944 struct algo_data ad = { 0 };
945 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
946 int key_idx = (keylen >> 3) - 2;
947
948 if (key_idx >= 3)
949 return -EINVAL;
950
951 ad.mci_enc = mci_ecb_enc_array[key_idx];
952 ad.mci_dec = mci_ecb_dec_array[key_idx];
953 ad.inv_key = true;
954 ad.ealg_id = SA_EALG_ID_AES_ECB;
955
956 return sa_cipher_setkey(tfm, key, keylen, &ad);
957 }
958
959 static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
960 unsigned int keylen)
961 {
962 struct algo_data ad = { 0 };
963
964 ad.mci_enc = mci_cbc_3des_enc_array;
965 ad.mci_dec = mci_cbc_3des_dec_array;
966 ad.ealg_id = SA_EALG_ID_3DES_CBC;
967 ad.iv_idx = 6;
968 ad.iv_out_size = 8;
969
970 return sa_cipher_setkey(tfm, key, keylen, &ad);
971 }
972
973 static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
974 unsigned int keylen)
975 {
976 struct algo_data ad = { 0 };
977
978 ad.mci_enc = mci_ecb_3des_enc_array;
979 ad.mci_dec = mci_ecb_3des_dec_array;
980
981 return sa_cipher_setkey(tfm, key, keylen, &ad);
982 }
983
984 static void sa_sync_from_device(struct sa_rx_data *rxd)
985 {
986 struct sg_table *sgt;
987
988 if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
989 sgt = &rxd->mapped_sg[0].sgt;
990 else
991 sgt = &rxd->mapped_sg[1].sgt;
992
993 dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
994 }
995
996 static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
997 {
998 int i;
999
1000 for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1001 struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1002
1003 if (mapped_sg->mapped) {
1004 dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1005 mapped_sg->dir, 0);
1006 kfree(mapped_sg->split_sg);
1007 }
1008 }
1009
1010 kfree(rxd);
1011 }
1012
1013 static void sa_aes_dma_in_callback(void *data)
1014 {
1015 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1016 struct skcipher_request *req;
1017 u32 *result;
1018 __be32 *mdptr;
1019 size_t ml, pl;
1020 int i;
1021
1022 sa_sync_from_device(rxd);
1023 req = container_of(rxd->req, struct skcipher_request, base);
1024
1025 if (req->iv) {
1026 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1027 &ml);
1028 result = (u32 *)req->iv;
1029
1030 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1031 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1032 }
1033
1034 sa_free_sa_rx_data(rxd);
1035
1036 skcipher_request_complete(req, 0);
1037 }
1038
1039 static void
1040 sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1041 {
1042 u32 *out, *in;
1043 int i;
1044
1045 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1046 *out++ = *in++;
1047
1048 mdptr[4] = (0xFFFF << 16);
1049 for (out = &mdptr[5], in = psdata, i = 0;
1050 i < pslen / sizeof(u32); i++)
1051 *out++ = *in++;
1052 }
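
/*
 * Resulting metadata layout, as written above: the EPIB words are copied
 * first, mdptr[4] is set to the 0xFFFF marker in its upper half-word, and
 * the command label words (psdata) follow from mdptr[5] onwards.
 */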
1053
1054 static int sa_run(struct sa_req *req)
1055 {
1056 struct sa_rx_data *rxd;
1057 gfp_t gfp_flags;
1058 u32 cmdl[SA_MAX_CMDL_WORDS];
1059 struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1060 struct device *ddev;
1061 struct dma_chan *dma_rx;
1062 int sg_nents, src_nents, dst_nents;
1063 struct scatterlist *src, *dst;
1064 size_t pl, ml, split_size;
1065 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1066 int ret;
1067 struct dma_async_tx_descriptor *tx_out;
1068 u32 *mdptr;
1069 bool diff_dst;
1070 enum dma_data_direction dir_src;
1071 struct sa_mapped_sg *mapped_sg;
1072
1073 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1074 GFP_KERNEL : GFP_ATOMIC;
1075
1076 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1077 if (!rxd)
1078 return -ENOMEM;
1079
1080 if (req->src != req->dst) {
1081 diff_dst = true;
1082 dir_src = DMA_TO_DEVICE;
1083 } else {
1084 diff_dst = false;
1085 dir_src = DMA_BIDIRECTIONAL;
1086 }
1087
1088 /*
1089 * SA2UL has an interesting feature where the receive DMA channel
1090 * is selected based on the amount of data passed to the engine. Within the
1091 * transition range, there is also a space where it is impossible
1092 * to determine where the data will end up, and this should be
1093 * avoided. This will be handled by the SW fallback mechanism by
1094 * the individual algorithm implementations.
1095 */
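/*
 * Note: the 256-byte boundary below picks between the two RX channels.
 * Sizes inside the ambiguous window (SA_UNSAFE_DATA_SZ_MIN..MAX, see
 * sa2ul.h) are expected never to reach this point, since sa_cipher_run()
 * and sa_sha_run() divert such requests to the software fallback.
 */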
1096 if (req->size >= 256)
1097 dma_rx = pdata->dma_rx2;
1098 else
1099 dma_rx = pdata->dma_rx1;
1100
1101 ddev = dma_rx->device->dev;
1102 rxd->ddev = ddev;
1103
1104 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1105
1106 sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1107
1108 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1109 if (req->enc)
1110 req->type |=
1111 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1112 else
1113 req->type |=
1114 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1115 }
1116
1117 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1118
1119 /*
1120 * Map the packets, first we check if the data fits into a single
1121 * sg entry and use that if possible. If it does not fit, we check
1122 * if we need to do sg_split to align the scatterlist data on the
1123 * actual data size being processed by the crypto engine.
1124 */
1125 src = req->src;
1126 sg_nents = sg_nents_for_len(src, req->size);
1127
1128 split_size = req->size;
1129
1130 mapped_sg = &rxd->mapped_sg[0];
1131 if (sg_nents == 1 && split_size <= req->src->length) {
1132 src = &mapped_sg->static_sg;
1133 src_nents = 1;
1134 sg_init_table(src, 1);
1135 sg_set_page(src, sg_page(req->src), split_size,
1136 req->src->offset);
1137
1138 mapped_sg->sgt.sgl = src;
1139 mapped_sg->sgt.orig_nents = src_nents;
1140 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1141 if (ret)
1142 return ret;
1143
1144 mapped_sg->dir = dir_src;
1145 mapped_sg->mapped = true;
1146 } else {
1147 mapped_sg->sgt.sgl = req->src;
1148 mapped_sg->sgt.orig_nents = sg_nents;
1149 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1150 if (ret)
1151 return ret;
1152
1153 mapped_sg->dir = dir_src;
1154 mapped_sg->mapped = true;
1155
1156 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1157 &split_size, &src, &src_nents, gfp_flags);
1158 if (ret) {
1159 src_nents = mapped_sg->sgt.nents;
1160 src = mapped_sg->sgt.sgl;
1161 } else {
1162 mapped_sg->split_sg = src;
1163 }
1164 }
1165
1166 dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1167
1168 if (!diff_dst) {
1169 dst_nents = src_nents;
1170 dst = src;
1171 } else {
1172 dst_nents = sg_nents_for_len(req->dst, req->size);
1173 mapped_sg = &rxd->mapped_sg[1];
1174
1175 if (dst_nents == 1 && split_size <= req->dst->length) {
1176 dst = &mapped_sg->static_sg;
1177 dst_nents = 1;
1178 sg_init_table(dst, 1);
1179 sg_set_page(dst, sg_page(req->dst), split_size,
1180 req->dst->offset);
1181
1182 mapped_sg->sgt.sgl = dst;
1183 mapped_sg->sgt.orig_nents = dst_nents;
1184 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1185 DMA_FROM_DEVICE, 0);
1186 if (ret)
1187 goto err_cleanup;
1188
1189 mapped_sg->dir = DMA_FROM_DEVICE;
1190 mapped_sg->mapped = true;
1191 } else {
1192 mapped_sg->sgt.sgl = req->dst;
1193 mapped_sg->sgt.orig_nents = dst_nents;
1194 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1195 DMA_FROM_DEVICE, 0);
1196 if (ret)
1197 goto err_cleanup;
1198
1199 mapped_sg->dir = DMA_FROM_DEVICE;
1200 mapped_sg->mapped = true;
1201
1202 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1203 0, 1, &split_size, &dst, &dst_nents,
1204 gfp_flags);
1205 if (ret) {
1206 dst_nents = mapped_sg->sgt.nents;
1207 dst = mapped_sg->sgt.sgl;
1208 } else {
1209 mapped_sg->split_sg = dst;
1210 }
1211 }
1212 }
1213
1214 rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1215 DMA_DEV_TO_MEM,
1216 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1217 if (!rxd->tx_in) {
1218 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1219 ret = -EINVAL;
1220 goto err_cleanup;
1221 }
1222
1223 rxd->req = (void *)req->base;
1224 rxd->enc = req->enc;
1225 rxd->iv_idx = req->ctx->iv_idx;
1226 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1227 rxd->tx_in->callback = req->callback;
1228 rxd->tx_in->callback_param = rxd;
1229
1230 tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1231 src_nents, DMA_MEM_TO_DEV,
1232 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1233
1234 if (!tx_out) {
1235 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1236 ret = -EINVAL;
1237 goto err_cleanup;
1238 }
1239
1240 /*
1241 * Prepare metadata for DMA engine. This essentially describes the
1242 * crypto algorithm to be used, data sizes, different keys etc.
1243 */
1244 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1245
1246 sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1247 sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1248 sa_ctx->epib);
1249
1250 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1251 dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1252
1253 dmaengine_submit(tx_out);
1254 dmaengine_submit(rxd->tx_in);
1255
1256 dma_async_issue_pending(dma_rx);
1257 dma_async_issue_pending(pdata->dma_tx);
1258
1259 return -EINPROGRESS;
1260
1261 err_cleanup:
1262 sa_free_sa_rx_data(rxd);
1263
1264 return ret;
1265 }
1266
1267 static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1268 {
1269 struct sa_tfm_ctx *ctx =
1270 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1271 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1272 struct sa_req sa_req = { 0 };
1273 int ret;
1274
1275 if (!req->cryptlen)
1276 return 0;
1277
1278 if (req->cryptlen % alg->cra_blocksize)
1279 return -EINVAL;
1280
1281 /* Use SW fallback if the data size is not supported */
1282 if (req->cryptlen > SA_MAX_DATA_SZ ||
1283 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1284 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1285 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
1286
1287 skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
1288 skcipher_request_set_callback(subreq, req->base.flags,
1289 NULL, NULL);
1290 skcipher_request_set_crypt(subreq, req->src, req->dst,
1291 req->cryptlen, req->iv);
1292 if (enc)
1293 ret = crypto_skcipher_encrypt(subreq);
1294 else
1295 ret = crypto_skcipher_decrypt(subreq);
1296
1297 skcipher_request_zero(subreq);
1298 return ret;
1299 }
1300
1301 sa_req.size = req->cryptlen;
1302 sa_req.enc_size = req->cryptlen;
1303 sa_req.src = req->src;
1304 sa_req.dst = req->dst;
1305 sa_req.enc_iv = iv;
1306 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1307 sa_req.enc = enc;
1308 sa_req.callback = sa_aes_dma_in_callback;
1309 sa_req.mdata_size = 44;
1310 sa_req.base = &req->base;
1311 sa_req.ctx = ctx;
1312
1313 return sa_run(&sa_req);
1314 }
1315
1316 static int sa_encrypt(struct skcipher_request *req)
1317 {
1318 return sa_cipher_run(req, req->iv, 1);
1319 }
1320
1321 static int sa_decrypt(struct skcipher_request *req)
1322 {
1323 return sa_cipher_run(req, req->iv, 0);
1324 }
1325
1326 static void sa_sha_dma_in_callback(void *data)
1327 {
1328 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1329 struct ahash_request *req;
1330 struct crypto_ahash *tfm;
1331 unsigned int authsize;
1332 int i;
1333 size_t ml, pl;
1334 u32 *result;
1335 __be32 *mdptr;
1336
1337 sa_sync_from_device(rxd);
1338 req = container_of(rxd->req, struct ahash_request, base);
1339 tfm = crypto_ahash_reqtfm(req);
1340 authsize = crypto_ahash_digestsize(tfm);
1341
1342 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1343 result = (u32 *)req->result;
1344
1345 for (i = 0; i < (authsize / 4); i++)
1346 result[i] = be32_to_cpu(mdptr[i + 4]);
1347
1348 sa_free_sa_rx_data(rxd);
1349
1350 ahash_request_complete(req, 0);
1351 }
1352
1353 static int zero_message_process(struct ahash_request *req)
1354 {
1355 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1356 int sa_digest_size = crypto_ahash_digestsize(tfm);
1357
1358 switch (sa_digest_size) {
1359 case SHA1_DIGEST_SIZE:
1360 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1361 break;
1362 case SHA256_DIGEST_SIZE:
1363 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1364 break;
1365 case SHA512_DIGEST_SIZE:
1366 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1367 break;
1368 default:
1369 return -EINVAL;
1370 }
1371
1372 return 0;
1373 }
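
/*
 * For reference, these are the standard digests of the empty message (e.g.
 * SHA-256("") starts with e3 b0 c4 42 ...); returning the precomputed
 * constant avoids pushing a zero-length transfer through the DMA/engine
 * path.
 */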
1374
1375 static int sa_sha_run(struct ahash_request *req)
1376 {
1377 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1378 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1379 struct sa_req sa_req = { 0 };
1380 size_t auth_len;
1381
1382 auth_len = req->nbytes;
1383
1384 if (!auth_len)
1385 return zero_message_process(req);
1386
1387 if (auth_len > SA_MAX_DATA_SZ ||
1388 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1389 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1390 struct ahash_request *subreq = &rctx->fallback_req;
1391 int ret = 0;
1392
1393 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1394 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1395
1396 crypto_ahash_init(subreq);
1397
1398 subreq->nbytes = auth_len;
1399 subreq->src = req->src;
1400 subreq->result = req->result;
1401
1402 ret |= crypto_ahash_update(subreq);
1403
1404 subreq->nbytes = 0;
1405
1406 ret |= crypto_ahash_final(subreq);
1407
1408 return ret;
1409 }
1410
1411 sa_req.size = auth_len;
1412 sa_req.auth_size = auth_len;
1413 sa_req.src = req->src;
1414 sa_req.dst = req->src;
1415 sa_req.enc = true;
1416 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1417 sa_req.callback = sa_sha_dma_in_callback;
1418 sa_req.mdata_size = 28;
1419 sa_req.ctx = ctx;
1420 sa_req.base = &req->base;
1421
1422 return sa_run(&sa_req);
1423 }
1424
1425 static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1426 {
1427 int bs = crypto_shash_blocksize(ctx->shash);
1428 int cmdl_len;
1429 struct sa_cmdl_cfg cfg;
1430
1431 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1432 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1433 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1434
1435 memset(ctx->authkey, 0, bs);
1436 memset(&cfg, 0, sizeof(cfg));
1437 cfg.aalg = ad->aalg_id;
1438 cfg.enc_eng_id = ad->enc_eng.eng_id;
1439 cfg.auth_eng_id = ad->auth_eng.eng_id;
1440 cfg.iv_size = 0;
1441 cfg.akey = NULL;
1442 cfg.akey_len = 0;
1443
1444 /* Setup Encryption Security Context & Command label template */
1445 if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
1446 &ctx->enc.epib[1]))
1447 goto badkey;
1448
1449 cmdl_len = sa_format_cmdl_gen(&cfg,
1450 (u8 *)ctx->enc.cmdl,
1451 &ctx->enc.cmdl_upd_info);
1452 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1453 goto badkey;
1454
1455 ctx->enc.cmdl_size = cmdl_len;
1456
1457 return 0;
1458
1459 badkey:
1460 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1461 return -EINVAL;
1462 }
1463
1464 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1465 {
1466 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1467 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1468 int ret;
1469
1470 memset(ctx, 0, sizeof(*ctx));
1471 ctx->dev_data = data;
1472 ret = sa_init_ctx_info(&ctx->enc, data);
1473 if (ret)
1474 return ret;
1475
1476 if (alg_base) {
1477 ctx->shash = crypto_alloc_shash(alg_base, 0,
1478 CRYPTO_ALG_NEED_FALLBACK);
1479 if (IS_ERR(ctx->shash)) {
1480 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1481 alg_base);
1482 return PTR_ERR(ctx->shash);
1483 }
1484 /* for fallback */
1485 ctx->fallback.ahash =
1486 crypto_alloc_ahash(alg_base, 0,
1487 CRYPTO_ALG_NEED_FALLBACK);
1488 if (IS_ERR(ctx->fallback.ahash)) {
1489 dev_err(ctx->dev_data->dev,
1490 "Could not load fallback driver\n");
1491 return PTR_ERR(ctx->fallback.ahash);
1492 }
1493 }
1494
1495 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1496 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1497 ctx->dec.sc_id, &ctx->dec.sc_phys);
1498
1499 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1500 sizeof(struct sa_sha_req_ctx) +
1501 crypto_ahash_reqsize(ctx->fallback.ahash));
1502
1503 return 0;
1504 }
1505
1506 static int sa_sha_digest(struct ahash_request *req)
1507 {
1508 return sa_sha_run(req);
1509 }
1510
1511 static int sa_sha_init(struct ahash_request *req)
1512 {
1513 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1514 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1515 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1516
1517 dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1518 crypto_ahash_digestsize(tfm), rctx);
1519
1520 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1521 rctx->fallback_req.base.flags =
1522 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1523
1524 return crypto_ahash_init(&rctx->fallback_req);
1525 }
1526
1527 static int sa_sha_update(struct ahash_request *req)
1528 {
1529 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1530 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1531 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1532
1533 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1534 rctx->fallback_req.base.flags =
1535 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1536 rctx->fallback_req.nbytes = req->nbytes;
1537 rctx->fallback_req.src = req->src;
1538
1539 return crypto_ahash_update(&rctx->fallback_req);
1540 }
1541
1542 static int sa_sha_final(struct ahash_request *req)
1543 {
1544 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1545 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1546 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1547
1548 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1549 rctx->fallback_req.base.flags =
1550 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1551 rctx->fallback_req.result = req->result;
1552
1553 return crypto_ahash_final(&rctx->fallback_req);
1554 }
1555
1556 static int sa_sha_finup(struct ahash_request *req)
1557 {
1558 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1559 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1560 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1561
1562 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1563 rctx->fallback_req.base.flags =
1564 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1565
1566 rctx->fallback_req.nbytes = req->nbytes;
1567 rctx->fallback_req.src = req->src;
1568 rctx->fallback_req.result = req->result;
1569
1570 return crypto_ahash_finup(&rctx->fallback_req);
1571 }
1572
1573 static int sa_sha_import(struct ahash_request *req, const void *in)
1574 {
1575 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1576 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1577 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1578
1579 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1580 rctx->fallback_req.base.flags = req->base.flags &
1581 CRYPTO_TFM_REQ_MAY_SLEEP;
1582
1583 return crypto_ahash_import(&rctx->fallback_req, in);
1584 }
1585
1586 static int sa_sha_export(struct ahash_request *req, void *out)
1587 {
1588 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1589 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1590 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1591 struct ahash_request *subreq = &rctx->fallback_req;
1592
1593 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1594 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1595
1596 return crypto_ahash_export(subreq, out);
1597 }
1598
1599 static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1600 {
1601 struct algo_data ad = { 0 };
1602 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1603
1604 sa_sha_cra_init_alg(tfm, "sha1");
1605
1606 ad.aalg_id = SA_AALG_ID_SHA1;
1607 ad.hash_size = SHA1_DIGEST_SIZE;
1608 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1609
1610 sa_sha_setup(ctx, &ad);
1611
1612 return 0;
1613 }
1614
1615 static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1616 {
1617 struct algo_data ad = { 0 };
1618 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1619
1620 sa_sha_cra_init_alg(tfm, "sha256");
1621
1622 ad.aalg_id = SA_AALG_ID_SHA2_256;
1623 ad.hash_size = SHA256_DIGEST_SIZE;
1624 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1625
1626 sa_sha_setup(ctx, &ad);
1627
1628 return 0;
1629 }
1630
1631 static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1632 {
1633 struct algo_data ad = { 0 };
1634 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1635
1636 sa_sha_cra_init_alg(tfm, "sha512");
1637
1638 ad.aalg_id = SA_AALG_ID_SHA2_512;
1639 ad.hash_size = SHA512_DIGEST_SIZE;
1640 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1641
1642 sa_sha_setup(ctx, &ad);
1643
1644 return 0;
1645 }
1646
1647 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1648 {
1649 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1650 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1651
1652 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1653 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1654 ctx->dec.sc_id, &ctx->dec.sc_phys);
1655
1656 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1657 sa_free_ctx_info(&ctx->enc, data);
1658
1659 crypto_free_shash(ctx->shash);
1660 crypto_free_ahash(ctx->fallback.ahash);
1661 }
1662
1663 static void sa_aead_dma_in_callback(void *data)
1664 {
1665 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1666 struct aead_request *req;
1667 struct crypto_aead *tfm;
1668 unsigned int start;
1669 unsigned int authsize;
1670 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1671 size_t pl, ml;
1672 int i;
1673 int err = 0;
1674 u16 auth_len;
1675 u32 *mdptr;
1676
1677 sa_sync_from_device(rxd);
1678 req = container_of(rxd->req, struct aead_request, base);
1679 tfm = crypto_aead_reqtfm(req);
1680 start = req->assoclen + req->cryptlen;
1681 authsize = crypto_aead_authsize(tfm);
1682
1683 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1684 for (i = 0; i < (authsize / 4); i++)
1685 mdptr[i + 4] = swab32(mdptr[i + 4]);
1686
1687 auth_len = req->assoclen + req->cryptlen;
1688
1689 if (rxd->enc) {
1690 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1691 1);
1692 } else {
1693 auth_len -= authsize;
1694 start -= authsize;
1695 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1696 0);
1697
1698 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1699 }
1700
1701 sa_free_sa_rx_data(rxd);
1702
1703 aead_request_complete(req, err);
1704 }
1705
1706 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1707 const char *fallback)
1708 {
1709 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1710 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1711 int ret;
1712
1713 memzero_explicit(ctx, sizeof(*ctx));
1714
1715 ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1716 if (IS_ERR(ctx->shash)) {
1717 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1718 return PTR_ERR(ctx->shash);
1719 }
1720
1721 ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1722 CRYPTO_ALG_NEED_FALLBACK);
1723
1724 if (IS_ERR(ctx->fallback.aead)) {
1725 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1726 fallback);
1727 return PTR_ERR(ctx->fallback.aead);
1728 }
1729
1730 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1731 crypto_aead_reqsize(ctx->fallback.aead));
1732
1733 ret = sa_init_ctx_info(&ctx->enc, data);
1734 if (ret)
1735 return ret;
1736
1737 ret = sa_init_ctx_info(&ctx->dec, data);
1738 if (ret) {
1739 sa_free_ctx_info(&ctx->enc, data);
1740 return ret;
1741 }
1742
1743 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1744 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1745 ctx->dec.sc_id, &ctx->dec.sc_phys);
1746
1747 return ret;
1748 }
1749
1750 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1751 {
1752 return sa_cra_init_aead(tfm, "sha1",
1753 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1754 }
1755
1756 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1757 {
1758 return sa_cra_init_aead(tfm, "sha256",
1759 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1760 }
1761
1762 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1763 {
1764 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1765 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1766
1767 crypto_free_shash(ctx->shash);
1768 crypto_free_aead(ctx->fallback.aead);
1769
1770 sa_free_ctx_info(&ctx->enc, data);
1771 sa_free_ctx_info(&ctx->dec, data);
1772 }
1773
1774 /* AEAD algorithm configuration interface function */
1775 static int sa_aead_setkey(struct crypto_aead *authenc,
1776 const u8 *key, unsigned int keylen,
1777 struct algo_data *ad)
1778 {
1779 struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1780 struct crypto_authenc_keys keys;
1781 int cmdl_len;
1782 struct sa_cmdl_cfg cfg;
1783 int key_idx;
1784
1785 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1786 return -EINVAL;
1787
1788 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1789 key_idx = (keys.enckeylen >> 3) - 2;
1790 if (key_idx >= 3)
1791 return -EINVAL;
1792
1793 ad->ctx = ctx;
1794 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1795 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1796 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1797 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1798 ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1799 ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1800 ad->inv_key = true;
1801 ad->keyed_mac = true;
1802 ad->ealg_id = SA_EALG_ID_AES_CBC;
1803 ad->prep_iopad = sa_prepare_iopads;
1804
1805 memset(&cfg, 0, sizeof(cfg));
1806 cfg.enc = true;
1807 cfg.aalg = ad->aalg_id;
1808 cfg.enc_eng_id = ad->enc_eng.eng_id;
1809 cfg.auth_eng_id = ad->auth_eng.eng_id;
1810 cfg.iv_size = crypto_aead_ivsize(authenc);
1811 cfg.akey = keys.authkey;
1812 cfg.akey_len = keys.authkeylen;
1813
1814 /* Setup Encryption Security Context & Command label template */
1815 if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1816 keys.authkey, keys.authkeylen,
1817 ad, 1, &ctx->enc.epib[1]))
1818 return -EINVAL;
1819
1820 cmdl_len = sa_format_cmdl_gen(&cfg,
1821 (u8 *)ctx->enc.cmdl,
1822 &ctx->enc.cmdl_upd_info);
1823 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1824 return -EINVAL;
1825
1826 ctx->enc.cmdl_size = cmdl_len;
1827
1828 /* Setup Decryption Security Context & Command label template */
1829 if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1830 keys.authkey, keys.authkeylen,
1831 ad, 0, &ctx->dec.epib[1]))
1832 return -EINVAL;
1833
1834 cfg.enc = false;
1835 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1836 &ctx->dec.cmdl_upd_info);
1837
1838 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1839 return -EINVAL;
1840
1841 ctx->dec.cmdl_size = cmdl_len;
1842
1843 crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1844 crypto_aead_set_flags(ctx->fallback.aead,
1845 crypto_aead_get_flags(authenc) &
1846 CRYPTO_TFM_REQ_MASK);
1847 	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1850 }
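/*
 * The key blob handed to sa_aead_setkey() follows the generic
 * authenc() convention parsed by crypto_authenc_extractkeys(): an
 * rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying
 * struct crypto_authenc_key_param { __be32 enckeylen; }, followed by
 * the authentication key and then the encryption key.  A minimal,
 * illustrative sketch of building such a blob ("blob", "authkey",
 * "enckey" and their lengths are example names; error and bounds
 * checking omitted):
 *
 *	struct rtattr *rta = (struct rtattr *)blob;
 *	struct crypto_authenc_key_param *param;
 *
 *	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
 *	rta->rta_len = RTA_LENGTH(sizeof(*param));
 *	param = RTA_DATA(rta);
 *	param->enckeylen = cpu_to_be32(enckeylen);
 *	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
 *	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
 *	       enckey, enckeylen);
 */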
1851
1852 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1853 {
1854 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1855
1856 return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1857 }
1858
1859 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1860 const u8 *key, unsigned int keylen)
1861 {
1862 struct algo_data ad = { 0 };
1863
1864 ad.ealg_id = SA_EALG_ID_AES_CBC;
1865 ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1866 ad.hash_size = SHA1_DIGEST_SIZE;
1867 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1868
1869 return sa_aead_setkey(authenc, key, keylen, &ad);
1870 }
1871
1872 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1873 const u8 *key, unsigned int keylen)
1874 {
1875 struct algo_data ad = { 0 };
1876
1877 ad.ealg_id = SA_EALG_ID_AES_CBC;
1878 ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1879 ad.hash_size = SHA256_DIGEST_SIZE;
1880 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1881
1882 return sa_aead_setkey(authenc, key, keylen, &ad);
1883 }
1884
1885 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1886 {
1887 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1888 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1889 struct sa_req sa_req = { 0 };
1890 size_t auth_size, enc_size;
1891
1892 enc_size = req->cryptlen;
1893 auth_size = req->assoclen + req->cryptlen;
1894
1895 if (!enc) {
1896 enc_size -= crypto_aead_authsize(tfm);
1897 auth_size -= crypto_aead_authsize(tfm);
1898 }
1899
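/*
 * Sizes the hardware cannot process safely are deferred to the
 * software fallback: anything larger than SA_MAX_DATA_SZ, or anything
 * inside the SA_UNSAFE_DATA_SZ_MIN..SA_UNSAFE_DATA_SZ_MAX window.
 */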
1900 if (auth_size > SA_MAX_DATA_SZ ||
1901 (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1902 auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1903 struct aead_request *subreq = aead_request_ctx(req);
1904 int ret;
1905
1906 aead_request_set_tfm(subreq, ctx->fallback.aead);
1907 aead_request_set_callback(subreq, req->base.flags,
1908 req->base.complete, req->base.data);
1909 aead_request_set_crypt(subreq, req->src, req->dst,
1910 req->cryptlen, req->iv);
1911 aead_request_set_ad(subreq, req->assoclen);
1912
1913 ret = enc ? crypto_aead_encrypt(subreq) :
1914 crypto_aead_decrypt(subreq);
1915 return ret;
1916 }
1917
1918 sa_req.enc_offset = req->assoclen;
1919 sa_req.enc_size = enc_size;
1920 sa_req.auth_size = auth_size;
1921 sa_req.size = auth_size;
1922 sa_req.enc_iv = iv;
1923 sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1924 sa_req.enc = enc;
1925 sa_req.callback = sa_aead_dma_in_callback;
1926 sa_req.mdata_size = 52;
1927 sa_req.base = &req->base;
1928 sa_req.ctx = ctx;
1929 sa_req.src = req->src;
1930 sa_req.dst = req->dst;
1931
1932 return sa_run(&sa_req);
1933 }
1934
1935 /* AEAD algorithm encrypt interface function */
1936 static int sa_aead_encrypt(struct aead_request *req)
1937 {
1938 return sa_aead_run(req, req->iv, 1);
1939 }
1940
1941 /* AEAD algorithm decrypt interface function */
1942 static int sa_aead_decrypt(struct aead_request *req)
1943 {
1944 return sa_aead_run(req, req->iv, 0);
1945 }
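/*
 * A minimal, illustrative sketch of driving one of the AEADs
 * registered below from other kernel code ("keyblob", "sg", "iv" and
 * the lengths are example names; allocation-failure and async
 * completion handling omitted):
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, keyblob, keyblob_len);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 */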
1946
1947 static struct sa_alg_tmpl sa_algs[] = {
1948 {
1949 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1950 .alg.skcipher = {
1951 .base.cra_name = "cbc(aes)",
1952 .base.cra_driver_name = "cbc-aes-sa2ul",
1953 .base.cra_priority = 30000,
1954 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1955 CRYPTO_ALG_KERN_DRIVER_ONLY |
1956 CRYPTO_ALG_ASYNC |
1957 CRYPTO_ALG_NEED_FALLBACK,
1958 .base.cra_blocksize = AES_BLOCK_SIZE,
1959 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1960 .base.cra_module = THIS_MODULE,
1961 .init = sa_cipher_cra_init,
1962 .exit = sa_cipher_cra_exit,
1963 .min_keysize = AES_MIN_KEY_SIZE,
1964 .max_keysize = AES_MAX_KEY_SIZE,
1965 .ivsize = AES_BLOCK_SIZE,
1966 .setkey = sa_aes_cbc_setkey,
1967 .encrypt = sa_encrypt,
1968 .decrypt = sa_decrypt,
1969 }
1970 },
1971 {
1972 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1973 .alg.skcipher = {
1974 .base.cra_name = "ecb(aes)",
1975 .base.cra_driver_name = "ecb-aes-sa2ul",
1976 .base.cra_priority = 30000,
1977 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1978 CRYPTO_ALG_KERN_DRIVER_ONLY |
1979 CRYPTO_ALG_ASYNC |
1980 CRYPTO_ALG_NEED_FALLBACK,
1981 .base.cra_blocksize = AES_BLOCK_SIZE,
1982 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1983 .base.cra_module = THIS_MODULE,
1984 .init = sa_cipher_cra_init,
1985 .exit = sa_cipher_cra_exit,
1986 .min_keysize = AES_MIN_KEY_SIZE,
1987 .max_keysize = AES_MAX_KEY_SIZE,
1988 .setkey = sa_aes_ecb_setkey,
1989 .encrypt = sa_encrypt,
1990 .decrypt = sa_decrypt,
1991 }
1992 },
1993 {
1994 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1995 .alg.skcipher = {
1996 .base.cra_name = "cbc(des3_ede)",
1997 .base.cra_driver_name = "cbc-des3-sa2ul",
1998 .base.cra_priority = 30000,
1999 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2000 CRYPTO_ALG_KERN_DRIVER_ONLY |
2001 CRYPTO_ALG_ASYNC |
2002 CRYPTO_ALG_NEED_FALLBACK,
2003 .base.cra_blocksize = DES_BLOCK_SIZE,
2004 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2005 .base.cra_module = THIS_MODULE,
2006 .init = sa_cipher_cra_init,
2007 .exit = sa_cipher_cra_exit,
2008 .min_keysize = 3 * DES_KEY_SIZE,
2009 .max_keysize = 3 * DES_KEY_SIZE,
2010 .ivsize = DES_BLOCK_SIZE,
2011 .setkey = sa_3des_cbc_setkey,
2012 .encrypt = sa_encrypt,
2013 .decrypt = sa_decrypt,
2014 }
2015 },
2016 {
2017 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2018 .alg.skcipher = {
2019 .base.cra_name = "ecb(des3_ede)",
2020 .base.cra_driver_name = "ecb-des3-sa2ul",
2021 .base.cra_priority = 30000,
2022 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2023 CRYPTO_ALG_KERN_DRIVER_ONLY |
2024 CRYPTO_ALG_ASYNC |
2025 CRYPTO_ALG_NEED_FALLBACK,
2026 .base.cra_blocksize = DES_BLOCK_SIZE,
2027 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2028 .base.cra_module = THIS_MODULE,
2029 .init = sa_cipher_cra_init,
2030 .exit = sa_cipher_cra_exit,
2031 .min_keysize = 3 * DES_KEY_SIZE,
2032 .max_keysize = 3 * DES_KEY_SIZE,
2033 .setkey = sa_3des_ecb_setkey,
2034 .encrypt = sa_encrypt,
2035 .decrypt = sa_decrypt,
2036 }
2037 },
2038 {
2039 .type = CRYPTO_ALG_TYPE_AHASH,
2040 .alg.ahash = {
2041 .halg.base = {
2042 .cra_name = "sha1",
2043 .cra_driver_name = "sha1-sa2ul",
2044 .cra_priority = 400,
2045 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2046 CRYPTO_ALG_ASYNC |
2047 CRYPTO_ALG_KERN_DRIVER_ONLY |
2048 CRYPTO_ALG_NEED_FALLBACK,
2049 .cra_blocksize = SHA1_BLOCK_SIZE,
2050 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2051 .cra_module = THIS_MODULE,
2052 .cra_init = sa_sha1_cra_init,
2053 .cra_exit = sa_sha_cra_exit,
2054 },
2055 .halg.digestsize = SHA1_DIGEST_SIZE,
2056 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2057 sizeof(struct sha1_state),
2058 .init = sa_sha_init,
2059 .update = sa_sha_update,
2060 .final = sa_sha_final,
2061 .finup = sa_sha_finup,
2062 .digest = sa_sha_digest,
2063 .export = sa_sha_export,
2064 .import = sa_sha_import,
2065 },
2066 },
2067 {
2068 .type = CRYPTO_ALG_TYPE_AHASH,
2069 .alg.ahash = {
2070 .halg.base = {
2071 .cra_name = "sha256",
2072 .cra_driver_name = "sha256-sa2ul",
2073 .cra_priority = 400,
2074 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2075 CRYPTO_ALG_ASYNC |
2076 CRYPTO_ALG_KERN_DRIVER_ONLY |
2077 CRYPTO_ALG_NEED_FALLBACK,
2078 .cra_blocksize = SHA256_BLOCK_SIZE,
2079 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2080 .cra_module = THIS_MODULE,
2081 .cra_init = sa_sha256_cra_init,
2082 .cra_exit = sa_sha_cra_exit,
2083 },
2084 .halg.digestsize = SHA256_DIGEST_SIZE,
2085 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2086 sizeof(struct sha256_state),
2087 .init = sa_sha_init,
2088 .update = sa_sha_update,
2089 .final = sa_sha_final,
2090 .finup = sa_sha_finup,
2091 .digest = sa_sha_digest,
2092 .export = sa_sha_export,
2093 .import = sa_sha_import,
2094 },
2095 },
2096 {
2097 .type = CRYPTO_ALG_TYPE_AHASH,
2098 .alg.ahash = {
2099 .halg.base = {
2100 .cra_name = "sha512",
2101 .cra_driver_name = "sha512-sa2ul",
2102 .cra_priority = 400,
2103 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2104 CRYPTO_ALG_ASYNC |
2105 CRYPTO_ALG_KERN_DRIVER_ONLY |
2106 CRYPTO_ALG_NEED_FALLBACK,
2107 .cra_blocksize = SHA512_BLOCK_SIZE,
2108 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2109 .cra_module = THIS_MODULE,
2110 .cra_init = sa_sha512_cra_init,
2111 .cra_exit = sa_sha_cra_exit,
2112 },
2113 .halg.digestsize = SHA512_DIGEST_SIZE,
2114 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2115 sizeof(struct sha512_state),
2116 .init = sa_sha_init,
2117 .update = sa_sha_update,
2118 .final = sa_sha_final,
2119 .finup = sa_sha_finup,
2120 .digest = sa_sha_digest,
2121 .export = sa_sha_export,
2122 .import = sa_sha_import,
2123 },
2124 },
2125 {
2126 .type = CRYPTO_ALG_TYPE_AEAD,
2127 .alg.aead = {
2128 .base = {
2129 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2130 .cra_driver_name =
2131 "authenc(hmac(sha1),cbc(aes))-sa2ul",
2132 .cra_blocksize = AES_BLOCK_SIZE,
2133 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2134 CRYPTO_ALG_KERN_DRIVER_ONLY |
2135 CRYPTO_ALG_ASYNC |
2136 CRYPTO_ALG_NEED_FALLBACK,
2137 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2138 .cra_module = THIS_MODULE,
2139 .cra_priority = 3000,
2140 },
2141 .ivsize = AES_BLOCK_SIZE,
2142 .maxauthsize = SHA1_DIGEST_SIZE,
2143
2144 .init = sa_cra_init_aead_sha1,
2145 .exit = sa_exit_tfm_aead,
2146 .setkey = sa_aead_cbc_sha1_setkey,
2147 .setauthsize = sa_aead_setauthsize,
2148 .encrypt = sa_aead_encrypt,
2149 .decrypt = sa_aead_decrypt,
2150 },
2151 },
2152 {
2153 .type = CRYPTO_ALG_TYPE_AEAD,
2154 .alg.aead = {
2155 .base = {
2156 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2157 .cra_driver_name =
2158 "authenc(hmac(sha256),cbc(aes))-sa2ul",
2159 .cra_blocksize = AES_BLOCK_SIZE,
2160 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2161 CRYPTO_ALG_KERN_DRIVER_ONLY |
2162 CRYPTO_ALG_ASYNC |
2163 CRYPTO_ALG_NEED_FALLBACK,
2164 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2165 .cra_module = THIS_MODULE,
2166 .cra_alignmask = 0,
2167 .cra_priority = 3000,
2168 },
2169 .ivsize = AES_BLOCK_SIZE,
2170 .maxauthsize = SHA256_DIGEST_SIZE,
2171
2172 .init = sa_cra_init_aead_sha256,
2173 .exit = sa_exit_tfm_aead,
2174 .setkey = sa_aead_cbc_sha256_setkey,
2175 .setauthsize = sa_aead_setauthsize,
2176 .encrypt = sa_aead_encrypt,
2177 .decrypt = sa_aead_decrypt,
2178 },
2179 },
2180 };
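/*
 * The cra_priority values above determine which implementation the
 * crypto API picks when several providers register the same cra_name;
 * e.g. a plain crypto_alloc_skcipher("cbc(aes)", 0, 0) will select
 * cbc-aes-sa2ul here as long as no higher-priority "cbc(aes)"
 * implementation is registered.
 */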
2181
2182 /* Register the algorithms in crypto framework */
2183 static void sa_register_algos(const struct device *dev)
2184 {
2185 char *alg_name;
2186 u32 type;
2187 int i, err;
2188
2189 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2190 type = sa_algs[i].type;
2191 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2192 alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2193 err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2194 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2195 alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2196 err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2197 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2198 alg_name = sa_algs[i].alg.aead.base.cra_name;
2199 err = crypto_register_aead(&sa_algs[i].alg.aead);
2200 } else {
2201 dev_err(dev,
2202 "un-supported crypto algorithm (%d)",
2203 sa_algs[i].type);
2204 continue;
2205 }
2206
2207 if (err)
2208 dev_err(dev, "Failed to register '%s'\n", alg_name);
2209 else
2210 sa_algs[i].registered = true;
2211 }
2212 }
2213
2214 /* Unregister the algorithms in crypto framework */
2215 static void sa_unregister_algos(const struct device *dev)
2216 {
2217 u32 type;
2218 int i;
2219
2220 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2221 type = sa_algs[i].type;
2222 if (!sa_algs[i].registered)
2223 continue;
2224 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2225 crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2226 else if (type == CRYPTO_ALG_TYPE_AHASH)
2227 crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2228 else if (type == CRYPTO_ALG_TYPE_AEAD)
2229 crypto_unregister_aead(&sa_algs[i].alg.aead);
2230
2231 sa_algs[i].registered = false;
2232 }
2233 }
2234
2235 static int sa_init_mem(struct sa_crypto_data *dev_data)
2236 {
2237 struct device *dev = &dev_data->pdev->dev;
2238 /* Setup dma pool for security context buffers */
2239 dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2240 SA_CTX_MAX_SZ, 64, 0);
2241 if (!dev_data->sc_pool) {
2242 dev_err(dev, "Failed to create dma pool\n");
2243 return -ENOMEM;
2244 }
2245
2246 return 0;
2247 }
2248
2249 static int sa_dma_init(struct sa_crypto_data *dd)
2250 {
2251 int ret;
2252 struct dma_slave_config cfg;
2253
2254 dd->dma_rx1 = NULL;
2255 dd->dma_tx = NULL;
2256 dd->dma_rx2 = NULL;
2257
2258 ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2259 if (ret)
2260 return ret;
2261
2262 dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2263 if (IS_ERR(dd->dma_rx1))
2264 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2265 "Unable to request rx1 DMA channel\n");
2266
2267 dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2268 if (IS_ERR(dd->dma_rx2)) {
2269 dma_release_channel(dd->dma_rx1);
2270 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2271 "Unable to request rx2 DMA channel\n");
2272 }
2273
2274 dd->dma_tx = dma_request_chan(dd->dev, "tx");
2275 if (IS_ERR(dd->dma_tx)) {
2276 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2277 "Unable to request tx DMA channel\n");
2278 goto err_dma_tx;
2279 }
2280
2281 memzero_explicit(&cfg, sizeof(cfg));
2282
2283 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2284 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2285 cfg.src_maxburst = 4;
2286 cfg.dst_maxburst = 4;
2287
2288 ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2289 if (ret) {
2290 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2291 ret);
2292 goto err_dma_out;
2293 }
2294
2295 ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2296 if (ret) {
2297 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2298 ret);
2299 goto err_dma_out;
2300 }
2301
2302 ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2303 if (ret) {
2304 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2305 ret);
2306 goto err_dma_out;
2307 }
2308
2309 return 0;
2310
err_dma_out:
dma_release_channel(dd->dma_tx);

2311 err_dma_tx:
2312 dma_release_channel(dd->dma_rx1);
2313 dma_release_channel(dd->dma_rx2);
2314
2315 return ret;
2316 }
2317
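/*
 * Called from sa_ul_probe() via device_for_each_child() so that every
 * child device populated from the device tree (e.g. the RNG node, if
 * present) is consumer-linked to the SA2UL parent for probe ordering
 * and runtime PM.
 */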
2318 static int sa_link_child(struct device *dev, void *data)
2319 {
2320 struct device *parent = data;
2321
2322 device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2323
2324 return 0;
2325 }
2326
2327 static int sa_ul_probe(struct platform_device *pdev)
2328 {
2329 struct device *dev = &pdev->dev;
2330 struct device_node *node = dev->of_node;
2331 struct resource *res;
2332 void __iomem *saul_base;
2333 struct sa_crypto_data *dev_data;
2334 u32 val;
2335 int ret;
2336
2337 dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2338 if (!dev_data)
2339 return -ENOMEM;
2340
2341 sa_k3_dev = dev;
2342 dev_data->dev = dev;
2343 dev_data->pdev = pdev;
2344 platform_set_drvdata(pdev, dev_data);
2345 dev_set_drvdata(sa_k3_dev, dev_data);
2346
2347 pm_runtime_enable(dev);
2348 ret = pm_runtime_get_sync(dev);
2349 if (ret < 0) {
2350 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
2351 ret);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
2352 return ret;
2353 }
2354
2355 ret = sa_init_mem(dev_data);
if (ret)
goto disable_pm_runtime;

2356 ret = sa_dma_init(dev_data);
2357 if (ret)
2358 goto disable_pm_runtime;
2359
2360 spin_lock_init(&dev_data->scid_lock);
2361 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2362 saul_base = devm_ioremap_resource(dev, res);
if (IS_ERR(saul_base)) {
ret = PTR_ERR(saul_base);
goto release_dma;
}
2363 
2364 dev_data->base = saul_base;
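/*
 * Enable the encryption and authentication sub-systems, the context
 * cache, the ingress/egress CPPI ports and the TRNG before the
 * algorithms are registered.
 */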
2365 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2366 SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2367 SA_EEC_TRNG_EN;
2368
2369 writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2370
2371 sa_register_algos(dev);
2372
2373 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2374 if (ret)
2375 goto release_dma;
2376
2377 device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2378
2379 return 0;
2380
2381 release_dma:
2382 sa_unregister_algos(&pdev->dev);
2383
2384 dma_release_channel(dev_data->dma_rx2);
2385 dma_release_channel(dev_data->dma_rx1);
2386 dma_release_channel(dev_data->dma_tx);
2387
2388 dma_pool_destroy(dev_data->sc_pool);
2389
2390 disable_pm_runtime:
2391 pm_runtime_put_sync(&pdev->dev);
2392 pm_runtime_disable(&pdev->dev);
2393
2394 return ret;
2395 }
2396
2397 static int sa_ul_remove(struct platform_device *pdev)
2398 {
2399 struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2400
2401 sa_unregister_algos(&pdev->dev);
2402
2403 dma_release_channel(dev_data->dma_rx2);
2404 dma_release_channel(dev_data->dma_rx1);
2405 dma_release_channel(dev_data->dma_tx);
2406
2407 dma_pool_destroy(dev_data->sc_pool);
2408
2409 platform_set_drvdata(pdev, NULL);
2410
2411 pm_runtime_put_sync(&pdev->dev);
2412 pm_runtime_disable(&pdev->dev);
2413
2414 return 0;
2415 }
2416
2417 static const struct of_device_id of_match[] = {
2418 {.compatible = "ti,j721e-sa2ul",},
2419 {.compatible = "ti,am654-sa2ul",},
2420 {},
2421 };
2422 MODULE_DEVICE_TABLE(of, of_match);
2423
2424 static struct platform_driver sa_ul_driver = {
2425 .probe = sa_ul_probe,
2426 .remove = sa_ul_remove,
2427 .driver = {
2428 .name = "saul-crypto",
2429 .of_match_table = of_match,
2430 },
2431 };
2432 module_platform_driver(sa_ul_driver);
2433 MODULE_LICENSE("GPL v2");
2434