/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
#include "trace.h"

#define R_CRYPT_CMD        (0x10 / 4)

#define R_STATUS           (0x1c / 4)
#define HASH_IRQ           BIT(9)
#define CRYPT_IRQ          BIT(12)
#define TAG_IRQ            BIT(15)

#define R_HASH_SRC         (0x20 / 4)
#define R_HASH_DIGEST      (0x24 / 4)
#define R_HASH_KEY_BUFF    (0x28 / 4)
#define R_HASH_SRC_LEN     (0x2c / 4)
#define R_HASH_SRC_HI      (0x90 / 4)
#define R_HASH_DIGEST_HI   (0x94 / 4)
#define R_HASH_KEY_BUFF_HI (0x98 / 4)

#define R_HASH_CMD         (0x30 / 4)
/* Hash algorithm selection */
#define HASH_ALGO_MASK          (BIT(4) | BIT(5) | BIT(6))
#define HASH_ALGO_MD5           0
#define HASH_ALGO_SHA1          BIT(5)
#define HASH_ALGO_SHA224        BIT(6)
#define HASH_ALGO_SHA256        (BIT(4) | BIT(6))
#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define SHA512_HASH_ALGO_MASK   (BIT(10) | BIT(11) | BIT(12))
#define HASH_ALGO_SHA512_SHA512 0
#define HASH_ALGO_SHA512_SHA384 BIT(10)
#define HASH_ALGO_SHA512_SHA256 BIT(11)
#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
/* HMAC modes */
#define HASH_HMAC_MASK          (BIT(7) | BIT(8))
#define HASH_DIGEST             0
#define HASH_DIGEST_HMAC        BIT(7)
#define HASH_DIGEST_ACCUM       BIT(8)
#define HASH_HMAC_KEY           (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define HASH_ONLY               0
#define HASH_ONLY2              BIT(0)
#define HASH_CRYPT_THEN_HASH    BIT(1)
#define HASH_HASH_THEN_CRYPT    (BIT(0) | BIT(1))
/* Other cmd bits */
#define HASH_IRQ_EN             BIT(9)
#define HASH_SG_EN              BIT(18)
#define CRYPT_IRQ_EN            BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE        4
#define SG_LIST_LEN_MASK        0x0FFFFFFF
#define SG_LIST_LEN_LAST        BIT(31)
#define SG_LIST_ADDR_SIZE       4
#define SG_LIST_ADDR_MASK       0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE      (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
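/*
 * Each scatter-gather list entry is a pair of little-endian 32-bit words in
 * DRAM: a length word (bits [27:0] hold the segment length, bit 31 marks the
 * last entry) followed by a source address word (bits [30:0]).
 */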

static const struct {
    uint32_t mask;
    QCryptoHashAlgo algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};

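/*
 * Dump a buffer through the aspeed_hace_hexdump trace event, 16 bytes per
 * line.
 */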
static void hace_hexdump(const char *desc, const char *buf, size_t size)
{
    g_autoptr(GString) str = g_string_sized_new(64);
    size_t len;
    size_t i;

    for (i = 0; i < size; i += len) {
        len = MIN(16, size - i);
        g_string_truncate(str, 0);
        qemu_hexdump_line(str, buf + i, len, 1, 4);
        trace_aspeed_hace_hexdump(desc, i, str->str);
    }
}

static void hace_iov_hexdump(const char *desc, const struct iovec *iov,
                             const unsigned int iov_cnt)
{
    size_t size = 0;
    char *buf;
    int i;

    for (i = 0; i < iov_cnt; i++) {
        size += iov[i].iov_len;
    }

    buf = g_malloc(size);

    if (!buf) {
        return;
    }

    iov_to_buf(iov, iov_cnt, 0, buf, size);
    hace_hexdump(desc, buf, size);
    g_free(buf);
}

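/*
 * Translate the algorithm selection bits of the command register into a
 * QCryptoHashAlgo. Returns -1 if the combination is not supported.
 */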
static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}

/**
 * Check whether the request contains a padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset start offset of the padding message
 */
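/*
 * Background: the SHA family pads a message with a 0x80 byte followed by
 * zero bytes, and stores the total message length in bits, big-endian, at
 * the end of the final block. The check below reads that length from the
 * last 8 bytes of the request and looks for the leading 0x80 padding byte.
 */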
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
     * last request. The last request should contain the padding message.
     * We check whether the message contains padding by:
     * 1. Getting the total message length. If the current message contains
     *    padding, the last 8 bytes are the total message length.
     * 2. Checking whether the total message length is valid. If it is valid,
     *    the value should be less than or equal to total_req_len.
     * 3. Subtracting padding_size from the current request length to get the
     *    padding offset. The first byte of the padding message should be
     *    0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;

        if (padding_size > req_len) {
            return false;
        }

        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}

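/*
 * Build the 64-bit DMA source address from the source register, adding the
 * high word on SoCs that support 64-bit DMA addressing.
 */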
static uint64_t hash_get_source_addr(AspeedHACEState *s)
{
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
    uint64_t src_addr = 0;

    src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]);
    if (ahc->has_dma64) {
        src_addr = deposit64(src_addr, 32, 32, s->regs[R_HASH_SRC_HI]);
    }

    return src_addr;
}

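/*
 * Map the guest buffer described by the source and length registers into a
 * single iovec entry. Returns the number of iov entries used (1) on success,
 * or -1 if the buffer cannot be mapped.
 */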
static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov,
                                   bool acc_mode, bool *acc_final_request)
{
    uint32_t total_msg_len;
    uint32_t pad_offset;
    uint64_t src;
    void *haddr;
    hwaddr plen;
    int iov_idx;

    plen = s->regs[R_HASH_SRC_LEN];
    src = hash_get_source_addr(s);
    trace_aspeed_hace_hash_addr("src", src);
    haddr = address_space_map(&s->dram_as, src, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (haddr == NULL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unable to map address, addr=0x%" HWADDR_PRIx
                      ", plen=0x%" HWADDR_PRIx "\n",
                      __func__, src, plen);
        return -1;
    }

    iov[0].iov_base = haddr;
    iov_idx = 1;

    if (acc_mode) {
        s->total_req_len += plen;

        if (has_padding(s, &iov[0], plen, &total_msg_len,
                        &pad_offset)) {
            /* Padding being present indicates the final request */
            *acc_final_request = true;
            iov[0].iov_len = pad_offset;
        } else {
            iov[0].iov_len = plen;
        }
    } else {
        iov[0].iov_len = plen;
    }

    return iov_idx;
}

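/*
 * Walk the scatter-gather list in DRAM and map each segment into the iov
 * array until the entry flagged SG_LIST_LEN_LAST is reached. Returns the
 * number of iov entries used, or -1 on error.
 */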
static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov,
                               bool acc_mode, bool *acc_final_request)
{
    uint32_t total_msg_len;
    uint32_t pad_offset;
    uint32_t len = 0;
    uint32_t sg_addr;
    uint64_t src;
    int iov_idx;
    hwaddr plen;
    void *haddr;

    src = hash_get_source_addr(s);
    for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
        if (iov_idx == ASPEED_HACE_MAX_SG) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Failed to set end of sg list marker\n",
                          __func__);
            return -1;
        }

        len = address_space_ldl_le(&s->dram_as, src,
                                   MEMTXATTRS_UNSPECIFIED, NULL);
        sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
        sg_addr &= SG_LIST_ADDR_MASK;
        trace_aspeed_hace_hash_sg(iov_idx, src, sg_addr, len);
        /*
         * To maintain compatibility with older SoCs such as the AST2600,
         * the AST2700 HW automatically sets bit 34 of the 64-bit sg_addr.
         * As a result, the firmware only needs to provide a 32-bit sg_addr
         * containing bits [31:0]. This is sufficient for the AST2700, as
         * it uses a DRAM offset rather than a DRAM address.
         */
        plen = len & SG_LIST_LEN_MASK;
        haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
                                  MEMTXATTRS_UNSPECIFIED);

        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Unable to map address, sg_addr=0x%x, "
                          "plen=0x%" HWADDR_PRIx "\n",
                          __func__, sg_addr, plen);
            return -1;
        }

        src += SG_LIST_ENTRY_SIZE;

        iov[iov_idx].iov_base = haddr;
        if (acc_mode) {
            s->total_req_len += plen;

            if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
                            &pad_offset)) {
                /* Padding being present indicates the final request */
                *acc_final_request = true;
                iov[iov_idx].iov_len = pad_offset;
            } else {
                iov[iov_idx].iov_len = plen;
            }
        } else {
            iov[iov_idx].iov_len = plen;
        }
    }

    return iov_idx;
}

static uint64_t hash_get_digest_addr(AspeedHACEState *s)
{
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
    uint64_t digest_addr = 0;

    digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]);
    if (ahc->has_dma64) {
        digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DIGEST_HI]);
    }

    return digest_addr;
}

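/*
 * Write the computed digest to the guest's digest buffer and unmap all
 * previously mapped source segments.
 */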
static void hash_write_digest_and_unmap_iov(AspeedHACEState *s,
                                            struct iovec *iov,
                                            int iov_idx,
                                            uint8_t *digest_buf,
                                            size_t digest_len)
{
    uint64_t digest_addr = 0;

    digest_addr = hash_get_digest_addr(s);
    trace_aspeed_hace_hash_addr("digest", digest_addr);
    if (address_space_write(&s->dram_as, digest_addr,
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n",
                      __func__, digest_addr);
    }

    if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) {
        hace_hexdump("digest", (char *)digest_buf, digest_len);
    }

    for (; iov_idx > 0; iov_idx--) {
        address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base,
                            iov[iov_idx - 1].iov_len, false,
                            iov[iov_idx - 1].iov_len);
    }
}

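/*
 * Hash the prepared iov in a single pass and deliver the digest to the
 * guest.
 */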
static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo,
                                      struct iovec *iov, int iov_idx)
{
    g_autofree uint8_t *digest_buf = NULL;
    Error *local_err = NULL;
    size_t digest_len = 0;

    if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf,
                            &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: qcrypto hash bytesv failed : %s",
                      __func__, error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
}

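/*
 * Accumulative mode: feed the prepared iov into a hash context kept across
 * requests and finalize the digest once the request carrying the padding
 * message has been processed.
 */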
static void hash_execute_acc_mode(AspeedHACEState *s, int algo,
                                  struct iovec *iov, int iov_idx,
                                  bool final_request)
{
    g_autofree uint8_t *digest_buf = NULL;
    Error *local_err = NULL;
    size_t digest_len = 0;

    trace_aspeed_hace_hash_execute_acc_mode(final_request);

    if (s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash new failed : %s",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }

    if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash updatev failed : %s",
                      __func__, error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    if (final_request) {
        if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                        &digest_len, &local_err)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: qcrypto hash finalize bytes failed : %s",
                          __func__, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }

        qcrypto_hash_free(s->hash_ctx);

        s->hash_ctx = NULL;
        s->total_req_len = 0;
    }

    hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len);
}

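/*
 * Run one hash command: prepare the iov from either the direct source
 * buffer or the scatter-gather list, then hash it in accumulative or
 * single-shot mode.
 */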
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    bool acc_final_request = false;
    int iov_idx = -1;

    /* Prepares the iov for hashing operations based on the selected mode */
    if (sg_mode) {
        iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request);
    } else {
        iov_idx = hash_prepare_direct_iov(s, iov, acc_mode,
                                          &acc_final_request);
    }

    if (iov_idx <= 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Failed to prepare iov\n", __func__);
        return;
    }

    if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) {
        hace_iov_hexdump("plaintext", iov, iov_idx);
    }

    /* Executes the hash operation */
    if (acc_mode) {
        hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request);
    } else {
        hash_execute_non_acc_mode(s, algo, iov, iov_idx);
    }
}

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    trace_aspeed_hace_read(addr << 2, s->regs[addr]);

    return s->regs[addr];
}

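/*
 * MMIO write handler: masks register values to their implemented bits,
 * handles interrupt status clearing and triggers hash operations when the
 * hash command register is written.
 */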
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    trace_aspeed_hace_write(addr << 2, data);

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DIGEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
        } else {
            do_hash_operation(s, algo, data & HASH_SG_EN,
                              ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
        }

        /*
         * Set status bits to indicate completion. Testing shows hardware sets
         * these irrespective of HASH_IRQ_EN.
         */
        s->regs[R_STATUS] |= HASH_IRQ;

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    case R_HASH_SRC_HI:
        data &= ahc->src_hi_mask;
        break;
    case R_HASH_DIGEST_HI:
        data &= ahc->dest_hi_mask;
        break;
    case R_HASH_KEY_BUFF_HI:
        data &= ahc->key_hi_mask;
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    if (s->hash_ctx != NULL) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, ahc->nr_regs << 2);
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    sysbus_init_irq(sbd, &s->irq);

    s->regs = g_new(uint32_t, ahc->nr_regs);
    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, ahc->nr_regs << 2);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_unrealize(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    g_free(s->regs);
    s->regs = NULL;
}

static void aspeed_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->unrealize = aspeed_hace_unrealize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->nr_regs = 0x64 >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->nr_regs = 0x9C >> 2;
    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM
     * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this
     * range fits within 34 bits, only bits [33:0] are needed to store the
     * DRAM offset. To optimize address storage, the high physical address
     * bits [1:0] of the source, digest and key buffer addresses are stored
     * as dram_offset bits [33:32].
     *
     * This approach eliminates the need to reduce the high part of the DRAM
     * physical address for DMA operations. Previously, this was calculated
     * as (high physical address bits [7:0] - 4), since the DRAM start
     * address is 0x4_00000000, making the high part address [7:0] - 4.
     */
    ahc->src_hi_mask = 0x00000003;
    ahc->dest_hi_mask = 0x00000003;
    ahc->key_hi_mask = 0x00000003;

    /*
     * Currently, it does not support the CRYPT command. Instead, it only
     * sends an interrupt to notify the firmware that the crypt command
     * has completed. It is a temporary workaround.
     */
    ahc->raise_crypt_interrupt_workaround = true;
    ahc->has_dma64 = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);