/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define  HASH_ALGO_MASK                 (BIT(4) | BIT(5) | BIT(6))
#define  HASH_ALGO_MD5                  0
#define  HASH_ALGO_SHA1                 BIT(5)
#define  HASH_ALGO_SHA224               BIT(6)
#define  HASH_ALGO_SHA256               (BIT(4) | BIT(6))
#define  HASH_ALGO_SHA512_SERIES        (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define  SHA512_HASH_ALGO_MASK          (BIT(10) | BIT(11) | BIT(12))
#define  HASH_ALGO_SHA512_SHA512        0
#define  HASH_ALGO_SHA512_SHA384        BIT(10)
#define  HASH_ALGO_SHA512_SHA256        BIT(11)
#define  HASH_ALGO_SHA512_SHA224        (BIT(10) | BIT(11))
/* HMAC modes */
#define  HASH_HMAC_MASK                 (BIT(7) | BIT(8))
#define  HASH_DIGEST                    0
#define  HASH_DIGEST_HMAC               BIT(7)
#define  HASH_DIGEST_ACCUM              BIT(8)
#define  HASH_HMAC_KEY                  (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define  HASH_ONLY                      0
#define  HASH_ONLY2                     BIT(0)
#define  HASH_CRYPT_THEN_HASH           BIT(1)
#define  HASH_HASH_THEN_CRYPT           (BIT(0) | BIT(1))
/* Other cmd bits */
#define  HASH_IRQ_EN                    BIT(9)
#define  HASH_SG_EN                     BIT(18)
#define  CRYPT_IRQ_EN                   BIT(12)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE                4
#define SG_LIST_LEN_MASK                0x0FFFFFFF
#define SG_LIST_LEN_LAST                BIT(31)
#define SG_LIST_ADDR_SIZE               4
#define SG_LIST_ADDR_MASK               0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE              (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)

static const struct {
    uint32_t mask;
    QCryptoHashAlgo algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALGO_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALGO_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALGO_SHA256 },
};

static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}
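
/*
 * For reference, the in-memory layout of one scatter-gather list entry as
 * consumed by do_hash_operation() below. Each entry is two little-endian
 * 32-bit words in DRAM (this is a restatement of the SG_LIST_* definitions
 * above, not an additional specification):
 *
 *   +0x0  length word:  bits [27:0]  buffer length in bytes
 *                       bit  31      SG_LIST_LEN_LAST, set on the final entry
 *   +0x4  address word: bits [30:0]  DRAM address of the buffer
 *
 * A guest would build a two-buffer list roughly like this (sketch only;
 * "sg", "buf0_*" and "buf1_*" are hypothetical guest-side names):
 *
 *   sg[0].len  = cpu_to_le32(buf0_len);
 *   sg[0].addr = cpu_to_le32(buf0_addr);
 *   sg[1].len  = cpu_to_le32(buf1_len | SG_LIST_LEN_LAST);
 *   sg[1].addr = cpu_to_le32(buf1_addr);
 */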
/**
 * Check whether the request contains a padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len total length of all acc_mode requests (excluding the
 *                      padding message)
 * @param pad_offset start offset of the padding message
 *
 * @return true if the request carries the final padding, false otherwise
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    /* Too short to contain the 64-bit big-endian bit count */
    if (req_len < 8) {
        return false;
    }

    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean this is
     * the last request; the last request is the one that carries the padding
     * message. Detect the padding as follows:
     *   1. Read the total message length. If the current request contains
     *      padding, its last 8 bytes hold the total message length.
     *   2. Validate it: a genuine total message length must be less than or
     *      equal to total_req_len.
     *   3. Compute the padding offset as the request length minus the
     *      padding size; the first byte of a padding message is always 0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;

        /* The padding must lie inside the current request */
        if (padding_size == 0 || padding_size > req_len) {
            return false;
        }

        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}
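
/*
 * Worked example of the layout has_padding() searches for, assuming SHA-256
 * (64-byte blocks) and the 3-byte message "abc" hashed in one accumulated
 * request. The final block the guest submits looks like:
 *
 *   offset  0: 0x61 0x62 0x63      message bytes ("abc")
 *   offset  3: 0x80                first padding byte checked above
 *   offset  4: 0x00 ... 0x00       zero padding
 *   offset 56: 0x00 ... 0x00 0x18  64-bit big-endian bit count (24 bits)
 *
 * has_padding() recovers the message length as ldq_be_p(<last 8 bytes>) / 8
 * and accepts the request as final when that length is consistent with
 * total_req_len and the byte at pad_offset is 0x80.
 */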
/*
 * Combine the iovs cached from previous requests with the non-padding
 * portion of the current request so the whole message can be hashed in one
 * pass. Resets the accumulation state and returns the iov count to use.
 */
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;
    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }
    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }
    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;
    return iov_count;
}

static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    uint32_t total_msg_len;
    uint32_t pad_offset;
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    bool sg_acc_mode_final_request = false;
    int i;
    void *haddr;
    Error *local_err = NULL;

    if (acc_mode && s->hash_ctx == NULL) {
        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
        if (s->hash_ctx == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash new failed: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }
    }

    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: failed to map dram\n", __func__);
                /* Unmap the buffers mapped so far; don't leak them */
                for (; i > 0; i--) {
                    address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                                        iov[i - 1].iov_len, false, 0);
                }
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                s->total_req_len += plen;

                if (has_padding(s, &iov[i], plen, &total_msg_len,
                                &pad_offset)) {
                    /* Padding being present indicates the final request */
                    sg_acc_mode_final_request = true;
                    iov[i].iov_len = pad_offset;
                } else {
                    iov[i].iov_len = plen;
                }
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map dram\n",
                          __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * The Aspeed SDK kernel driver disables sg_mode in hash_final(),
             * so when a request arrives with sg_mode disabled we must check
             * whether the iov cache is empty. If it is not, combine the
             * cached iovs with the current one.
             */
            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                i = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (acc_mode) {
        if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return;
        }

        if (sg_acc_mode_final_request) {
            if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
                                            &digest_len, &local_err) < 0) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "qcrypto hash finalize failed: %s\n",
                              error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
            }

            qcrypto_hash_free(s->hash_ctx);

            s->hash_ctx = NULL;
            s->iov_count = 0;
            s->total_req_len = 0;
        }
    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
                                   &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed: %s\n",
                      error_get_pretty(local_err));
        error_free(local_err);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}
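
/*
 * Summary of the accumulative-mode flow implemented above (this describes
 * the model's behaviour, not real silicon):
 *
 *   1. First ACCUM request:  qcrypto_hash_new() creates s->hash_ctx.
 *   2. Every request:        qcrypto_hash_updatev() absorbs the iovs and
 *                            total_req_len accumulates the bytes seen.
 *   3. Final request:        has_padding() detects the trailing pad, the
 *                            padding is clipped from the iov, and
 *                            qcrypto_hash_finalize_bytes() produces the
 *                            digest before hash_ctx is freed.
 */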
static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        if (ahc->raise_crypt_interrupt_workaround) {
            if (data & CRYPT_IRQ) {
                data &= ~CRYPT_IRQ;

                if (s->regs[addr] & CRYPT_IRQ) {
                    qemu_irq_lower(s->irq);
                }
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if (data & HASH_DIGEST_HMAC) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%" PRIx64 "\n",
                          __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        if (ahc->raise_crypt_interrupt_workaround) {
            s->regs[R_STATUS] |= CRYPT_IRQ;
            if (data & CRYPT_IRQ_EN) {
                qemu_irq_raise(s->irq);
            }
        }
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    if (s->hash_ctx != NULL) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}
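
/*
 * Programming sequence accepted by the MMIO handlers above, sketched as
 * hypothetical guest firmware code ("hace_base" and the writel()/readl()
 * helpers are assumptions; the offsets match the R_* defines):
 *
 *   writel(hace_base + 0x20, src);                 // R_HASH_SRC
 *   writel(hace_base + 0x24, dst);                 // R_HASH_DEST
 *   writel(hace_base + 0x2c, len);                 // R_HASH_SRC_LEN
 *   writel(hace_base + 0x30,
 *          HASH_ALGO_SHA256 | HASH_IRQ_EN);        // R_HASH_CMD, runs hash
 *   while (!(readl(hace_base + 0x1c) & HASH_IRQ))
 *       ;                                          // poll R_STATUS
 *   writel(hace_base + 0x1c, HASH_IRQ);            // W1C: clear and lower IRQ
 */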
static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static const Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    device_class_set_legacy_reset(dc, aspeed_hace_reset);
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass)
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};
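
/*
 * A minimal sketch of how an SoC model would wire one of these controllers
 * up ("s->hace", "s->dram_mr", "HACE_MMIO_BASE" and "hace_irq" are
 * hypothetical names; the mandatory "dram" link is the one checked in
 * aspeed_hace_realize()):
 *
 *   object_initialize_child(obj, "hace", &s->hace, TYPE_ASPEED_AST2600_HACE);
 *   object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(s->dram_mr),
 *                            &error_abort);
 *   sysbus_realize(SYS_BUS_DEVICE(&s->hace), &error_abort);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, HACE_MMIO_BASE);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, hace_irq);
 */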
static void aspeed_ast2700_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2700 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;

    /*
     * The CRYPT command is not currently supported. As a temporary
     * workaround, the model only raises an interrupt to notify the firmware
     * that the crypt command has completed.
     */
    ahc->raise_crypt_interrupt_workaround = true;
}

static const TypeInfo aspeed_ast2700_hace_info = {
    .name = TYPE_ASPEED_AST2700_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2700_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_ast2700_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);