// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "ccp-crypto.h"

static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
		crypto_skcipher_reqtfm(req));
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
		memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
	    (req->cryptlen & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->iv)
			return -EINVAL;

		memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.dst = req->dst;

	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}

static int ccp_aes_encrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, false);
}

static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	/* Restore the original pointer */
	req->iv = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}

static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ccp_aes_setkey(tfm, key, key_len);
}

static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Point to the new IV */
	rctx->rfc3686_info = req->iv;
	req->iv = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static const struct skcipher_alg ccp_aes_defaults = {
	.setkey			= ccp_aes_setkey,
	.encrypt		= ccp_aes_encrypt,
	.decrypt		= ccp_aes_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.init			= ccp_aes_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};

static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
	.setkey			= ccp_aes_rfc3686_setkey,
	.encrypt		= ccp_aes_rfc3686_encrypt,
	.decrypt		= ccp_aes_rfc3686_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.init			= ccp_aes_rfc3686_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};

struct ccp_aes_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	const struct skcipher_alg *alg_defaults;
};

static struct ccp_aes_def aes_algs[] = {
	{
		.mode		= CCP_AES_MODE_ECB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ecb(aes)",
		.driver_name	= "ecb-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CBC,
		.version	= CCP_VERSION(3, 0),
		.name		= "cbc(aes)",
		.driver_name	= "cbc-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "ctr(aes)",
		.driver_name	= "ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "rfc3686(ctr(aes))",
		.driver_name	= "rfc3686-ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= CTR_RFC3686_IV_SIZE,
		.alg_defaults	= &ccp_aes_rfc3686_defaults,
	},
};

static int ccp_register_aes_alg(struct list_head *head,
				const struct ccp_aes_def *def)
{
	struct ccp_crypto_skcipher_alg *ccp_alg;
	struct skcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->base.cra_blocksize = def->blocksize;
	alg->ivsize = def->ivsize;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		pr_err("%s skcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (aes_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_alg(head, &aes_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
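
/*
 * Illustrative sketch (not part of this driver): once the algorithms above
 * are registered, a kernel consumer reaches them through the generic
 * skcipher API rather than by calling the driver functions directly. The
 * snippet below is a minimal, hedged example of encrypting one block with
 * "cbc(aes)"; the key, IV, and buffer contents are hypothetical and error
 * handling is largely omitted for brevity.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 key[AES_KEYSIZE_128] = { 0 };	(hypothetical key)
 *	u8 iv[AES_BLOCK_SIZE] = { 0 };		(hypothetical IV)
 *	u8 buf[AES_BLOCK_SIZE] = { 0 };		(hypothetical plaintext block)
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 *
 * Whether the request is actually serviced by the CCP depends on which
 * "cbc(aes)" implementation wins on priority (CCP_CRA_PRIORITY) at
 * allocation time.
 */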