// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <crypto/internal/akcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/ccp.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received. With multiple queues available, the CCP can process more
 * than one cmd at a time. Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

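/*
 * Cap on the number of cmds allowed in the queue at once. At the cap,
 * a new cmd is backlogged if it carries CCP_CMD_MAY_BACKLOG and is
 * otherwise rejected with -ENOSPC.
 */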
#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

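/* Treat -EINPROGRESS and -EBUSY as successful submission of a cmd */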
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

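/*
 * Update the queue when @crypto_cmd finishes: advance the backlog
 * pointer (returning the next backlogged cmd, if any, through
 * @backlog), remove the completed cmd from the queue, and return the
 * next queued cmd sharing the completed cmd's tfm so that it can be
 * submitted to the CCP.
 */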
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list
	 * special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

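/*
 * Completion callback invoked by the CCP driver. On -EINPROGRESS the
 * request has merely moved from backlogged to in-progress; otherwise
 * the cmd has finished: notify any newly unbacklogged request,
 * complete the finished request, and submit the next held cmd (one
 * with a matching tfm) to the CCP.
 */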
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			crypto_request_complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		crypto_request_complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		crypto_request_complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	crypto_request_complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx_dma(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		crypto_request_complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			crypto_request_complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

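/*
 * Queue a cmd and, when no other cmd for the same tfm is already
 * pending, submit it to the CCP. A cmd whose tfm is busy is held in
 * the queue and submitted from the completion path of the cmd ahead
 * of it.
 */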
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm. If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 *
 * Return: -EINPROGRESS if the request was queued, -EBUSY if it was
 * backlogged, or another negative error code on failure (e.g. -ENOMEM,
 * -ENOSPC).
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

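/*
 * Illustrative sketch only (not part of this file): an algorithm
 * front end, e.g. a skcipher encrypt handler, typically fills in the
 * ccp_cmd embedded in its request context and hands it off here. The
 * context type and setup below are assumptions for illustration:
 *
 *	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
 *
 *	// ... fill in rctx->cmd (engine, operation, scatterlists) ...
 *	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 */

/**
 * ccp_crypto_sg_table_add - append scatterlist entries to an sg_table
 *
 * @table: sg_table with preallocated, unused entries
 * @sg_add: scatterlist whose entries are copied into @table
 *
 * Copies the @sg_add entries into the first unused slots of @table and
 * returns the last entry filled, or NULL if @table lacks the room.
 */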
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

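/*
 * Illustrative sketch only: a caller typically allocates a table large
 * enough for all fragments and then splices source lists into it. The
 * names below are assumptions for illustration:
 *
 *	ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 *	sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
 *	if (!sg)
 *		return -EINVAL;
 */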
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}

static int __init ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
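	/* On failure, unregister any algorithms registered before the error */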
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void __exit ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);