1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics DH-HMAC-CHAP authentication.
4 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
5 * All rights reserved.
6 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/slab.h>
11 #include <linux/err.h>
12 #include <linux/crc32.h>
13 #include <linux/base64.h>
14 #include <linux/ctype.h>
15 #include <linux/random.h>
16 #include <linux/nvme-auth.h>
17 #include <linux/nvme-keyring.h>
18 #include <linux/unaligned.h>
19
20 #include "nvmet.h"
21
/*
 * Store (or clear) a DH-HMAC-CHAP secret for @host.
 *
 * @secret is expected in the "DHHC-1:<hash-id>:<base64-key>:" transport
 * format; an empty string clears the currently stored secret instead.
 * @set_ctrl selects the controller (bidirectional) secret rather than the
 * host secret.
 *
 * Returns 0 on success, -EINVAL on a malformed secret or unknown hash id,
 * -ENOMEM on allocation failure.
 */
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		bool set_ctrl)
{
	unsigned char hash_id;
	char *dup;

	/* An empty string clears the stored secret and its hash id. */
	if (*secret == '\0') {
		if (set_ctrl) {
			kfree(host->dhchap_ctrl_secret);
			host->dhchap_ctrl_secret = NULL;
			host->dhchap_ctrl_key_hash = 0;
		} else {
			kfree(host->dhchap_secret);
			host->dhchap_secret = NULL;
			host->dhchap_key_hash = 0;
		}
		return 0;
	}

	/* Parse the hash identifier out of the 'DHHC-1:XX:...' prefix. */
	if (sscanf(secret, "DHHC-1:%hhd:%*s", &hash_id) != 1)
		return -EINVAL;
	if (hash_id > 3) {
		pr_warn("Invalid DH-HMAC-CHAP hash id %d\n", hash_id);
		return -EINVAL;
	}

	dup = kstrdup(secret, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;

	/* Swap in the new secret under the configuration lock. */
	down_write(&nvmet_config_sem);
	if (set_ctrl) {
		kfree(host->dhchap_ctrl_secret);
		host->dhchap_ctrl_secret = strim(dup);
		host->dhchap_ctrl_key_hash = hash_id;
	} else {
		kfree(host->dhchap_secret);
		host->dhchap_secret = strim(dup);
		host->dhchap_key_hash = hash_id;
	}
	up_write(&nvmet_config_sem);
	return 0;
}
63
/*
 * Select the DH group for @ctrl and prepare its key material.
 *
 * Allocates the KPP transform for @dhgroup_id, generates an ephemeral
 * private key and stores the derived public key in ctrl->dh_key
 * (ctrl->dh_keysize bytes).  An already-configured matching group is
 * reused as-is; a different existing group is torn down first.
 *
 * Returns 0 on success (including the NULL DH group, which needs no
 * transform), or a negative errno on failure.
 */
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
{
	const char *dhgroup_kpp;
	int ret = 0;

	pr_debug("%s: ctrl %d selecting dhgroup %d\n",
		 __func__, ctrl->cntlid, dhgroup_id);

	if (ctrl->dh_tfm) {
		/* Same group already set up: nothing to do. */
		if (ctrl->dh_gid == dhgroup_id) {
			pr_debug("%s: ctrl %d reuse existing DH group %d\n",
				 __func__, ctrl->cntlid, dhgroup_id);
			return 0;
		}
		/* Group change: drop the old transform before re-allocating. */
		crypto_free_kpp(ctrl->dh_tfm);
		ctrl->dh_tfm = NULL;
		ctrl->dh_gid = 0;
	}

	/* The NULL DH group does no key exchange at all. */
	if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
		return 0;

	/* Map the protocol group id to a kernel KPP algorithm name. */
	dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
	if (!dhgroup_kpp) {
		pr_debug("%s: ctrl %d invalid DH group %d\n",
			 __func__, ctrl->cntlid, dhgroup_id);
		return -EINVAL;
	}
	ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
	if (IS_ERR(ctrl->dh_tfm)) {
		pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
			 __func__, ctrl->cntlid, dhgroup_id,
			 PTR_ERR(ctrl->dh_tfm));
		ret = PTR_ERR(ctrl->dh_tfm);
		/* Leave no dangling ERR_PTR behind for later teardown. */
		ctrl->dh_tfm = NULL;
		ctrl->dh_gid = 0;
	} else {
		ctrl->dh_gid = dhgroup_id;
		pr_debug("%s: ctrl %d setup DH group %d\n",
			 __func__, ctrl->cntlid, ctrl->dh_gid);
		ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
		if (ret < 0) {
			pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
				 __func__, ctrl->cntlid, ret);
			/* Discard any stale public key from a prior group. */
			kfree_sensitive(ctrl->dh_key);
			ctrl->dh_key = NULL;
			return ret;
		}
		ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
		/* Replace any previous public key buffer. */
		kfree_sensitive(ctrl->dh_key);
		ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
		if (!ctrl->dh_key) {
			pr_warn("ctrl %d failed to allocate public key\n",
				ctrl->cntlid);
			return -ENOMEM;
		}
		ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
					   ctrl->dh_keysize);
		if (ret < 0) {
			pr_warn("ctrl %d failed to generate public key\n",
				ctrl->cntlid);
			kfree(ctrl->dh_key);
			ctrl->dh_key = NULL;
		}
	}

	return ret;
}
132
nvmet_setup_auth(struct nvmet_ctrl * ctrl,struct nvmet_sq * sq,bool reset)133 u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset)
134 {
135 int ret = 0;
136 struct nvmet_host_link *p;
137 struct nvmet_host *host = NULL;
138
139 down_read(&nvmet_config_sem);
140 if (nvmet_is_disc_subsys(ctrl->subsys))
141 goto out_unlock;
142
143 if (ctrl->subsys->allow_any_host)
144 goto out_unlock;
145
146 list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
147 pr_debug("check %s\n", nvmet_host_name(p->host));
148 if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
149 continue;
150 host = p->host;
151 break;
152 }
153 if (!host) {
154 pr_debug("host %s not found\n", ctrl->hostnqn);
155 ret = NVME_AUTH_DHCHAP_FAILURE_FAILED;
156 goto out_unlock;
157 }
158
159 if (!reset && nvmet_queue_tls_keyid(sq)) {
160 pr_debug("host %s tls enabled\n", ctrl->hostnqn);
161 goto out_unlock;
162 }
163
164 ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
165 if (ret < 0) {
166 pr_warn("Failed to setup DH group");
167 ret = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
168 goto out_unlock;
169 }
170
171 if (!host->dhchap_secret) {
172 pr_debug("No authentication provided\n");
173 goto out_unlock;
174 }
175
176 if (host->dhchap_hash_id == ctrl->shash_id) {
177 pr_debug("Re-use existing hash ID %d\n",
178 ctrl->shash_id);
179 } else {
180 ctrl->shash_id = host->dhchap_hash_id;
181 }
182
183 /* Skip the 'DHHC-1:XX:' prefix */
184 nvme_auth_free_key(ctrl->host_key);
185 ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
186 host->dhchap_key_hash);
187 if (IS_ERR(ctrl->host_key)) {
188 ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
189 ctrl->host_key = NULL;
190 goto out_free_hash;
191 }
192 pr_debug("%s: using hash %s key %*ph\n", __func__,
193 ctrl->host_key->hash > 0 ?
194 nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
195 (int)ctrl->host_key->len, ctrl->host_key->key);
196
197 nvme_auth_free_key(ctrl->ctrl_key);
198 if (!host->dhchap_ctrl_secret) {
199 ctrl->ctrl_key = NULL;
200 goto out_unlock;
201 }
202
203 ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
204 host->dhchap_ctrl_key_hash);
205 if (IS_ERR(ctrl->ctrl_key)) {
206 ret = NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE;
207 ctrl->ctrl_key = NULL;
208 goto out_free_hash;
209 }
210 pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
211 ctrl->ctrl_key->hash > 0 ?
212 nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
213 (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
214
215 out_free_hash:
216 if (ret) {
217 if (ctrl->host_key) {
218 nvme_auth_free_key(ctrl->host_key);
219 ctrl->host_key = NULL;
220 }
221 ctrl->shash_id = 0;
222 }
223 out_unlock:
224 up_read(&nvmet_config_sem);
225
226 return ret;
227 }
228
nvmet_auth_sq_free(struct nvmet_sq * sq)229 void nvmet_auth_sq_free(struct nvmet_sq *sq)
230 {
231 cancel_delayed_work(&sq->auth_expired_work);
232 #ifdef CONFIG_NVME_TARGET_TCP_TLS
233 sq->tls_key = NULL;
234 #endif
235 kfree(sq->dhchap_c1);
236 sq->dhchap_c1 = NULL;
237 kfree(sq->dhchap_c2);
238 sq->dhchap_c2 = NULL;
239 kfree(sq->dhchap_skey);
240 sq->dhchap_skey = NULL;
241 }
242
nvmet_destroy_auth(struct nvmet_ctrl * ctrl)243 void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
244 {
245 ctrl->shash_id = 0;
246
247 if (ctrl->dh_tfm) {
248 crypto_free_kpp(ctrl->dh_tfm);
249 ctrl->dh_tfm = NULL;
250 ctrl->dh_gid = 0;
251 }
252 kfree_sensitive(ctrl->dh_key);
253 ctrl->dh_key = NULL;
254
255 if (ctrl->host_key) {
256 nvme_auth_free_key(ctrl->host_key);
257 ctrl->host_key = NULL;
258 }
259 if (ctrl->ctrl_key) {
260 nvme_auth_free_key(ctrl->ctrl_key);
261 ctrl->ctrl_key = NULL;
262 }
263 #ifdef CONFIG_NVME_TARGET_TCP_TLS
264 if (ctrl->tls_key) {
265 key_put(ctrl->tls_key);
266 ctrl->tls_key = NULL;
267 }
268 #endif
269 }
270
nvmet_check_auth_status(struct nvmet_req * req)271 bool nvmet_check_auth_status(struct nvmet_req *req)
272 {
273 if (req->sq->ctrl->host_key) {
274 if (req->sq->qid > 0)
275 return true;
276 if (!req->sq->authenticated)
277 return false;
278 }
279 return true;
280 }
281
/*
 * Compute the expected DH-HMAC-CHAP host response into @response
 * (@shash_len bytes) so it can be compared against what the host sent.
 *
 * The HMAC is keyed with the host key transformed by the host NQN and
 * covers, in order: the (possibly augmented) challenge C1, SEQ (LE32),
 * transaction id (LE16), SC_C, the literal "HostHost", the host NQN,
 * a NUL separator, and the subsystem NQN.
 *
 * Returns 0 on success or a negative errno.
 */
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int shash_len)
{
	struct nvme_auth_hmac_ctx hmac;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u8 *challenge = req->sq->dhchap_c1;
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4];
	int ret;

	/* Key the HMAC with the host key transformed by the host NQN. */
	transformed_key = nvme_auth_transform_key(ctrl->host_key,
						  ctrl->hostnqn);
	if (IS_ERR(transformed_key))
		return PTR_ERR(transformed_key);

	ret = nvme_auth_hmac_init(&hmac, ctrl->shash_id, transformed_key->key,
				  transformed_key->len);
	if (ret)
		goto out_free_response;

	/* Caller's buffer must match the negotiated digest size. */
	if (shash_len != nvme_auth_hmac_hash_len(ctrl->shash_id)) {
		pr_err("%s: hash len mismatch (len %u digest %zu)\n", __func__,
		       shash_len, nvme_auth_hmac_hash_len(ctrl->shash_id));
		ret = -EINVAL;
		goto out_free_response;
	}

	/*
	 * With a real DH group the raw challenge C1 is replaced by a
	 * challenge augmented with the DH shared secret.
	 */
	if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
		challenge = kmalloc(shash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out_free_response;
		}
		ret = nvme_auth_augmented_challenge(ctrl->shash_id,
						    req->sq->dhchap_skey,
						    req->sq->dhchap_skey_len,
						    req->sq->dhchap_c1,
						    challenge, shash_len);
		if (ret)
			goto out_free_challenge;
	}

	pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid);

	nvme_auth_hmac_update(&hmac, challenge, shash_len);

	/* SEQ number, little-endian 32-bit. */
	put_unaligned_le32(req->sq->dhchap_s1, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	/* Transaction id, little-endian 16-bit. */
	put_unaligned_le16(req->sq->dhchap_tid, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	/* SC_C byte, then the fixed "HostHost" label. */
	*buf = req->sq->sc_c;
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "HostHost", 8);
	memset(buf, 0, 4);
	nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn));
	/* Single NUL byte separating host NQN from subsystem NQN. */
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn,
			      strlen(ctrl->subsys->subsysnqn));
	nvme_auth_hmac_final(&hmac, response);
	ret = 0;
out_free_challenge:
	/* Only free the challenge if we allocated an augmented copy. */
	if (challenge != req->sq->dhchap_c1)
		kfree(challenge);
out_free_response:
	/* Scrub keyed HMAC state before it leaves scope. */
	memzero_explicit(&hmac, sizeof(hmac));
	nvme_auth_free_key(transformed_key);
	return ret;
}
354
/*
 * Compute the DH-HMAC-CHAP controller response into @response
 * (@shash_len bytes) for bidirectional authentication.
 *
 * The HMAC is keyed with the controller key transformed by the
 * subsystem NQN and covers, in order: the (possibly augmented)
 * challenge C2, SEQ2 (LE32), transaction id (LE16), a zero byte,
 * the literal "Controller", the subsystem NQN, a NUL separator,
 * and the host NQN.
 *
 * Returns 0 on success or a negative errno.
 */
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int shash_len)
{
	struct nvme_auth_hmac_ctx hmac;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u8 *challenge = req->sq->dhchap_c2;
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4];
	int ret;

	/* Key the HMAC with the controller key transformed by the subsys NQN. */
	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
						  ctrl->subsys->subsysnqn);
	if (IS_ERR(transformed_key))
		return PTR_ERR(transformed_key);

	ret = nvme_auth_hmac_init(&hmac, ctrl->shash_id, transformed_key->key,
				  transformed_key->len);
	if (ret)
		goto out_free_response;

	/* Caller's buffer must match the negotiated digest size. */
	if (shash_len != nvme_auth_hmac_hash_len(ctrl->shash_id)) {
		pr_err("%s: hash len mismatch (len %u digest %zu)\n", __func__,
		       shash_len, nvme_auth_hmac_hash_len(ctrl->shash_id));
		ret = -EINVAL;
		goto out_free_response;
	}

	/*
	 * With a real DH group the raw challenge C2 is replaced by a
	 * challenge augmented with the DH shared secret.
	 */
	if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
		challenge = kmalloc(shash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out_free_response;
		}
		ret = nvme_auth_augmented_challenge(ctrl->shash_id,
						    req->sq->dhchap_skey,
						    req->sq->dhchap_skey_len,
						    req->sq->dhchap_c2,
						    challenge, shash_len);
		if (ret)
			goto out_free_challenge;
	}

	nvme_auth_hmac_update(&hmac, challenge, shash_len);

	/* SEQ number, little-endian 32-bit. */
	put_unaligned_le32(req->sq->dhchap_s2, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	/* Transaction id, little-endian 16-bit. */
	put_unaligned_le16(req->sq->dhchap_tid, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	/* Zero byte, then the fixed "Controller" label. */
	memset(buf, 0, 4);
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "Controller", 10);
	nvme_auth_hmac_update(&hmac, ctrl->subsys->subsysnqn,
			      strlen(ctrl->subsys->subsysnqn));
	/* Single NUL byte separating subsystem NQN from host NQN. */
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->hostnqn, strlen(ctrl->hostnqn));
	nvme_auth_hmac_final(&hmac, response);
	ret = 0;
out_free_challenge:
	/* Only free the challenge if we allocated an augmented copy. */
	if (challenge != req->sq->dhchap_c2)
		kfree(challenge);
out_free_response:
	/* Scrub keyed HMAC state before it leaves scope. */
	memzero_explicit(&hmac, sizeof(hmac));
	nvme_auth_free_key(transformed_key);
	return ret;
}
422
/*
 * Copy the controller's DH public key into @buf.  @buf_size must match
 * the key size chosen in nvmet_setup_dhgroup().
 *
 * Returns 0 on success, -ENOKEY if no public key has been generated,
 * or -EINVAL on a size mismatch.
 */
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!ctrl->dh_key) {
		pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
		return -ENOKEY;
	}
	if (buf_size != ctrl->dh_keysize) {
		pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
			ctrl->cntlid, ctrl->dh_keysize, buf_size);
		return -EINVAL;
	}

	memcpy(buf, ctrl->dh_key, buf_size);
	pr_debug("%s: ctrl %d public key %*ph\n", __func__,
		 ctrl->cntlid, (int)buf_size, buf);
	return 0;
}
445
nvmet_auth_ctrl_sesskey(struct nvmet_req * req,const u8 * pkey,int pkey_size)446 int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
447 const u8 *pkey, int pkey_size)
448 {
449 struct nvmet_ctrl *ctrl = req->sq->ctrl;
450 int ret;
451
452 req->sq->dhchap_skey_len = ctrl->dh_keysize;
453 req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
454 if (!req->sq->dhchap_skey)
455 return -ENOMEM;
456 ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
457 pkey, pkey_size,
458 req->sq->dhchap_skey,
459 req->sq->dhchap_skey_len);
460 if (ret)
461 pr_debug("failed to compute shared secret, err %d\n", ret);
462 else
463 pr_debug("%s: shared secret %*ph\n", __func__,
464 (int)req->sq->dhchap_skey_len,
465 req->sq->dhchap_skey);
466
467 return ret;
468 }
469
/*
 * Generate a TLS PSK from the completed DH-HMAC-CHAP exchange on @sq
 * and, when TLS support is built in, install it as the controller's
 * cached TLS key via the keyring.
 *
 * Failures are logged but not propagated: the authentication exchange
 * itself has already succeeded, only the derived TLS key is lost.
 */
void nvmet_auth_insert_psk(struct nvmet_sq *sq)
{
	int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id);
	u8 *psk, *tls_psk;
	char *digest;
	size_t psk_len;
	int ret;
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key *tls_key = NULL;
#endif

	/* Generated PSK is derived from the shared secret and both challenges. */
	ret = nvme_auth_generate_psk(sq->ctrl->shash_id,
				     sq->dhchap_skey,
				     sq->dhchap_skey_len,
				     sq->dhchap_c1, sq->dhchap_c2,
				     hash_len, &psk, &psk_len);
	if (ret) {
		pr_warn("%s: ctrl %d qid %d failed to generate PSK, error %d\n",
			__func__, sq->ctrl->cntlid, sq->qid, ret);
		return;
	}
	/* Digest identifies the PSK for this subsystem/host NQN pair. */
	ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
					sq->ctrl->subsys->subsysnqn,
					sq->ctrl->hostnqn, &digest);
	if (ret) {
		pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
			__func__, sq->ctrl->cntlid, sq->qid, ret);
		goto out_free_psk;
	}
	ret = nvme_auth_derive_tls_psk(sq->ctrl->shash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		pr_warn("%s: ctrl %d qid %d failed to derive TLS PSK, error %d\n",
			__func__, sq->ctrl->cntlid, sq->qid, ret);
		goto out_free_digest;
	}
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	/* Install the derived PSK into the keyring; on failure cache NULL. */
	tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn,
				       sq->ctrl->subsys->subsysnqn,
				       sq->ctrl->shash_id, tls_psk, psk_len,
				       digest);
	if (IS_ERR(tls_key)) {
		pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
			__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
		tls_key = NULL;
	}
	/* Replace any previously cached key reference. */
	if (sq->ctrl->tls_key)
		key_put(sq->ctrl->tls_key);
	sq->ctrl->tls_key = tls_key;
#endif
	/* All derived secrets are scrubbed before being freed. */
	kfree_sensitive(tls_psk);
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
}
526