// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#include <linux/nvme-keyring.h>

#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

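/*
 * Per-queue DH-HMAC-CHAP negotiation state; one context is allocated
 * for every possible queue (see ctrl_max_dhchaps() below). c1 holds
 * the controller's challenge and c2 the host's challenge used for
 * bidirectional authentication; the fixed 64-byte buffers match the
 * largest supported digest (SHA-512).
 */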
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_shash *shash_tfm;
	struct crypto_kpp *dh_tfm;
	struct nvme_dhchap_key *transformed_key;
	void *buf;
	int qid;
	int error;
	u32 s1;
	u32 s2;
	bool bi_directional;
	bool authenticated;
	u16 transaction;
	u8 status;
	u8 dhgroup_id;
	u8 hash_id;
	size_t hash_len;
	u8 c1[64];
	u8 c2[64];
	u8 response[64];
	u8 *ctrl_key;
	u8 *host_key;
	u8 *sess_key;
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;

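/*
 * Upper bound on the number of per-queue contexts: one for each
 * possible I/O, write, and poll queue, plus one for the admin queue.
 * E.g. nr_io_queues=4, nr_write_queues=0, nr_poll_queues=0 yields
 * 4 + 0 + 0 + 1 = 5 contexts.
 */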
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
			ctrl->opts->nr_poll_queues + 1;
}

static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
			    void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q = ctrl->fabrics_q;
	int ret;

	if (qid != 0) {
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
		q = ctrl->connect_q;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}

static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}

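/*
 * Build the AUTH_Negotiate payload. As used here, idlist[] carries the
 * hash identifiers (halen entries) starting at index 0 and the DH
 * group identifiers (dhlen entries) starting at index 30, which is why
 * the DH group IDs below begin at idlist[30].
 */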
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	return size;
}

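/*
 * Parse the AUTH_Challenge payload: adopt the hash and DH group the
 * controller selected (allocating or reusing the matching transforms),
 * then record the sequence number S1, the challenge C1, and, for
 * non-NULL DH groups, the controller's public DH value.
 */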
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}

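/*
 * Build the AUTH_Reply payload. rval[] is packed as three consecutive
 * regions: the host response (hl bytes), the optional controller
 * challenge C2 (hl bytes, bidirectional authentication only), and the
 * host's public DH value (dhvlen bytes).
 */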
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	if (ctrl->opts->concat)
		chap->s2 = 0;
	else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}

static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}

static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}

static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}

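/*
 * Calculate the host response. In terms of the fields used below the
 * computation is (a sketch; see the NVMe DH-HMAC-CHAP specification
 * for the authoritative definition):
 *
 *	R = HMAC(Kt, C || le32(S1) || le16(T) || 0x00 ||
 *		 "HostHost" || NQNh || 0x00 || NQNc)
 *
 * where Kt is the transformed host key, C the (possibly augmented)
 * challenge, S1 the sequence number, T the transaction ID, NQNh the
 * host NQN, and NQNc the subsystem NQN.
 */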
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
			    strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}

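/*
 * Calculate the expected controller response: the same construction as
 * the host response above, but keyed with the transformed controller
 * key and with the roles swapped (a sketch):
 *
 *	R = HMAC(Kt, C || le32(S2) || le16(T) || 0x00 ||
 *		 "Controller" || NQNc || 0x00 || NQNh)
 */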
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}

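/*
 * DH key exchange: generate an ephemeral private key for the selected
 * group, export the public value into chap->host_key (sent to the
 * controller in the reply), and combine the controller's public value
 * with it to derive the shared session key in chap->sess_key.
 */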
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}

static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}

static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	chap->authenticated = false;
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}

void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);

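/*
 * Secure channel concatenation (admin queue only): derive a TLS PSK
 * from the freshly negotiated session key (session key -> generated
 * PSK -> PSK digest -> retained TLS PSK), insert it into the keyring,
 * and revoke any previously generated key it replaces.
 */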
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *digest, *tls_psk;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		  "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		 __func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}

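/*
 * Per-queue authentication worker. The message flow implemented below
 * follows the DH-HMAC-CHAP protocol:
 *
 *	host				controller
 *	AUTH_Negotiate		--->
 *				<---	AUTH_Challenge
 *	AUTH_Reply		--->
 *				<---	AUTH_Success1
 *	AUTH_Success2		--->	(bidirectional auth only)
 *
 * and falls back to sending AUTH_Failure2 if any step fails.
 */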
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a buffer large enough for the entire negotiation:
	 * 4k is enough even for ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * Only update the error if sending failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}

int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	chap = &ctrl->dhchap_ctxs[qid];
	cancel_work_sync(&chap->auth_work);
	queue_work(nvme_auth_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	chap = &ctrl->dhchap_ctxs[qid];
	flush_work(&chap->auth_work);
	ret = chap->error;
	/* clear sensitive info */
	nvme_auth_reset_dhchap(chap);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);
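
/*
 * Typical transport driver usage (a sketch; I/O queue contexts are
 * driven the same way with their queue id):
 *
 *	ret = nvme_auth_negotiate(ctrl, 0);
 *	if (!ret)
 *		ret = nvme_auth_wait(ctrl, 0);
 *
 * nvme_auth_negotiate() only schedules the per-queue work item;
 * nvme_auth_wait() flushes it, returns chap->error, and clears the
 * sensitive per-queue state.
 */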

static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail out as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}

int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
			&ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
				sizeof(*chap), GFP_KERNEL);
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		chap->authenticated = false;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	int i;

	if (ctrl->dhchap_ctxs) {
		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
		/* kvfree() pairs with the kvcalloc() in nvme_auth_init_ctrl() */
		kvfree(ctrl->dhchap_ctxs);
	}
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}

void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}