1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
4 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
5 * All rights reserved.
6 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/blkdev.h>
9 #include <linux/random.h>
10 #include <linux/nvme-auth.h>
11 #include <crypto/kpp.h>
12 #include "nvmet.h"
13
nvmet_auth_expired_work(struct work_struct * work)14 static void nvmet_auth_expired_work(struct work_struct *work)
15 {
16 struct nvmet_sq *sq = container_of(to_delayed_work(work),
17 struct nvmet_sq, auth_expired_work);
18
19 pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
20 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
21 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
22 sq->dhchap_tid = -1;
23 }
24
nvmet_auth_sq_init(struct nvmet_sq * sq)25 void nvmet_auth_sq_init(struct nvmet_sq *sq)
26 {
27 /* Initialize in-band authentication */
28 INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
29 sq->authenticated = false;
30 sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
31 }
32
nvmet_auth_negotiate(struct nvmet_req * req,void * d)33 static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
34 {
35 struct nvmet_ctrl *ctrl = req->sq->ctrl;
36 struct nvmf_auth_dhchap_negotiate_data *data = d;
37 int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
38
39 pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
40 __func__, ctrl->cntlid, req->sq->qid,
41 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
42 data->auth_protocol[0].dhchap.halen,
43 data->auth_protocol[0].dhchap.dhlen);
44 req->sq->dhchap_tid = le16_to_cpu(data->t_id);
45 req->sq->sc_c = data->sc_c;
46 if (data->sc_c != NVME_AUTH_SECP_NOSC) {
47 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
48 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
49 /* Secure concatenation can only be enabled on the admin queue */
50 if (req->sq->qid)
51 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
52 switch (data->sc_c) {
53 case NVME_AUTH_SECP_NEWTLSPSK:
54 if (nvmet_queue_tls_keyid(req->sq))
55 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
56 break;
57 case NVME_AUTH_SECP_REPLACETLSPSK:
58 if (!nvmet_queue_tls_keyid(req->sq))
59 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
60 break;
61 default:
62 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
63 }
64 ctrl->concat = true;
65 }
66
67 if (data->napd != 1)
68 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
69
70 if (data->auth_protocol[0].dhchap.authid !=
71 NVME_AUTH_DHCHAP_AUTH_ID)
72 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
73
74 for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
75 u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
76
77 if (!fallback_hash_id && nvme_auth_hmac_hash_len(host_hmac_id))
78 fallback_hash_id = host_hmac_id;
79 if (ctrl->shash_id != host_hmac_id)
80 continue;
81 hash_id = ctrl->shash_id;
82 break;
83 }
84 if (hash_id == 0) {
85 if (fallback_hash_id == 0) {
86 pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
87 __func__, ctrl->cntlid, req->sq->qid);
88 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
89 }
90 pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
91 __func__, ctrl->cntlid, req->sq->qid,
92 nvme_auth_hmac_name(fallback_hash_id));
93 ctrl->shash_id = fallback_hash_id;
94 }
95
96 dhgid = -1;
97 fallback_dhgid = -1;
98 for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
99 int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
100
101 if (tmp_dhgid != ctrl->dh_gid) {
102 dhgid = tmp_dhgid;
103 break;
104 }
105 if (fallback_dhgid < 0) {
106 const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
107
108 if (crypto_has_kpp(kpp, 0, 0))
109 fallback_dhgid = tmp_dhgid;
110 }
111 }
112 if (dhgid < 0) {
113 if (fallback_dhgid < 0) {
114 pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
115 __func__, ctrl->cntlid, req->sq->qid);
116 return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
117 }
118 pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
119 __func__, ctrl->cntlid, req->sq->qid,
120 nvme_auth_dhgroup_name(fallback_dhgid));
121 ctrl->dh_gid = fallback_dhgid;
122 }
123 if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
124 pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
125 "for secure channel concatenation\n", __func__,
126 ctrl->cntlid, req->sq->qid);
127 return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
128 }
129 pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
130 __func__, ctrl->cntlid, req->sq->qid,
131 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
132 return 0;
133 }
134
nvmet_auth_reply(struct nvmet_req * req,void * d)135 static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
136 {
137 struct nvmet_ctrl *ctrl = req->sq->ctrl;
138 struct nvmf_auth_dhchap_reply_data *data = d;
139 u16 dhvlen = le16_to_cpu(data->dhvlen);
140 u8 *response;
141
142 pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
143 __func__, ctrl->cntlid, req->sq->qid,
144 data->hl, data->cvalid, dhvlen);
145
146 if (dhvlen) {
147 if (!ctrl->dh_tfm)
148 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
149 if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
150 dhvlen) < 0)
151 return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
152 }
153
154 response = kmalloc(data->hl, GFP_KERNEL);
155 if (!response)
156 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
157
158 if (!ctrl->host_key) {
159 pr_warn("ctrl %d qid %d no host key\n",
160 ctrl->cntlid, req->sq->qid);
161 kfree(response);
162 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
163 }
164 if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
165 pr_debug("ctrl %d qid %d host hash failed\n",
166 ctrl->cntlid, req->sq->qid);
167 kfree(response);
168 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
169 }
170
171 if (memcmp(data->rval, response, data->hl)) {
172 pr_info("ctrl %d qid %d host response mismatch\n",
173 ctrl->cntlid, req->sq->qid);
174 pr_debug("ctrl %d qid %d rval %*ph\n",
175 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
176 pr_debug("ctrl %d qid %d response %*ph\n",
177 ctrl->cntlid, req->sq->qid, data->hl, response);
178 kfree(response);
179 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
180 }
181 kfree(response);
182 pr_debug("%s: ctrl %d qid %d host authenticated\n",
183 __func__, ctrl->cntlid, req->sq->qid);
184 if (!data->cvalid && ctrl->concat) {
185 pr_debug("%s: ctrl %d qid %d invalid challenge\n",
186 __func__, ctrl->cntlid, req->sq->qid);
187 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
188 }
189 req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
190 if (data->cvalid) {
191 req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
192 GFP_KERNEL);
193 if (!req->sq->dhchap_c2)
194 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
195
196 pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
197 __func__, ctrl->cntlid, req->sq->qid, data->hl,
198 req->sq->dhchap_c2);
199 }
200 /*
201 * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
202 * Sequence Number (SEQNUM): [ .. ]
203 * The value 0h is used to indicate that bidirectional authentication
204 * is not performed, but a challenge value C2 is carried in order to
205 * generate a pre-shared key (PSK) for subsequent establishment of a
206 * secure channel.
207 */
208 if (req->sq->dhchap_s2 == 0) {
209 if (ctrl->concat)
210 nvmet_auth_insert_psk(req->sq);
211 req->sq->authenticated = true;
212 kfree(req->sq->dhchap_c2);
213 req->sq->dhchap_c2 = NULL;
214 } else if (!data->cvalid)
215 req->sq->authenticated = true;
216
217 return 0;
218 }
219
nvmet_auth_failure2(void * d)220 static u8 nvmet_auth_failure2(void *d)
221 {
222 struct nvmf_auth_dhchap_failure_data *data = d;
223
224 return data->rescode_exp;
225 }
226
nvmet_auth_send_data_len(struct nvmet_req * req)227 u32 nvmet_auth_send_data_len(struct nvmet_req *req)
228 {
229 return le32_to_cpu(req->cmd->auth_send.tl);
230 }
231
/*
 * Handle an Authentication Send fabrics command.
 *
 * The host pushes the next DH-HMAC-CHAP message (negotiate, reply,
 * success2 or failure2); this function validates the command fields,
 * copies in the payload and advances the per-queue authentication state
 * machine. On non-final steps the expiry timer is (re)armed; reaching
 * FAILURE2 tears down the authentication state and marks the controller
 * with a fatal error.
 */
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	/* Only the DH-HMAC-CHAP security protocol is supported */
	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	/* All message types share the common header layout */
	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			/* Re-authentication is only set up on the admin queue */
			if (!req->sq->qid) {
				dhchap_status = nvmet_setup_auth(ctrl, req->sq,
								 true);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	/* DH-HMAC-CHAP messages must match the expected state-machine step */
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	/* ... and carry the transaction id established during negotiate */
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		/* Verify the host's response; on success we send SUCCESS1 */
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		/* Host confirmed bidirectional authentication */
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		/* Host aborted authentication; record its reason code */
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	/* Non-final step: (re)arm the transaction expiry timer */
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_percpu_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}

complete:
	nvmet_req_complete(req, status);
}
406
nvmet_auth_challenge(struct nvmet_req * req,void * d,int al)407 static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
408 {
409 struct nvmf_auth_dhchap_challenge_data *data = d;
410 struct nvmet_ctrl *ctrl = req->sq->ctrl;
411 int ret = 0;
412 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
413 int data_size = sizeof(*d) + hash_len;
414
415 if (ctrl->dh_tfm)
416 data_size += ctrl->dh_keysize;
417 if (al < data_size) {
418 pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
419 al, data_size);
420 return -EINVAL;
421 }
422 memset(data, 0, data_size);
423 req->sq->dhchap_s1 = nvme_auth_get_seqnum();
424 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
425 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
426 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
427 data->hashid = ctrl->shash_id;
428 data->hl = hash_len;
429 data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
430 req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
431 if (!req->sq->dhchap_c1)
432 return -ENOMEM;
433 get_random_bytes(req->sq->dhchap_c1, data->hl);
434 memcpy(data->cval, req->sq->dhchap_c1, data->hl);
435 if (ctrl->dh_tfm) {
436 data->dhgid = ctrl->dh_gid;
437 data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
438 ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
439 ctrl->dh_keysize);
440 }
441 pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
442 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
443 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
444 return ret;
445 }
446
nvmet_auth_success1(struct nvmet_req * req,void * d,int al)447 static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
448 {
449 struct nvmf_auth_dhchap_success1_data *data = d;
450 struct nvmet_ctrl *ctrl = req->sq->ctrl;
451 int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
452
453 WARN_ON(al < sizeof(*data));
454 memset(data, 0, sizeof(*data));
455 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
456 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
457 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
458 data->hl = hash_len;
459 if (req->sq->dhchap_c2) {
460 if (!ctrl->ctrl_key) {
461 pr_warn("ctrl %d qid %d no ctrl key\n",
462 ctrl->cntlid, req->sq->qid);
463 return NVME_AUTH_DHCHAP_FAILURE_FAILED;
464 }
465 if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
466 return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
467 data->rvalid = 1;
468 pr_debug("ctrl %d qid %d response %*ph\n",
469 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
470 }
471 return 0;
472 }
473
nvmet_auth_failure1(struct nvmet_req * req,void * d,int al)474 static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
475 {
476 struct nvmf_auth_dhchap_failure_data *data = d;
477
478 WARN_ON(al < sizeof(*data));
479 data->auth_type = NVME_AUTH_COMMON_MESSAGES;
480 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
481 data->t_id = cpu_to_le16(req->sq->dhchap_tid);
482 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
483 data->rescode_exp = req->sq->dhchap_status;
484 }
485
nvmet_auth_receive_data_len(struct nvmet_req * req)486 u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
487 {
488 return le32_to_cpu(req->cmd->auth_receive.al);
489 }
490
/*
 * Handle an Authentication Receive fabrics command.
 *
 * The host pulls the controller's next DH-HMAC-CHAP message; based on
 * the queue's current state-machine step this emits a challenge,
 * success1 or failure1 message into the transfer buffer. Reaching
 * FAILURE1 tears down the authentication state and marks the controller
 * with a fatal error.
 */
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	/* Only the DH-HMAC-CHAP security protocol is supported */
	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		/* Challenge sent; expect the host's reply next */
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			/* Replace the success message with a failure1 */
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		/* Unexpected step: report a generic failure to the host */
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	/* FAILURE1 is terminal: drop auth state and fail the controller */
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}
583