/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_nvme.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/stdarg.h>
#include <vm/vm.h>

#include "nvme_private.h"
#include "nvme_linux.h"

#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);

static void
nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
{
	bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
}

static void
nvme_ctrlr_devctl_va(struct nvme_controller *ctrlr, const char *type,
    const char *msg, va_list ap)
{
	struct sbuf sb;
	int error;

	if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
		return;
	sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
	sbuf_vprintf(&sb, msg, ap);
	error = sbuf_finish(&sb);
	if (error == 0)
		devctl_notify("nvme", "controller", type, sbuf_data(&sb));
	sbuf_delete(&sb);
}

static void
nvme_ctrlr_devctl(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
	va_end(ap);
}

static void
nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
{
	struct sbuf sb;
	va_list ap;
	int error;

	if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
		return;
	sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
	va_start(ap, msg);
	sbuf_vprintf(&sb, msg, ap);
	va_end(ap);
	error = sbuf_finish(&sb);
	if (error == 0)
		printf("%s\n", sbuf_data(&sb));
	sbuf_delete(&sb);
	va_start(ap, msg);
	nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
	va_end(ap);
}

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t num_entries;
	int error;

	qpair = &ctrlr->adminq;
	qpair->id = 0;
	qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
	qpair->domain = ctrlr->domain;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}

#define QP(ctrlr, c)	((c) * (ctrlr)->num_io_queues / mp_ncpus)

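/*
 * Illustrative note (not from the original source): QP() maps a CPU id to an
 * I/O queue index by scaling, so CPUs are split into contiguous bands, one
 * band per queue.  For example, with mp_ncpus = 16 and num_io_queues = 4,
 * CPUs 0-3 map to queue 0, CPUs 4-7 to queue 1, and so on;
 * nvme_ctrlr_construct_io_qpairs() below walks these bands to size and pin
 * each qpair, and nvme_ctrlr_submit_io_request() uses QP(ctrlr, curcpu) to
 * pick the submitting CPU's queue.
 */
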
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair *qpair;
	uint32_t cap_lo;
	uint16_t mqes;
	int c, error, i, n;
	int num_entries, num_trackers, max_entries;

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but devices may
	 * specify a smaller limit, so we need to check the MQES field in the
	 * capabilities register.  We have to cap the number of entries to what
	 * the current doorbell stride allows for in BAR 0/1; otherwise the
	 * remaining entries are inaccessible.  MQES should reflect this, and
	 * this is just a fail-safe.
	 */
	max_entries =
	    (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
	    (1 << (ctrlr->dstrd + 1));
	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	mqes = NVME_CAP_LO_MQES(cap_lo);
	num_entries = min(num_entries, mqes + 1);
	num_entries = min(num_entries, max_entries);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a
	 * hint, not a hard limit, and will need to be revisited when the
	 * upper layers of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 */
		qpair->id = i + 1;
		if (ctrlr->num_io_queues > 1) {
			/* Find number of CPUs served by this queue. */
			for (n = 1; QP(ctrlr, c + n) == i; n++)
				;
			/* Shuffle multiple NVMe devices between CPUs. */
			qpair->cpu = c + (device_get_unit(ctrlr->dev) + n / 2) % n;
			qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
		} else {
			qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
			qpair->domain = ctrlr->domain;
		}

		/*
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair, num_entries, num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr, bool admin_also)
{
	int i;

	/*
	 * No need to disable queues before failing them.  Failing is a
	 * superset of disabling (pedantically, disabling would abort the AERs
	 * silently with a different error, but when we fail, that hardly
	 * matters).
	 */
	ctrlr->is_failed = true;
	if (admin_also) {
		ctrlr->is_failed_admin = true;
		nvme_qpair_fail(&ctrlr->adminq);
	}
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++) {
			nvme_qpair_fail(&ctrlr->ioq[i]);
		}
	}
	nvme_notify_fail_consumers(ctrlr);
}

/*
 * Wait for RDY to change.
 *
 * Starts sleeping for 1us and geometrically increases it the longer we wait,
 * capped at 1ms.
 */
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t csts;

	while (1) {
		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == NVME_GONE)		/* Hot unplug. */
			return (ENXIO);
		if (NVMEV(NVME_CSTS_REG_RDY, csts) == desired_val)
			break;
		if (timeout - ticks < 0) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}

		pause_sbt("nvmerdy", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}

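/*
 * Illustrative note (not from the original source): with delta_t starting at
 * SBT_1US and growing by a factor of 3/2 per iteration, the sleeps run
 * roughly 1us, 1.5us, 2.25us, ..., saturating at SBT_1MS after about 18
 * iterations.  The poll loop therefore stays cheap for fast controllers yet
 * settles into 1ms polls until ready_timeout_in_ms expires.
 */
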
static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint8_t en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = NVMEV(NVME_CC_REG_EN, cc);
	rdy = NVMEV(NVME_CSTS_REG_RDY, csts);

	/*
	 * Per 3.1.5 in NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0 "has undefined results".  So make sure that CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */
	if (en == 0) {
		/* Wait for RDY == 0 or timeout & fail. */
		if (rdy == 0)
			return (0);
		return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}
	if (rdy == 0) {
		/* EN == 1, wait for RDY == 1 or timeout & fail. */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
		if (err != 0)
			return (err);
	}

	cc &= ~NVMEM(NVME_CC_REG_EN);
	nvme_mmio_write_4(ctrlr, cc, cc);

	/*
	 * A few drives have firmware bugs that freeze the drive if we access
	 * the mmio too soon after we disable.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}

static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint32_t aqa;
	uint32_t qsize;
	uint8_t en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = NVMEV(NVME_CC_REG_EN, cc);
	rdy = NVMEV(NVME_CSTS_REG_RDY, csts);

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already
	 * enabled.
	 */
	if (en == 1) {
		if (rdy == 1)
			return (0);
		return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	/* EN == 0 already; wait for RDY == 0 or timeout & fail. */
	err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
	if (err != 0)
		return (err);

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);

	/* acqs and asqs are 0-based. */
	qsize = ctrlr->adminq.num_entries - 1;

	aqa = 0;
	aqa |= NVMEF(NVME_AQA_REG_ACQS, qsize);
	aqa |= NVMEF(NVME_AQA_REG_ASQS, qsize);
	nvme_mmio_write_4(ctrlr, aqa, aqa);

	/* Initialization values for CC */
	cc = 0;
	cc |= NVMEF(NVME_CC_REG_EN, 1);
	cc |= NVMEF(NVME_CC_REG_CSS, 0);
	cc |= NVMEF(NVME_CC_REG_AMS, 0);
	cc |= NVMEF(NVME_CC_REG_SHN, 0);
	cc |= NVMEF(NVME_CC_REG_IOSQES, 6);	/* SQ entry size == 64 == 2^6 */
	cc |= NVMEF(NVME_CC_REG_IOCQES, 4);	/* CQ entry size == 16 == 2^4 */

	/*
	 * Use the Memory Page Size selected during device initialization.
	 * Note that the value stored in mps is suitable to use here without
	 * adjusting by NVME_MPS_SHIFT.
	 */
	cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);

	nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
	nvme_mmio_write_4(ctrlr, cc, cc);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

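/*
 * Illustrative note (not from the original source): for a 256-entry admin
 * queue, qsize = 255 is written to both AQA.ASQS and AQA.ACQS above, since
 * those fields are 0-based.  The CC value then composes EN=1, CSS=0 (NVM
 * command set), MPS from ctrlr->mps, IOSQES=6 (64-byte SQ entries) and
 * IOCQES=4 (16-byte CQ entries) before the single 32-bit write that enables
 * the controller.
 */
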
static void
nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}
}

static int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int err;

	TSENTER();

	ctrlr->is_failed_admin = true;
	nvme_ctrlr_disable_qpairs(ctrlr);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		goto out;

	err = nvme_ctrlr_enable(ctrlr);
out:
	if (err == 0)
		ctrlr->is_failed_admin = false;

	TSEXIT();
	return (err);
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0)
		/*
		 * Controller is already resetting.  Return immediately since
		 * there is no need to kick off another reset.
		 */
		return;

	if (!ctrlr->is_dying)
		taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_controller_data_swapbytes(&ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
		    NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));

	return (0);
}

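/*
 * Illustrative note (not from the original source): MDTS is a power-of-two
 * multiplier of the minimum memory page size.  With the common MPSMIN of 0
 * (4 KiB pages, NVME_MPS_SHIFT = 12) and mdts = 5, the clamp above works out
 * to 1 << (5 + 12 + 0) = 128 KiB, so max_xfer_size shrinks to that if it was
 * larger.
 */
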
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	int cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
	if (ctrlr->num_io_queues > vm_ndomains)
		ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;

	return (0);
}

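/*
 * Illustrative note (not from the original source): e.g. a completion with
 * cdw0 = 0x00070003 decodes as 0x0003 + 1 = 4 submission queues and
 * 0x0007 + 1 = 8 completion queues granted; num_io_queues is then clamped to
 * 4 and, on a multi-domain NUMA system, trimmed so it divides evenly among
 * the domains.
 */
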
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;
	int i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_qpair *qpair;

	for (int i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		nvme_completion_poll(&status);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace *ns;
	uint32_t i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i + 1, ctrlr);
	}

	return (0);
}

static bool
is_log_page_id_valid(uint8_t page_id)
{
	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
	case NVME_LOG_CHANGED_NAMESPACE:
	case NVME_LOG_COMMAND_EFFECT:
	case NVME_LOG_RES_NOTIFICATION:
	case NVME_LOG_SANITIZE_STATUS:
		return (true);
	}

	return (false);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		log_page_size = sizeof(struct nvme_ns_list);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		log_page_size = sizeof(struct nvme_command_effects_page);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		log_page_size = sizeof(struct nvme_res_notification_page);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		log_page_size = sizeof(struct nvme_sanitize_status_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    uint8_t state)
{
	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
		nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");

	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
		nvme_printf(ctrlr, "SMART WARNING: temperature above threshold\n");

	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
		nvme_printf(ctrlr, "SMART WARNING: device reliability degraded\n");

	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
		nvme_printf(ctrlr, "SMART WARNING: media placed in read only mode\n");

	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
		nvme_printf(ctrlr, "SMART WARNING: volatile memory backup device failed\n");

	if (state & NVME_CRIT_WARN_ST_PERSISTENT_MEMORY_REGION)
		nvme_printf(ctrlr, "SMART WARNING: persistent memory read only or unreliable\n");

	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
		nvme_printf(ctrlr, "SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
		    state & NVME_CRIT_WARN_ST_RESERVED_MASK);

	nvme_ctrlr_devctl(ctrlr, "critical", "SMART_ERROR", "state=0x%02x", state);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/*
	 * Save the completion status; the associated log page id is in bits
	 * 23:16 of completion entry dw0.  Print a message and queue it for
	 * further processing.
	 */
	memcpy(&aer->cpl, cpl, sizeof(*cpl));
	aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
	    " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
	    NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
	    aer->log_page_id);
	taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task);
}

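/*
 * Illustrative note (not from the original source): an AER completion packs
 * its payload into dw0 as event type in bits 2:0, event info in bits 15:8,
 * and the log page id in bits 23:16.  For example, dw0 = 0x00020101 would
 * decode as type 0x1 (SMART/health), info 0x01, log page 0x02
 * (NVME_LOG_HEALTH_INFORMATION), which the task handler below then fetches.
 */
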
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	/*
	 * We're racing the reset thread, so let that process submit this
	 * again.  XXX does this really solve that race?  And is that race
	 * even possible since we only reset when we've not heard from the
	 * card in a long time.  Why would we get an AER in the middle of
	 * that just before we kick off the reset?
	 */
	if (ctrlr->is_resetting)
		return;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(M_WAITOK, nvme_ctrlr_async_event_cb,
	    aer);
	aer->req = req;
	aer->log_page_id = 0;		/* Not a valid page */

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = false;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status status;
	struct nvme_async_event_request *aer;
	uint32_t i;

	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
	    NVME_CRIT_WARN_ST_READ_ONLY |
	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
		ctrlr->async_event_config |=
		    ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
		    NVME_ASYNC_EVENT_FW_ACTIVATE);

	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
	} else
		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
{
	struct nvme_hmb_chunk *hmbc;
	int i;

	if (ctrlr->hmb_desc_paddr) {
		bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
		    ctrlr->hmb_desc_map);
		ctrlr->hmb_desc_paddr = 0;
	}
	if (ctrlr->hmb_desc_tag) {
		bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
		ctrlr->hmb_desc_tag = NULL;
	}
	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		hmbc = &ctrlr->hmb_chunks[i];
		bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
		bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
		    hmbc->hmbc_map);
	}
	ctrlr->hmb_nchunks = 0;
	if (ctrlr->hmb_tag) {
		bus_dma_tag_destroy(ctrlr->hmb_tag);
		ctrlr->hmb_tag = NULL;
	}
	if (ctrlr->hmb_chunks) {
		free(ctrlr->hmb_chunks, M_NVME);
		ctrlr->hmb_chunks = NULL;
	}
}

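/*
 * Illustrative note (not from the original source): the Identify fields
 * driving the allocation below are all in 4 KiB units, e.g. hmpre = 8192
 * means the controller prefers 32 MiB of host memory and hmmin = 1024 means
 * it needs at least 4 MiB to make use of the feature.  With 16 GiB of RAM,
 * the default 5% cap works out to roughly 819 MiB, so the preferred size is
 * normally what gets attempted first.
 */
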
static void
nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
{
	struct nvme_hmb_chunk *hmbc;
	size_t pref, min, minc, size;
	int err, i;
	uint64_t max;

	/* Limit HMB to 5% of RAM size per device by default. */
	max = (uint64_t)physmem * PAGE_SIZE / 20;
	TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);

	/*
	 * Host Memory Buffer fields in the Identify info are always in 4 KiB
	 * units.
	 */
	min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
	if (max == 0 || max < min)
		return;
	pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
	minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
	if (min > 0 && ctrlr->cdata.hmmaxd > 0)
		minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
	ctrlr->hmb_chunk = pref;

again:
	/*
	 * However, the chunk sizes, number of chunks, and alignment of chunks
	 * are all based on the current MPS (ctrlr->page_size).
	 */
	ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
	ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
	if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
		ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
	ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
	    ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}

	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		hmbc = &ctrlr->hmb_chunks[i];
		if (bus_dmamem_alloc(ctrlr->hmb_tag,
		    (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
		    &hmbc->hmbc_map)) {
			nvme_printf(ctrlr, "failed to alloc HMB\n");
			break;
		}
		if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
		    hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
		    &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
			bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
			    hmbc->hmbc_map);
			nvme_printf(ctrlr, "failed to load HMB\n");
			break;
		}
		bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
	    ctrlr->hmb_chunk / 2 >= minc) {
		ctrlr->hmb_nchunks = i;
		nvme_ctrlr_hmb_free(ctrlr);
		ctrlr->hmb_chunk /= 2;
		goto again;
	}
	ctrlr->hmb_nchunks = i;
	if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}

	size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}
	if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
	    (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
	    &ctrlr->hmb_desc_map)) {
		nvme_printf(ctrlr, "failed to alloc HMB desc\n");
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}
	if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
	    ctrlr->hmb_desc_vaddr, size, nvme_single_map,
	    &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
		    ctrlr->hmb_desc_map);
		nvme_printf(ctrlr, "failed to load HMB desc\n");
		nvme_ctrlr_hmb_free(ctrlr);
		return;
	}

	for (i = 0; i < ctrlr->hmb_nchunks; i++) {
		memset(&ctrlr->hmb_desc_vaddr[i], 0,
		    sizeof(struct nvme_hmb_desc));
		ctrlr->hmb_desc_vaddr[i].addr =
		    htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
		ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
	}

	bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
	    BUS_DMASYNC_PREWRITE);

	nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
	    (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
	    / 1024 / 1024);
}

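/*
 * Illustrative note (not from the original source): Set Features with
 * NVME_FEAT_HOST_MEMORY_BUFFER takes the enable flags in cdw11 (bit 0 EHM =
 * enable host memory, bit 1 MR = memory return, i.e. the previously supplied
 * buffer contents are still valid), the buffer size in current memory page
 * size (MPS) units in cdw12, the descriptor list address split across
 * cdw13/cdw14, and the descriptor entry count in cdw15; that is the argument
 * order passed to nvme_ctrlr_cmd_set_feature() below.
 */
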
static void
nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
{
	struct nvme_completion_poll_status status;
	uint32_t cdw11;

	cdw11 = 0;
	if (enable)
		cdw11 |= 1;
	if (memret)
		cdw11 |= 2;
	status.done = 0;
	nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
	    ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
	    ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
	    ctrlr->hmb_nchunks, NULL, 0,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl))
		nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
}

static void
nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	TSENTER();

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
		nvme_admin_qpair_enable(&ctrlr->adminq);
	}

	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_reset(&ctrlr->ioq[i]);
	}

	/*
	 * If it was a reset on initialization command timeout, just
	 * return here, letting initialization code fail gracefully.
	 */
	if (resetting && !ctrlr->is_initialized)
		return;

	if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr, false);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr, false);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
		nvme_ctrlr_hmb_alloc(ctrlr);
		if (ctrlr->hmb_nchunks > 0)
			nvme_ctrlr_hmb_enable(ctrlr, true, false);
	} else if (ctrlr->hmb_nchunks > 0)
		nvme_ctrlr_hmb_enable(ctrlr, true, true);

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr, false);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr, false);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
	TSEXIT();
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	TSENTER();

	if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
		nvme_ctrlr_fail(ctrlr, true);
		config_intrhook_disestablish(&ctrlr->config_hook);
		return;
	}

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) == 0 &&
	    nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr, false);
	else
		nvme_ctrlr_fail(ctrlr, false);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	if (!ctrlr->is_failed) {
		ctrlr->is_initialized = true;
		nvme_notify_new_controller(ctrlr);
	}
	TSEXIT();
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller *ctrlr = arg;
	int status;

	nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status == 0) {
		nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
		nvme_ctrlr_start(ctrlr, true);
	} else {
		nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
		nvme_ctrlr_fail(ctrlr, true);
	}

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

static void
nvme_ctrlr_aer_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request *aer = arg;

	mtx_lock(&aer->mtx);
	if (nvme_completion_is_error(cpl))
		aer->log_page_size = (uint32_t)-1;
	else
		aer->log_page_size = nvme_ctrlr_get_log_page_size(
		    aer->ctrlr, aer->log_page_id);
	wakeup(aer);
	mtx_unlock(&aer->mtx);
}

static void
nvme_ctrlr_aer_task(void *arg, int pending)
{
	struct nvme_async_event_request *aer = arg;
	struct nvme_controller *ctrlr = aer->ctrlr;
	uint32_t len;

	/*
	 * We're resetting, so just punt.
	 */
	if (ctrlr->is_resetting)
		return;

	if (!is_log_page_id_valid(aer->log_page_id)) {
		/*
		 * Notify without log page data, then fall through to "out" to
		 * repost another asynchronous event request to replace the
		 * one that just completed.
		 */
		nvme_notify_async_consumers(ctrlr, &aer->cpl, aer->log_page_id,
		    NULL, 0);
		goto out;
	}

	aer->log_page_size = 0;
	len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id);
	nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
	    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len,
	    nvme_ctrlr_aer_done, aer);
	mtx_lock(&aer->mtx);
	while (aer->log_page_size == 0)
		mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(&aer->mtx);

	if (aer->log_page_size == (uint32_t)-1) {
		/*
		 * If the log page fetch for some reason completed with an
		 * error, don't pass log page data to the consumers.  In
		 * practice, this case should never happen.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
		goto out;
	}

	/* Convert data to host endian */
	switch (aer->log_page_id) {
	case NVME_LOG_ERROR: {
		struct nvme_error_information_entry *err =
		    (struct nvme_error_information_entry *)aer->log_page_buffer;
		for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
			nvme_error_information_entry_swapbytes(err++);
		break;
	}
	case NVME_LOG_HEALTH_INFORMATION:
		nvme_health_information_page_swapbytes(
		    (struct nvme_health_information_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		nvme_ns_list_swapbytes(
		    (struct nvme_ns_list *)aer->log_page_buffer);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		nvme_command_effects_page_swapbytes(
		    (struct nvme_command_effects_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		nvme_res_notification_page_swapbytes(
		    (struct nvme_res_notification_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		nvme_sanitize_status_page_swapbytes(
		    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
		break;
	default:
		break;
	}

	if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
		struct nvme_health_information_page *health_info =
		    (struct nvme_health_information_page *)aer->log_page_buffer;

		/*
		 * Critical warnings reported through the SMART/health log page
		 * are persistent, so clear the associated bits in the async
		 * event config so that we do not receive repeated
		 * notifications for the same event.
		 */
		nvme_ctrlr_log_critical_warnings(aer->ctrlr,
		    health_info->critical_warning);
		aer->ctrlr->async_event_config &=
		    ~health_info->critical_warning;
		nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
		    aer->ctrlr->async_event_config, NULL, NULL);
	} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) {
		struct nvme_ns_list *nsl =
		    (struct nvme_ns_list *)aer->log_page_buffer;
		for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
			if (nsl->ns[i] > NVME_MAX_NAMESPACES)
				break;
			nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
		}
	}

	/*
	 * Pass the cpl data from the original async event completion, not the
	 * log page fetch.
	 */
	nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
	    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
out:
	nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
	int i;

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
}

/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
 */
void
nvme_ctrlr_shared_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;
	struct mtx *mtx = pt->driver_lock;
	uint16_t status;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;

	status = cpl->status;
	status &= ~NVMEM(NVME_STATUS_P);
	pt->cpl.status = status;

	mtx_lock(mtx);
	pt->driver_lock = NULL;
	wakeup(pt);
	mtx_unlock(mtx);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			buf = uma_zalloc(pbuf_zone, M_WAITOK);
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
			if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    M_WAITOK, nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    M_WAITOK, nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(M_WAITOK, nvme_pt_done, pt);

	/* Assume user space already converted to little-endian */
	req->cmd.opc = pt->cmd.opc;
	req->cmd.fuse = pt->cmd.fuse;
	req->cmd.rsvd2 = pt->cmd.rsvd2;
	req->cmd.rsvd3 = pt->cmd.rsvd3;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = htole32(nsid);

	mtx = mtx_pool_find(mtxpool_sleep, pt);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (pt->driver_lock != NULL)
		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

	if (buf != NULL) {
		vunmapbuf(buf);
err:
		uma_zfree(pbuf_zone, buf);
	}

	return (ret);
}

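/*
 * Illustrative sketch (not from the original source; hypothetical usage,
 * error handling omitted): a userland caller would typically drive the
 * passthrough path above roughly as follows, using the definitions from
 * <dev/nvme/nvme.h>:
 *
 *	struct nvme_pt_command pt;
 *	struct nvme_health_information_page hip;
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	pt.cmd.cdw10 = htole32(NVME_LOG_HEALTH_INFORMATION |
 *	    ((sizeof(hip) / 4 - 1) << 16));
 *	pt.buf = &hip;
 *	pt.len = sizeof(hip);
 *	pt.is_read = 1;
 *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
 *
 * Note that the command dwords are supplied already little-endian, matching
 * the "Assume user space already converted" comment above.
 */
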
static void
nvme_npc_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_passthru_cmd *npc = arg;
	struct mtx *mtx = (void *)(uintptr_t)npc->metadata;

	npc->result = cpl->cdw0;	/* cpl in host order by now */
	mtx_lock(mtx);
	npc->metadata = 0;
	wakeup(npc);
	mtx_unlock(mtx);
}

/* XXX refactor? */

int
nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
    struct nvme_passthru_cmd *npc, uint32_t nsid, bool is_user, bool is_admin)
{
	struct nvme_request *req;
	struct mtx *mtx;
	struct buf *buf = NULL;
	int ret = 0;

	/*
	 * We don't support metadata.
	 */
	if (npc->metadata != 0 || npc->metadata_len != 0)
		return (EIO);

	if (npc->data_len > 0 && npc->addr != 0) {
		if (npc->data_len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr,
			    "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
			    npc->data_len, ctrlr->max_xfer_size);
			return (EIO);
		}
		/*
		 * We only support data out or data in commands, but not both
		 * at once.  However, there are some commands with the lower
		 * bit cleared that are really read commands, so we should
		 * filter on (opcode & 3) == 0, but don't.
		 */
		if ((npc->opcode & 0x3) == 3)
			return (EINVAL);
		if (is_user) {
			buf = uma_zalloc(pbuf_zone, M_WAITOK);
			buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
			if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
			    npc->data_len, 1) < 0) {
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data,
			    npc->data_len, M_WAITOK, nvme_npc_done, npc);
		} else
			req = nvme_allocate_request_vaddr(
			    (void *)(uintptr_t)npc->addr, npc->data_len,
			    M_WAITOK, nvme_npc_done, npc);
	} else
		req = nvme_allocate_request_null(M_WAITOK, nvme_npc_done, npc);

	req->cmd.opc = npc->opcode;
	req->cmd.fuse = npc->flags;
	req->cmd.rsvd2 = htole32(npc->cdw2);
	req->cmd.rsvd3 = htole32(npc->cdw3);
	req->cmd.cdw10 = htole32(npc->cdw10);
	req->cmd.cdw11 = htole32(npc->cdw11);
	req->cmd.cdw12 = htole32(npc->cdw12);
	req->cmd.cdw13 = htole32(npc->cdw13);
	req->cmd.cdw14 = htole32(npc->cdw14);
	req->cmd.cdw15 = htole32(npc->cdw15);

	req->cmd.nsid = htole32(nsid);

	mtx = mtx_pool_find(mtxpool_sleep, npc);
	npc->metadata = (uintptr_t)mtx;

	/* XXX no timeout passed down */
	if (is_admin)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (npc->metadata != 0)
		mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
	mtx_unlock(mtx);

	if (buf != NULL) {
		vunmapbuf(buf);
err:
		uma_zfree(pbuf_zone, buf);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_IOCTL_RESET:		/* Linux compat */
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	case NVME_GET_NSID:
	{
		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
		strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
		    sizeof(gnsid->cdev));
		gnsid->nsid = 0;
		break;
	}
	case NVME_GET_MAX_XFER_SIZE:
		*(uint64_t *)arg = ctrlr->max_xfer_size;
		break;
	case NVME_GET_CONTROLLER_DATA:
		memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
		break;
	/* Linux Compatible (see nvme_linux.h) */
	case NVME_IOCTL_ID:
		td->td_retval[0] = 0xfffffffful;
		return (0);

	case NVME_IOCTL_ADMIN_CMD:
	case NVME_IOCTL_IO_CMD: {
		struct nvme_passthru_cmd *npc = (struct nvme_passthru_cmd *)arg;

		return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
		    cmd == NVME_IOCTL_ADMIN_CMD));
	}

	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	struct make_dev_args md_args;
	uint32_t cap_lo;
	uint32_t cap_hi;
	uint32_t to, vs, pmrcap;
	int status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
	if (bus_get_domain(dev, &ctrlr->domain) != 0)
		ctrlr->domain = 0;

	ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	if (bootverbose) {
		device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
		    cap_lo, NVME_CAP_LO_MQES(cap_lo),
		    NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
		    NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
		    (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
		    (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
		    NVME_CAP_LO_TO(cap_lo));
	}
	ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
	if (bootverbose) {
		device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
		    "CPS %x, MPSMIN %u, MPSMAX %u%s%s%s%s%s\n", cap_hi,
		    NVME_CAP_HI_DSTRD(cap_hi),
		    NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
		    NVME_CAP_HI_CSS(cap_hi),
		    NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
		    NVME_CAP_HI_CPS(cap_hi),
		    NVME_CAP_HI_MPSMIN(cap_hi),
		    NVME_CAP_HI_MPSMAX(cap_hi),
		    NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
		    NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "",
		    NVME_CAP_HI_NSSS(cap_hi) ? ", NSSS" : "",
		    NVME_CAP_HI_CRWMS(cap_hi) ? ", CRWMS" : "",
		    NVME_CAP_HI_CRIMS(cap_hi) ? ", CRIMS" : "");
	}
	if (bootverbose) {
		vs = nvme_mmio_read_4(ctrlr, vs);
		device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
		    NVME_MAJOR(vs), NVME_MINOR(vs));
	}
	if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
		pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
		device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
		    "PMRWBM %x, PMRTO %u%s\n", pmrcap,
		    NVME_PMRCAP_BIR(pmrcap),
		    NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
		    NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
		    NVME_PMRCAP_PMRTU(pmrcap),
		    NVME_PMRCAP_PMRWBM(pmrcap),
		    NVME_PMRCAP_PMRTO(pmrcap),
		    NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
	}

	ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;

	ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
	ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);

	/* Get ready timeout value from controller, in units of 500ms. */
	to = NVME_CAP_LO_TO(cap_lo) + 1;
	ctrlr->ready_timeout_in_ms = to * 500;

	timeout_period = NVME_ADMIN_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->admin_timeout_period = timeout_period;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);

	/*
	 * Cap transfers by the maximum addressable by page-sized PRP
	 * (4KB pages -> 2MB).
	 */
	ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));

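	/*
	 * Illustrative note (not from the original source): a request is
	 * bounded by one PRP list page holding page_size / 8 eight-byte
	 * entries, each pointing at one page of data.  With 4 KiB pages
	 * that is 512 entries * 4 KiB = 2 MiB, and the MIN() above keeps
	 * the cap at maxphys when that is smaller.
	 */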
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	/*
	 * Create 2 threads for the taskqueue.  The reset thread will block
	 * when it detects that the controller has failed until all I/O has
	 * been failed up the stack.  The second thread is used for AER
	 * events, which can block, but only briefly for memory and log page
	 * fetching.
	 */
	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = false;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
	for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
		struct nvme_async_event_request *aer = &ctrlr->aer[i];

		TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer);
		mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF);
	}
	ctrlr->is_failed = false;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
	md_args.mda_uid = UID_ROOT;
	md_args.mda_gid = GID_WHEEL;
	md_args.mda_mode = 0600;
	md_args.mda_unit = device_get_unit(dev);
	md_args.mda_si_drv1 = (void *)ctrlr;
	status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
	    device_get_nameunit(dev));
	if (status != 0)
		return (ENXIO);

	return (0);
}

/*
 * Called on detach, or on error on attach.  The nvme_controller won't be used
 * again once we return, so we have to tear everything down (so nothing
 * references this, no callbacks, etc), but don't need to reset all the state
 * since nvme_controller will be freed soon.
 */
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int i;
	bool gone;

	ctrlr->is_dying = true;

	if (ctrlr->resource == NULL)
		goto nores;
	if (!mtx_initialized(&ctrlr->adminq.lock))
		goto noadminq;

	/*
	 * Check whether it is a hot unplug or a clean driver detach.  If the
	 * device is not there any more, skip any shutdown commands.  Some
	 * hotplug bridges will return zeros instead of ff's when the device
	 * is departing, so ask the bridge if the device is gone.  Some
	 * systems can remove the drive w/o the bridge knowing it's gone (they
	 * don't really do hotplug), so failsafe with detecting all ff's
	 * (impossible with this hardware) as the device being gone.
	 */
	gone = bus_child_present(dev) == 0 ||
	    (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
	if (gone)
		nvme_ctrlr_fail(ctrlr, true);
	else
		nvme_notify_fail_consumers(ctrlr);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	if (ctrlr->is_initialized) {
		if (!gone) {
			if (ctrlr->hmb_nchunks > 0)
				nvme_ctrlr_hmb_enable(ctrlr, false, false);
			nvme_ctrlr_delete_qpairs(ctrlr);
		}
		nvme_ctrlr_hmb_free(ctrlr);
	}
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);
		free(ctrlr->ioq, M_NVME);
	}
	nvme_admin_qpair_destroy(&ctrlr->adminq);

	/*
	 * Notify the controller of a shutdown, even though this is due to a
	 * driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a shutdown
	 * notification in case the system is shutdown before reloading the
	 * driver.  Some NVMe drives need this to flush their cache to stable
	 * media and consider it a safe shutdown in SMART stats.
	 */
	if (!gone) {
		nvme_ctrlr_shutdown(ctrlr);
		nvme_ctrlr_disable(ctrlr);
	}

noadminq:
	if (ctrlr->taskqueue) {
		taskqueue_free(ctrlr->taskqueue);
		for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
			struct nvme_async_event_request *aer = &ctrlr->aer[i];

			mtx_destroy(&aer->mtx);
		}
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	bus_release_resource(dev, SYS_RES_MEMORY,
	    ctrlr->resource_id, ctrlr->resource);

nores:
	if (ctrlr->alignment_splits)
		counter_u64_free(ctrlr->alignment_splits);

	mtx_destroy(&ctrlr->lock);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	int timeout;

	cc = nvme_mmio_read_4(ctrlr, cc);
	cc &= ~NVMEM(NVME_CC_REG_SHN);
	cc |= NVMEF(NVME_CC_REG_SHN, NVME_SHN_NORMAL);
	nvme_mmio_write_4(ctrlr, cc, cc);

	timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
	    ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
	while (1) {
		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == NVME_GONE)		/* Hot unplug. */
			break;
		if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
			break;
		if (timeout - ticks < 0) {
			nvme_printf(ctrlr, "shutdown timeout\n");
			break;
		}
		pause("nvmeshut", 1);
	}
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair *qpair;

	qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{
	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{
	return (&ctrlr->cdata);
}

int
nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
{
	int to = hz;

	/*
	 * Can't touch failed controllers, so it's already suspended.  User
	 * will need to do an explicit reset to bring it back, if that's even
	 * possible.
	 */
	if (ctrlr->is_failed)
		return (0);

	/*
	 * We don't want the reset taskqueue running, since it does similar
	 * things, so prevent it from running after we start.  Wait for any
	 * reset that may have been started to complete.  The reset process we
	 * follow will ensure that any new I/O will queue and be given to the
	 * hardware after we resume (though there should be none).
	 */
	while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
		pause("nvmesusp", 1);
	if (to <= 0) {
		nvme_printf(ctrlr,
		    "Competing reset task didn't finish. Try again later.\n");
		return (EWOULDBLOCK);
	}

	if (ctrlr->hmb_nchunks > 0)
		nvme_ctrlr_hmb_enable(ctrlr, false, false);

	/*
	 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
	 * delete the hardware I/O queues, and then shut down.  This properly
	 * flushes any metadata the drive may have stored so it can survive
	 * having its power removed and prevents the unsafe shutdown count
	 * from incrementing.  Once we delete the qpairs, we have to disable
	 * them before shutting down.
	 */
	nvme_ctrlr_delete_qpairs(ctrlr);
	nvme_ctrlr_disable_qpairs(ctrlr);
	nvme_ctrlr_shutdown(ctrlr);

	return (0);
}

int
nvme_ctrlr_resume(struct nvme_controller *ctrlr)
{
	/*
	 * Can't touch failed controllers, so nothing to do to resume.
	 */
	if (ctrlr->is_failed)
		return (0);

	if (nvme_ctrlr_hw_reset(ctrlr) != 0)
		goto fail;

	/*
	 * Now that we've reset the hardware, we can restart the controller.
	 * Any I/O that was pending is requeued.  Any admin commands are
	 * aborted with an error.  Once we've restarted, stop flagging the
	 * controller as being in the reset phase.
	 */
	nvme_ctrlr_start(ctrlr, true);
	(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);

	return (0);
fail:
	/*
	 * Since we can't bring the controller out of reset, announce and fail
	 * the controller.  However, we have to return success for the resume
	 * itself, due to questionable APIs.
	 */
	nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
	nvme_ctrlr_fail(ctrlr, true);
	(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
	return (0);
}