Lines Matching "freeze", "bridge", "controller" in drivers/nvme/host/pci.c

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
10 #include <linux/blk-mq.h>
11 #include <linux/blk-mq-pci.h>
12 #include <linux/blk-integrity.h>
25 #include <linux/t10-pi.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/io-64-nonatomic-hi-lo.h>
29 #include <linux/sed-opal.h>
30 #include <linux/pci-p2pdma.h>
35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
53 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
58 "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
85 return -EINVAL; in io_queue_count_set()
245 return dev->nr_allocated_queues * 8 * dev->db_stride; in nvme_dbbuf_size()
252 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) in nvme_dbbuf_dma_alloc()
255 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_alloc()
260 memset(dev->dbbuf_dbs, 0, mem_size); in nvme_dbbuf_dma_alloc()
261 memset(dev->dbbuf_eis, 0, mem_size); in nvme_dbbuf_dma_alloc()
265 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
266 &dev->dbbuf_dbs_dma_addr, in nvme_dbbuf_dma_alloc()
268 if (!dev->dbbuf_dbs) in nvme_dbbuf_dma_alloc()
270 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
271 &dev->dbbuf_eis_dma_addr, in nvme_dbbuf_dma_alloc()
273 if (!dev->dbbuf_eis) in nvme_dbbuf_dma_alloc()
278 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, in nvme_dbbuf_dma_alloc()
279 dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_alloc()
280 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_alloc()
282 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); in nvme_dbbuf_dma_alloc()
289 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_free()
290 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
291 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_free()
292 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_free()
294 if (dev->dbbuf_eis) { in nvme_dbbuf_dma_free()
295 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
296 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); in nvme_dbbuf_dma_free()
297 dev->dbbuf_eis = NULL; in nvme_dbbuf_dma_free()
304 if (!dev->dbbuf_dbs || !qid) in nvme_dbbuf_init()
307 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
308 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
309 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
310 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
315 if (!nvmeq->qid) in nvme_dbbuf_free()
318 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
319 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
320 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
321 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
329 if (!dev->dbbuf_dbs) in nvme_dbbuf_set()
333 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_set()
334 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); in nvme_dbbuf_set()
336 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
337 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); in nvme_dbbuf_set()
341 for (i = 1; i <= dev->online_queues; i++) in nvme_dbbuf_set()
342 nvme_dbbuf_free(&dev->queues[i]); in nvme_dbbuf_set()
348 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); in nvme_dbbuf_need_event()
369 * index from memory. The controller needs to provide similar in nvme_dbbuf_update_and_check_event()
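The nvme_dbbuf_need_event() test at source line 348 is the shadow-doorbell suppression check: after updating the shadow value, the driver only issues the real MMIO doorbell write when the controller's event index lies in the half-open interval [old, new_idx) under 16-bit wraparound. A minimal userspace sketch of that predicate, with hypothetical names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same unsigned-wraparound trick as nvme_dbbuf_need_event(): true exactly
 * when event_idx is in [old, new_idx) modulo 2^16. */
static bool need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	assert(need_event(3, 4, 2));      /* index crossed: ring the doorbell */
	assert(!need_event(10, 4, 2));    /* not reached yet: skip the MMIO */
	assert(need_event(0, 1, 0xffff)); /* still correct across the wrap */
	return 0;
}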
392 return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); in nvme_pci_npages_prp()
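The divisor in nvme_pci_npages_prp() is NVME_CTRL_PAGE_SIZE - 8 because a 4 KiB PRP-list page holds 512 eight-byte entries, but the last entry of each page is reserved as a chain pointer to the next list page, leaving 511 usable slots. The same arithmetic as a standalone sketch, assuming the mainline 4096-byte controller page:

#include <stdio.h>

#define CTRL_PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worst-case list pages needed to describe nprps data pages: each list
 * page contributes 511 usable pointers, not 512, hence the "- 8". */
static int npages_prp(int nprps)
{
	return DIV_ROUND_UP(8 * nprps, CTRL_PAGE_SIZE - 8);
}

int main(void)
{
	printf("511 PRPs -> %d list page(s)\n", npages_prp(511)); /* 1 */
	printf("512 PRPs -> %d list page(s)\n", npages_prp(512)); /* 2 */
	return 0;
}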
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx()
402 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
404 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx()
414 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
425 nvme_req(req)->ctrl = set->driver_data; in nvme_pci_init_request()
426 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
433 if (dev->num_vecs > 1) in queue_irq_offset()
441 struct nvme_dev *dev = to_nvme_dev(set->driver_data); in nvme_pci_map_queues()
445 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in nvme_pci_map_queues()
446 struct blk_mq_queue_map *map = &set->map[i]; in nvme_pci_map_queues()
448 map->nr_queues = dev->io_queues[i]; in nvme_pci_map_queues()
449 if (!map->nr_queues) { in nvme_pci_map_queues()
456 * affinity), so use the regular blk-mq cpu mapping in nvme_pci_map_queues()
458 map->queue_offset = qoff; in nvme_pci_map_queues()
460 blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); in nvme_pci_map_queues()
463 qoff += map->nr_queues; in nvme_pci_map_queues()
464 offset += map->nr_queues; in nvme_pci_map_queues()
474 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
476 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
478 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
482 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
483 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
484 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
485 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
491 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_sq_copy_cmd()
493 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
494 nvmeq->sq_tail = 0; in nvme_sq_copy_cmd()
499 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs()
501 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
502 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
504 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
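nvme_sq_copy_cmd() and nvme_write_sq_db() above form a batched ring producer: the tail advances per command with wraparound, and the doorbell write is deferred until the batch's last command, or until deferring any further would hide a nearly full ring from the device. A toy userspace model of that bookkeeping (a printf stands in for writel(); the dbbuf check is omitted):

#include <stdint.h>
#include <stdio.h>

struct sq {
	uint16_t tail, last_tail, depth;
};

/* Deferred doorbell: only write when this is the batch's last command or
 * when the next tail would catch up with the last value we wrote. */
static void write_sq_db(struct sq *q, int write_sq)
{
	if (!write_sq) {
		uint16_t next = q->tail + 1;

		if (next == q->depth)
			next = 0;
		if (next != q->last_tail)
			return;
	}
	printf("doorbell <- %u\n", (unsigned)q->tail); /* stands in for writel() */
	q->last_tail = q->tail;
}

static void submit(struct sq *q, int last)
{
	/* the 64-byte SQE memcpy would go here */
	if (++q->tail == q->depth)
		q->tail = 0;
	write_sq_db(q, last);
}

int main(void)
{
	struct sq q = { .depth = 4 };

	submit(&q, 0);	/* batched: no doorbell yet */
	submit(&q, 1);	/* end of batch: doorbell <- 2 */
	return 0;
}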
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_use_sgls()
515 if (!nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_pci_use_sgls()
517 if (!nvmeq->qid) in nvme_pci_use_sgls()
526 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; in nvme_free_prps()
528 dma_addr_t dma_addr = iod->first_dma; in nvme_free_prps()
531 for (i = 0; i < iod->nr_allocations; i++) { in nvme_free_prps()
532 __le64 *prp_list = iod->list[i].prp_list; in nvme_free_prps()
535 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); in nvme_free_prps()
544 if (iod->dma_len) { in nvme_unmap_data()
545 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
550 WARN_ON_ONCE(!iod->sgt.nents); in nvme_unmap_data()
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_unmap_data()
554 if (iod->nr_allocations == 0) in nvme_unmap_data()
555 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, in nvme_unmap_data()
556 iod->first_dma); in nvme_unmap_data()
557 else if (iod->nr_allocations == 1) in nvme_unmap_data()
558 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, in nvme_unmap_data()
559 iod->first_dma); in nvme_unmap_data()
562 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_unmap_data()
574 i, &phys, sg->offset, sg->length, &sg_dma_address(sg), in nvme_print_sgl()
585 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_prps()
588 int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_setup_prps()
593 length -= (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
595 iod->first_dma = 0; in nvme_pci_setup_prps()
599 dma_len -= (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
601 dma_addr += (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
609 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
615 pool = dev->prp_small_pool; in nvme_pci_setup_prps()
616 iod->nr_allocations = 0; in nvme_pci_setup_prps()
618 pool = dev->prp_page_pool; in nvme_pci_setup_prps()
619 iod->nr_allocations = 1; in nvme_pci_setup_prps()
624 iod->nr_allocations = -1; in nvme_pci_setup_prps()
627 iod->list[0].prp_list = prp_list; in nvme_pci_setup_prps()
628 iod->first_dma = prp_dma; in nvme_pci_setup_prps()
636 iod->list[iod->nr_allocations++].prp_list = prp_list; in nvme_pci_setup_prps()
637 prp_list[0] = old_prp_list[i - 1]; in nvme_pci_setup_prps()
638 old_prp_list[i - 1] = cpu_to_le64(prp_dma); in nvme_pci_setup_prps()
642 dma_len -= NVME_CTRL_PAGE_SIZE; in nvme_pci_setup_prps()
644 length -= NVME_CTRL_PAGE_SIZE; in nvme_pci_setup_prps()
656 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); in nvme_pci_setup_prps()
657 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); in nvme_pci_setup_prps()
663 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), in nvme_pci_setup_prps()
665 blk_rq_payload_bytes(req), iod->sgt.nents); in nvme_pci_setup_prps()
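nvme_pci_setup_prps() turns the DMA-mapped scatterlist into PRP entries: prp1 covers the possibly unaligned start of the buffer, and every further controller page gets one eight-byte entry in a list page allocated from a DMA pool. A simplified sketch with made-up addresses, limited to a single list page (the chaining across list pages done at source lines 636-638 is omitted):

#include <stdint.h>
#include <stdio.h>

#define PAGE 4096u
#define ENTS (PAGE / 8)		/* 512 entries per list page */

int main(void)
{
	uint64_t dma = 0x100000200;	/* assumed mapped start, offset 0x200 */
	uint64_t len = 3 * PAGE;	/* assumed transfer length */
	uint64_t prp1 = dma;		/* first, possibly unaligned, pointer */
	uint64_t list[ENTS];		/* stands in for the DMA-pool page */
	unsigned int i = 0;

	len -= PAGE - (dma & (PAGE - 1));	/* prp1 reaches the page end */
	dma = (dma + PAGE) & ~(uint64_t)(PAGE - 1);
	while (len) {			/* one list entry per further page */
		list[i++] = dma;
		dma += PAGE;
		len -= len < PAGE ? len : PAGE;
	}
	printf("prp1=%#llx, %u list entries, first=%#llx\n",
	       (unsigned long long)prp1, i, (unsigned long long)list[0]);
	return 0;
}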
672 sge->addr = cpu_to_le64(sg_dma_address(sg)); in nvme_pci_sgl_set_data()
673 sge->length = cpu_to_le32(sg_dma_len(sg)); in nvme_pci_sgl_set_data()
674 sge->type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_pci_sgl_set_data()
680 sge->addr = cpu_to_le64(dma_addr); in nvme_pci_sgl_set_seg()
681 sge->length = cpu_to_le32(entries * sizeof(*sge)); in nvme_pci_sgl_set_seg()
682 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; in nvme_pci_sgl_set_seg()
691 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_sgls()
692 unsigned int entries = iod->sgt.nents; in nvme_pci_setup_sgls()
697 cmd->flags = NVME_CMD_SGL_METABUF; in nvme_pci_setup_sgls()
700 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); in nvme_pci_setup_sgls()
705 pool = dev->prp_small_pool; in nvme_pci_setup_sgls()
706 iod->nr_allocations = 0; in nvme_pci_setup_sgls()
708 pool = dev->prp_page_pool; in nvme_pci_setup_sgls()
709 iod->nr_allocations = 1; in nvme_pci_setup_sgls()
714 iod->nr_allocations = -1; in nvme_pci_setup_sgls()
718 iod->list[0].sg_list = sg_list; in nvme_pci_setup_sgls()
719 iod->first_dma = sgl_dma; in nvme_pci_setup_sgls()
721 nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); in nvme_pci_setup_sgls()
725 } while (--entries > 0); in nvme_pci_setup_sgls()
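nvme_pci_sgl_set_data() and nvme_pci_sgl_set_seg() fill 16-byte SGL descriptors: one data descriptor per mapped element, plus a last-segment descriptor in the command that points at the descriptor array. A hedged sketch of that layout with fake bus addresses; the struct mirrors my reading of struct nvme_sgl_desc and the spec-defined type codes, so verify against the headers before relying on it:

#include <stdint.h>
#include <stdio.h>

struct sgl_desc {		/* 16 bytes; compare with struct nvme_sgl_desc */
	uint64_t addr;
	uint32_t length;
	uint8_t  rsvd[3];
	uint8_t  type;		/* descriptor type lives in the high nibble */
};

#define SGL_FMT_DATA_DESC	0x0	/* plain data block */
#define SGL_FMT_LAST_SEG_DESC	0x3	/* final segment of a descriptor list */

int main(void)
{
	/* one data descriptor per mapped scatterlist element */
	struct sgl_desc data = {
		.addr = 0x20000000,		/* assumed sg_dma_address() */
		.length = 8192,			/* assumed sg_dma_len() */
		.type = SGL_FMT_DATA_DESC << 4,
	};
	/* the command's dptr.sgl points at the array of data descriptors */
	struct sgl_desc seg = {
		.addr = 0x30000000,		/* assumed DMA-pool address */
		.length = 2 * (uint32_t)sizeof(struct sgl_desc),
		.type = SGL_FMT_LAST_SEG_DESC << 4,
	};

	printf("data: %u bytes, segment: %u bytes of descriptors\n",
	       data.length, seg.length);
	return 0;
}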
735 unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); in nvme_setup_prp_simple()
736 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; in nvme_setup_prp_simple()
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
739 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
741 iod->dma_len = bv->bv_len; in nvme_setup_prp_simple()
743 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); in nvme_setup_prp_simple()
744 if (bv->bv_len > first_prp_len) in nvme_setup_prp_simple()
745 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); in nvme_setup_prp_simple()
747 cmnd->dptr.prp2 = 0; in nvme_setup_prp_simple()
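For a small single-segment I/O the driver avoids the scatterlist path entirely: nvme_setup_prp_simple() maps the lone bio_vec and derives at most two PRPs from it. The same arithmetic in plain C, with an assumed DMA address:

#include <stdint.h>
#include <stdio.h>

#define CTRL_PAGE_SIZE 4096u

int main(void)
{
	uint64_t dma = 0x10000a00;	/* assumed DMA address of the bio_vec */
	uint32_t len = 5120;		/* bio_vec length in bytes */
	uint32_t offset = dma & (CTRL_PAGE_SIZE - 1);
	uint32_t first_prp_len = CTRL_PAGE_SIZE - offset;
	uint64_t prp1 = dma;
	uint64_t prp2 = len > first_prp_len ? dma + first_prp_len : 0;

	/* prp1 covers the first (possibly partial) page, prp2 the second;
	 * anything spanning more than two pages needs a PRP list instead */
	printf("prp1=%#llx prp2=%#llx\n",
	       (unsigned long long)prp1, (unsigned long long)prp2);
	return 0;
}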
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
758 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
760 iod->dma_len = bv->bv_len; in nvme_setup_sgl_simple()
762 cmnd->flags = NVME_CMD_SGL_METABUF; in nvme_setup_sgl_simple()
763 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); in nvme_setup_sgl_simple()
764 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); in nvme_setup_sgl_simple()
765 cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_setup_sgl_simple()
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_map_data()
783 &cmnd->rw, &bv); in nvme_map_data()
785 if (nvmeq->qid && sgl_threshold && in nvme_map_data()
786 nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_map_data()
788 &cmnd->rw, &bv); in nvme_map_data()
792 iod->dma_len = 0; in nvme_map_data()
793 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
794 if (!iod->sgt.sgl) in nvme_map_data()
796 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); in nvme_map_data()
797 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); in nvme_map_data()
798 if (!iod->sgt.orig_nents) in nvme_map_data()
801 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), in nvme_map_data()
804 if (rc == -EREMOTEIO) in nvme_map_data()
809 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) in nvme_map_data()
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); in nvme_map_data()
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); in nvme_map_data()
818 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_map_data()
820 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_map_data()
829 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), in nvme_map_metadata()
831 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
833 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); in nvme_map_metadata()
842 iod->aborted = false; in nvme_prep_rq()
843 iod->nr_allocations = -1; in nvme_prep_rq()
844 iod->sgt.nents = 0; in nvme_prep_rq()
846 ret = nvme_setup_cmd(req->q->queuedata, req); in nvme_prep_rq()
851 ret = nvme_map_data(dev, req, &iod->cmd); in nvme_prep_rq()
857 ret = nvme_map_metadata(dev, req, &iod->cmd); in nvme_prep_rq()
877 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq()
878 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
879 struct request *req = bd->rq; in nvme_queue_rq()
887 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
890 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) in nvme_queue_rq()
891 return nvme_fail_nonready_command(&dev->ctrl, req); in nvme_queue_rq()
896 spin_lock(&nvmeq->sq_lock); in nvme_queue_rq()
897 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
898 nvme_write_sq_db(nvmeq, bd->last); in nvme_queue_rq()
899 spin_unlock(&nvmeq->sq_lock); in nvme_queue_rq()
905 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmds()
910 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
913 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmds()
922 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_prep_rq_batch()
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) in nvme_prep_rq_batch()
927 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; in nvme_prep_rq_batch()
936 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_queue_rqs()
947 if (!next || req->mq_hctx != next->mq_hctx) { in nvme_queue_rqs()
949 req->rq_next = NULL; in nvme_queue_rqs()
962 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_unmap_rq()
963 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_unmap_rq()
968 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_unmap_rq()
969 rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); in nvme_pci_unmap_rq()
990 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
992 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
997 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
999 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
1000 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
1001 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
1006 if (!nvmeq->qid) in nvme_queue_tagset()
1007 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1008 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1014 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1015 __u16 command_id = READ_ONCE(cqe->command_id); in nvme_handle_cqe()
1020 * survive any kind of queue freeze and often don't respond to in nvme_handle_cqe()
1024 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { in nvme_handle_cqe()
1025 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1026 cqe->status, &cqe->result); in nvme_handle_cqe()
1032 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1034 command_id, le16_to_cpu(cqe->sq_id)); in nvme_handle_cqe()
1038 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
1039 if (!nvme_try_complete_req(req, cqe->status, cqe->result) && in nvme_handle_cqe()
1040 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, in nvme_handle_cqe()
1047 u32 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1049 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1050 nvmeq->cq_head = 0; in nvme_update_cq_head()
1051 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1053 nvmeq->cq_head = tmp; in nvme_update_cq_head()
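nvme_cqe_pending() and nvme_update_cq_head() implement the NVMe completion-queue phase-bit protocol: the host tracks an expected phase, a CQE is new when bit 0 of its status equals that expectation, and the expectation flips every time the head wraps, because the controller inverts the bit on each pass through the ring. A self-contained userspace model:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEPTH 4

struct cqe { uint16_t status; };	/* bit 0 = phase, as in the CQE status */

static struct cqe cq[DEPTH];
static uint16_t head;
static uint16_t phase = 1;		/* queues start expecting phase 1 */

static bool cqe_pending(void)
{
	return (cq[head].status & 1) == phase;
}

static void update_head(void)
{
	if (++head == DEPTH) {
		head = 0;
		phase ^= 1;	/* the controller flips the bit each pass */
	}
}

int main(void)
{
	cq[0].status = 1;		/* controller posted one completion */
	while (cqe_pending()) {
		printf("completion at head %u\n", (unsigned)head);
		update_head();
	}
	return 0;
}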
1065 * load-load control dependency between phase and the rest of in nvme_poll_cq()
1069 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); in nvme_poll_cq()
1106 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1108 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1110 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1112 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1117 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll()
1123 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1125 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
1133 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1139 spin_lock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1142 spin_unlock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1152 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1161 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1169 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1171 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1175 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1181 struct nvme_ctrl *ctrl = &dev->ctrl; in adapter_alloc_sq()
1186 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't in adapter_alloc_sq()
1190 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) in adapter_alloc_sq()
1198 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1200 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1204 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
1219 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in abort_endio()
1221 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1222 "Abort status: 0x%x", nvme_req(req)->status); in abort_endio()
1223 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1233 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); in nvme_should_reset()
1236 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_should_reset()
1244 /* We shouldn't reset unless the controller is in a fatal error state in nvme_should_reset()
1259 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, in nvme_warn_reset()
1262 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1263 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", in nvme_warn_reset()
1266 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1267 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", in nvme_warn_reset()
1273 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1275 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1282 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_timeout()
1283 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1286 u32 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_timeout()
1293 if (pci_channel_offline(to_pci_dev(dev->dev))) in nvme_timeout()
1297 * Reset immediately if the controller has failed in nvme_timeout()
1307 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1308 nvme_poll(req->mq_hctx, NULL); in nvme_timeout()
1313 dev_warn(dev->ctrl.device, in nvme_timeout()
1315 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1320 * Shut down immediately if the controller times out while starting. The in nvme_timeout()
1325 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_timeout()
1327 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_timeout()
1330 dev_warn_ratelimited(dev->ctrl.device, in nvme_timeout()
1331 "I/O tag %d (%04x) QID %d timeout, disable controller\n", in nvme_timeout()
1332 req->tag, nvme_cid(req), nvmeq->qid); in nvme_timeout()
1333 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1343 * Shut down the controller immediately and schedule a reset if the in nvme_timeout()
1347 opcode = nvme_req(req)->cmd->common.opcode; in nvme_timeout()
1348 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1349 dev_warn(dev->ctrl.device, in nvme_timeout()
1350 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", in nvme_timeout()
1351 req->tag, nvme_cid(req), opcode, in nvme_timeout()
1352 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid); in nvme_timeout()
1353 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1357 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1358 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1361 iod->aborted = true; in nvme_timeout()
1365 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1367 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1369 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode), in nvme_timeout()
1370 nvmeq->qid, blk_op_str(req_op(req)), req_op(req), in nvme_timeout()
1373 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), in nvme_timeout()
1376 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1381 abort_req->end_io = abort_endio; in nvme_timeout()
1382 abort_req->end_io_data = NULL; in nvme_timeout()
1393 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) in nvme_timeout()
1397 if (nvme_try_sched_reset(&dev->ctrl)) in nvme_timeout()
1398 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_timeout()
1404 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1405 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1406 if (!nvmeq->sq_cmds) in nvme_free_queue()
1409 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1410 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1411 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1413 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1414 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1422 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { in nvme_free_queues()
1423 dev->ctrl.queue_count--; in nvme_free_queues()
1424 nvme_free_queue(&dev->queues[i]); in nvme_free_queues()
1430 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_suspend_queue()
1432 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1438 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1439 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1440 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1441 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1442 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1449 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1463 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1464 spin_lock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1465 nvme_poll_cq(&dev->queues[i], NULL); in nvme_reap_pending_cqes()
1466 spin_unlock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1473 int q_depth = dev->q_depth; in nvme_cmb_qdepth()
1477 if (q_size_aligned * nr_io_queues > dev->cmb_size) { in nvme_cmb_qdepth()
1478 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); in nvme_cmb_qdepth()
1489 return -ENOMEM; in nvme_cmb_qdepth()
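nvme_cmb_qdepth() shrinks the I/O queue depth until all submission queues fit inside the controller memory buffer, or gives up if the result would be uselessly small. A rough standalone model of that clamp, assuming 64-byte SQEs and 4 KiB pages as in the driver:

#include <stdint.h>
#include <stdio.h>

#define SQE_SIZE 64u
#define PAGE 4096u

static int cmb_qdepth(uint64_t cmb_size, int nr_io_queues, int q_depth)
{
	uint64_t q_size = ((uint64_t)q_depth * SQE_SIZE + PAGE - 1) &
			  ~(uint64_t)(PAGE - 1);

	if (q_size * nr_io_queues > cmb_size) {
		uint64_t mem_per_q = cmb_size / nr_io_queues;

		mem_per_q &= ~(uint64_t)(PAGE - 1);	/* round down to a page */
		q_depth = (int)(mem_per_q / SQE_SIZE);
		if (q_depth < 2)
			return -1;	/* -ENOMEM: CMB too small, use host memory */
	}
	return q_depth;
}

int main(void)
{
	/* 8 queues of depth 1024 need 512 KiB; a 256 KiB CMB halves the depth */
	printf("depth = %d\n", cmb_qdepth(256 << 10, 8, 1024));
	return 0;
}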
1498 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_alloc_sq_cmds()
1500 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { in nvme_alloc_sq_cmds()
1501 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1502 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1503 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1504 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1505 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1506 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1510 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1514 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1515 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1516 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
1517 return -ENOMEM; in nvme_alloc_sq_cmds()
1523 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue()
1525 if (dev->ctrl.queue_count > qid) in nvme_alloc_queue()
1528 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1529 nvmeq->q_depth = depth; in nvme_alloc_queue()
1530 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1531 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1532 if (!nvmeq->cqes) in nvme_alloc_queue()
1538 nvmeq->dev = dev; in nvme_alloc_queue()
1539 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1540 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1541 nvmeq->cq_head = 0; in nvme_alloc_queue()
1542 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1543 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1544 nvmeq->qid = qid; in nvme_alloc_queue()
1545 dev->ctrl.queue_count++; in nvme_alloc_queue()
1550 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1551 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1553 return -ENOMEM; in nvme_alloc_queue()
1558 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1559 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1562 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1563 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1565 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1566 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1572 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1574 nvmeq->sq_tail = 0; in nvme_init_queue()
1575 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1576 nvmeq->cq_head = 0; in nvme_init_queue()
1577 nvmeq->cq_phase = 1; in nvme_init_queue()
1578 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1579 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1581 dev->online_queues++; in nvme_init_queue()
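The q_db assignment above indexes dev->dbs[qid * 2 * db_stride], where dbs starts at BAR0 + 0x1000 (source line 2499) and db_stride is 1 << CAP.DSTRD in 32-bit units (line 2498); the matching CQ head doorbell sits one stride further, as the writel() at line 1001 shows. Expressed as byte offsets this is the spec's doorbell layout; a small demonstration:

#include <stdint.h>
#include <stdio.h>

/* Byte offset of the submission-queue tail doorbell for queue qid. */
static uint32_t sq_doorbell_off(uint32_t qid, uint32_t dstrd)
{
	return 0x1000 + (2 * qid) * (4u << dstrd);
}

/* The completion-queue head doorbell sits one stride later. */
static uint32_t cq_doorbell_off(uint32_t qid, uint32_t dstrd)
{
	return 0x1000 + (2 * qid + 1) * (4u << dstrd);
}

int main(void)
{
	/* qid 1 with the common DSTRD=0: SQ tail 0x1008, CQ head 0x100c */
	printf("SQ1 %#x  CQ1 %#x\n", sq_doorbell_off(1, 0),
	       cq_doorbell_off(1, 0));
	return 0;
}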
1593 if (!mutex_trylock(&dev->shutdown_lock)) in nvme_setup_io_queues_trylock()
1594 return -ENODEV; in nvme_setup_io_queues_trylock()
1597 * Controller is in the wrong state, fail early. in nvme_setup_io_queues_trylock()
1599 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { in nvme_setup_io_queues_trylock()
1600 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues_trylock()
1601 return -ENODEV; in nvme_setup_io_queues_trylock()
1609 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1613 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1616 * A queue's vector matches the queue identifier unless the controller in nvme_create_queue()
1620 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1622 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1634 nvmeq->cq_vector = vector; in nvme_create_queue()
1646 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1647 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1651 dev->online_queues--; in nvme_create_queue()
1652 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1681 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { in nvme_dev_remove_admin()
1683 * If the controller was reset during removal, it's possible in nvme_dev_remove_admin()
1687 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_remove_admin()
1688 nvme_remove_admin_tag_set(&dev->ctrl); in nvme_dev_remove_admin()
1694 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); in db_bar_size()
1699 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_remap_bar()
1701 if (size <= dev->bar_mapped_size) in nvme_remap_bar()
1704 return -ENOMEM; in nvme_remap_bar()
1705 if (dev->bar) in nvme_remap_bar()
1706 iounmap(dev->bar); in nvme_remap_bar()
1707 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
1708 if (!dev->bar) { in nvme_remap_bar()
1709 dev->bar_mapped_size = 0; in nvme_remap_bar()
1710 return -ENOMEM; in nvme_remap_bar()
1712 dev->bar_mapped_size = size; in nvme_remap_bar()
1713 dev->dbs = dev->bar + NVME_REG_DBS; in nvme_remap_bar()
1728 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
1729 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
1731 if (dev->subsystem && in nvme_pci_configure_admin_queue()
1732 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) in nvme_pci_configure_admin_queue()
1733 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); in nvme_pci_configure_admin_queue()
1742 result = nvme_disable_ctrl(&dev->ctrl, false); in nvme_pci_configure_admin_queue()
1750 dev->ctrl.numa_node = dev_to_node(dev->dev); in nvme_pci_configure_admin_queue()
1752 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1753 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
1756 writel(aqa, dev->bar + NVME_REG_AQA); in nvme_pci_configure_admin_queue()
1757 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1758 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1760 result = nvme_enable_ctrl(&dev->ctrl); in nvme_pci_configure_admin_queue()
1764 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1768 dev->online_queues--; in nvme_pci_configure_admin_queue()
1772 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
1781 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { in nvme_create_io_queues()
1782 if (nvme_alloc_queue(dev, i, dev->q_depth)) { in nvme_create_io_queues()
1783 ret = -ENOMEM; in nvme_create_io_queues()
1788 max = min(dev->max_qid, dev->ctrl.queue_count - 1); in nvme_create_io_queues()
1789 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { in nvme_create_io_queues()
1790 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + in nvme_create_io_queues()
1791 dev->io_queues[HCTX_TYPE_READ]; in nvme_create_io_queues()
1796 for (i = dev->online_queues; i <= max; i++) { in nvme_create_io_queues()
1799 ret = nvme_create_queue(&dev->queues[i], i, polled); in nvme_create_io_queues()
1806 * than the desired number of queues, and even a controller without in nvme_create_io_queues()
1815 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; in nvme_cmb_size_unit()
1822 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; in nvme_cmb_size()
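nvme_cmb_size_unit() and nvme_cmb_size() decode the CMBSZ register: SZU in bits 8-11 selects a granularity of 4 KiB << (4 * SZU) and SZ in bits 12-31 counts those units. A sketch of the decode; the bit positions follow my reading of the NVMe spec and nvme.h, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed CMBSZ layout: support flags in bits 0-4, SZU in bits 8-11,
 * SZ in bits 12-31; size = (4 KiB << (4 * SZU)) * SZ. */
static uint64_t cmb_bytes(uint32_t cmbsz)
{
	uint64_t unit = 1ull << (12 + 4 * ((cmbsz >> 8) & 0xf));
	uint64_t sz = (cmbsz >> 12) & 0xfffff;

	return unit * sz;
}

int main(void)
{
	/* SZU=1 (64 KiB units) and SZ=16 -> a 1 MiB CMB */
	printf("%llu bytes\n",
	       (unsigned long long)cmb_bytes((16u << 12) | (1u << 8)));
	return 0;
}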
1829 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_map_cmb()
1832 if (dev->cmb_size) in nvme_map_cmb()
1835 if (NVME_CAP_CMBS(dev->ctrl.cap)) in nvme_map_cmb()
1836 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1838 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); in nvme_map_cmb()
1839 if (!dev->cmbsz) in nvme_map_cmb()
1841 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); in nvme_map_cmb()
1844 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); in nvme_map_cmb()
1845 bar = NVME_CMB_BIR(dev->cmbloc); in nvme_map_cmb()
1852 * Tell the controller about the host side address mapping the CMB, in nvme_map_cmb()
1855 if (NVME_CAP_CMBS(dev->ctrl.cap)) { in nvme_map_cmb()
1858 dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1863 * for example, due to being behind a bridge. Reduce the CMB to in nvme_map_cmb()
1866 if (size > bar_size - offset) in nvme_map_cmb()
1867 size = bar_size - offset; in nvme_map_cmb()
1870 dev_warn(dev->ctrl.device, in nvme_map_cmb()
1875 dev->cmb_size = size; in nvme_map_cmb()
1876 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); in nvme_map_cmb()
1878 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == in nvme_map_cmb()
1887 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; in nvme_set_host_mem()
1888 u64 dma_addr = dev->host_mem_descs_dma; in nvme_set_host_mem()
1898 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); in nvme_set_host_mem()
1900 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
1902 dev_warn(dev->ctrl.device, in nvme_set_host_mem()
1906 dev->hmb = bits & NVME_HOST_MEM_ENABLE; in nvme_set_host_mem()
1915 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem()
1916 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; in nvme_free_host_mem()
1917 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; in nvme_free_host_mem()
1919 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], in nvme_free_host_mem()
1920 le64_to_cpu(desc->addr), in nvme_free_host_mem()
1924 kfree(dev->host_mem_desc_bufs); in nvme_free_host_mem()
1925 dev->host_mem_desc_bufs = NULL; in nvme_free_host_mem()
1926 dma_free_coherent(dev->dev, in nvme_free_host_mem()
1927 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), in nvme_free_host_mem()
1928 dev->host_mem_descs, dev->host_mem_descs_dma); in nvme_free_host_mem()
1929 dev->host_mem_descs = NULL; in nvme_free_host_mem()
1930 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
1943 tmp = (preferred + chunk_size - 1); in __nvme_alloc_host_mem()
1947 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) in __nvme_alloc_host_mem()
1948 max_entries = dev->ctrl.hmmaxd; in __nvme_alloc_host_mem()
1950 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), in __nvme_alloc_host_mem()
1962 len = min_t(u64, chunk_size, preferred - size); in __nvme_alloc_host_mem()
1963 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, in __nvme_alloc_host_mem()
1976 dev->nr_host_mem_descs = i; in __nvme_alloc_host_mem()
1977 dev->host_mem_size = size; in __nvme_alloc_host_mem()
1978 dev->host_mem_descs = descs; in __nvme_alloc_host_mem()
1979 dev->host_mem_descs_dma = descs_dma; in __nvme_alloc_host_mem()
1980 dev->host_mem_desc_bufs = bufs; in __nvme_alloc_host_mem()
1984 while (--i >= 0) { in __nvme_alloc_host_mem()
1987 dma_free_attrs(dev->dev, size, bufs[i], in __nvme_alloc_host_mem()
1994 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, in __nvme_alloc_host_mem()
1997 dev->host_mem_descs = NULL; in __nvme_alloc_host_mem()
1998 return -ENOMEM; in __nvme_alloc_host_mem()
2004 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); in nvme_alloc_host_mem()
2010 if (!min || dev->host_mem_size >= min) in nvme_alloc_host_mem()
2016 return -ENOMEM; in nvme_alloc_host_mem()
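nvme_alloc_host_mem() retries __nvme_alloc_host_mem() with progressively halved chunk sizes until an allocation attempt succeeds or the chunk would drop below the controller's advertised minimum. The shape of that loop, with a hypothetical try_alloc() standing in for the real allocator:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for __nvme_alloc_host_mem(): pretend the system
 * can only satisfy chunks of 4 MiB or less */
static bool try_alloc(uint64_t chunk)
{
	return chunk <= (4ull << 20);
}

int main(void)
{
	uint64_t preferred = 256ull << 20;	/* HMPRE-derived target */
	uint64_t hmminds = 2ull << 20;		/* minimum chunk the ctrl accepts */
	uint64_t chunk;

	for (chunk = preferred; chunk >= hmminds; chunk /= 2) {
		if (try_alloc(chunk)) {
			printf("allocated with %llu MiB chunks\n",
			       (unsigned long long)(chunk >> 20));
			return 0;
		}
	}
	return 1;	/* the driver returns -ENOMEM here */
}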
2022 u64 preferred = (u64)dev->ctrl.hmpre * 4096; in nvme_setup_host_mem()
2023 u64 min = (u64)dev->ctrl.hmmin * 4096; in nvme_setup_host_mem()
2027 if (!dev->ctrl.hmpre) in nvme_setup_host_mem()
2032 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2042 if (dev->host_mem_descs) { in nvme_setup_host_mem()
2043 if (dev->host_mem_size >= min) in nvme_setup_host_mem()
2049 if (!dev->host_mem_descs) { in nvme_setup_host_mem()
2051 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2053 return 0; /* controller must work without HMB */ in nvme_setup_host_mem()
2056 dev_info(dev->ctrl.device, in nvme_setup_host_mem()
2058 dev->host_mem_size >> ilog2(SZ_1M)); in nvme_setup_host_mem()
2073 ndev->cmbloc, ndev->cmbsz); in cmb_show()
2082 return sysfs_emit(buf, "%u\n", ndev->cmbloc); in cmbloc_show()
2091 return sysfs_emit(buf, "%u\n", ndev->cmbsz); in cmbsz_show()
2100 return sysfs_emit(buf, "%d\n", ndev->hmb); in hmb_show()
2111 return -EINVAL; in hmb_store()
2113 if (new == ndev->hmb) in hmb_store()
2141 if (!dev->cmbsz) in nvme_pci_attrs_are_visible()
2144 if (a == &dev_attr_hmb.attr && !ctrl->hmpre) in nvme_pci_attrs_are_visible()
2147 return a->mode; in nvme_pci_attrs_are_visible()
2171 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); in nvme_update_attrs()
2180 struct nvme_dev *dev = affd->priv; in nvme_calc_irq_sets()
2181 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; in nvme_calc_irq_sets()
2202 nr_read_queues = nrirqs - nr_write_queues; in nvme_calc_irq_sets()
2205 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2206 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2207 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2208 affd->set_size[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2209 affd->nr_sets = nr_read_queues ? 2 : 1; in nvme_calc_irq_sets()
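nvme_calc_irq_sets() splits however many vectors were actually granted between the default (write) and read queue maps, always keeping at least one default queue. The decision table lifted into a standalone helper:

#include <stdio.h>

/* Mirror of the branch structure above: how many of nrirqs vectors go to
 * the read set, given the user's requested number of write queues. */
static unsigned int calc_read_queues(unsigned int nrirqs,
				     unsigned int nr_write_queues)
{
	if (nrirqs <= 1 || !nr_write_queues)
		return 0;		/* one shared default set only */
	if (nr_write_queues >= nrirqs)
		return 1;		/* keep at least one read queue */
	return nrirqs - nr_write_queues;
}

int main(void)
{
	unsigned int nrirqs = 8, writes = 2;
	unsigned int reads = calc_read_queues(nrirqs, writes);

	/* 8 granted vectors, 2 requested write queues -> 2 default + 6 read */
	printf("default=%u read=%u\n", nrirqs - reads, reads);
	return 0;
}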
2214 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_irqs()
2224 * left over for non-polled I/O. in nvme_setup_irqs()
2226 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); in nvme_setup_irqs()
2227 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; in nvme_setup_irqs()
2233 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; in nvme_setup_irqs()
2234 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2237 * We need interrupts for the admin queue and each non-polled I/O queue, in nvme_setup_irqs()
2242 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) in nvme_setup_irqs()
2243 irq_queues += (nr_io_queues - poll_queues); in nvme_setup_irqs()
2254 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) in nvme_max_io_queues()
2256 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; in nvme_max_io_queues()
2261 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2262 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_io_queues()
2271 dev->nr_write_queues = write_queues; in nvme_setup_io_queues()
2272 dev->nr_poll_queues = poll_queues; in nvme_setup_io_queues()
2274 nr_io_queues = dev->nr_allocated_queues - 1; in nvme_setup_io_queues()
2275 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); in nvme_setup_io_queues()
2292 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2295 if (dev->cmb_use_sqes) { in nvme_setup_io_queues()
2299 dev->q_depth = result; in nvme_setup_io_queues()
2300 dev->ctrl.sqsize = result - 1; in nvme_setup_io_queues()
2302 dev->cmb_use_sqes = false; in nvme_setup_io_queues()
2311 if (!--nr_io_queues) { in nvme_setup_io_queues()
2312 result = -ENOMEM; in nvme_setup_io_queues()
2316 adminq->q_db = dev->dbs; in nvme_setup_io_queues()
2320 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2331 result = -EIO; in nvme_setup_io_queues()
2335 dev->num_vecs = result; in nvme_setup_io_queues()
2336 result = max(result - 1, 1); in nvme_setup_io_queues()
2337 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; in nvme_setup_io_queues()
2348 set_bit(NVMEQ_ENABLED, &adminq->flags); in nvme_setup_io_queues()
2349 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2352 if (result || dev->online_queues < 2) in nvme_setup_io_queues()
2355 if (dev->online_queues - 1 < dev->max_qid) { in nvme_setup_io_queues()
2356 nr_io_queues = dev->online_queues - 1; in nvme_setup_io_queues()
2364 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", in nvme_setup_io_queues()
2365 dev->io_queues[HCTX_TYPE_DEFAULT], in nvme_setup_io_queues()
2366 dev->io_queues[HCTX_TYPE_READ], in nvme_setup_io_queues()
2367 dev->io_queues[HCTX_TYPE_POLL]); in nvme_setup_io_queues()
2370 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2377 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end()
2380 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2387 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end()
2390 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2397 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2402 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2410 req->end_io = nvme_del_cq_end; in nvme_delete_queue()
2412 req->end_io = nvme_del_queue_end; in nvme_delete_queue()
2413 req->end_io_data = nvmeq; in nvme_delete_queue()
2415 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2422 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_delete_io_queues()
2428 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) in __nvme_delete_io_queues()
2430 nr_queues--; in __nvme_delete_io_queues()
2434 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_delete_io_queues()
2436 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_delete_io_queues()
2441 sent--; in __nvme_delete_io_queues()
2456 if (dev->io_queues[HCTX_TYPE_POLL]) in nvme_pci_nr_maps()
2458 if (dev->io_queues[HCTX_TYPE_READ]) in nvme_pci_nr_maps()
2465 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); in nvme_pci_update_nr_queues()
2467 nvme_free_queues(dev, dev->online_queues); in nvme_pci_update_nr_queues()
2472 int result = -ENOMEM; in nvme_pci_enable()
2473 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_enable()
2480 if (readl(dev->bar + NVME_REG_CSTS) == -1) { in nvme_pci_enable()
2481 result = -ENODEV; in nvme_pci_enable()
2487 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll in nvme_pci_enable()
2494 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); in nvme_pci_enable()
2496 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, in nvme_pci_enable()
2498 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); in nvme_pci_enable()
2499 dev->dbs = dev->bar + 4096; in nvme_pci_enable()
2502 * Some Apple controllers require a non-standard SQE size. in nvme_pci_enable()
2506 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) in nvme_pci_enable()
2507 dev->io_sqes = 7; in nvme_pci_enable()
2509 dev->io_sqes = NVME_NVM_IOSQES; in nvme_pci_enable()
2512 * Temporary fix for the Apple controller found in the MacBook8,1 and in nvme_pci_enable()
2513 * some MacBook7,1 to avoid controller resets and data loss. in nvme_pci_enable()
2515 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { in nvme_pci_enable()
2516 dev->q_depth = 2; in nvme_pci_enable()
2517 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " in nvme_pci_enable()
2518 "set queue depth=%u to work around controller resets\n", in nvme_pci_enable()
2519 dev->q_depth); in nvme_pci_enable()
2520 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && in nvme_pci_enable()
2521 (pdev->device == 0xa821 || pdev->device == 0xa822) && in nvme_pci_enable()
2522 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2523 dev->q_depth = 64; in nvme_pci_enable()
2524 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " in nvme_pci_enable()
2525 "set queue depth=%u\n", dev->q_depth); in nvme_pci_enable()
2532 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && in nvme_pci_enable()
2533 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { in nvme_pci_enable()
2534 dev->q_depth = NVME_AQ_DEPTH + 2; in nvme_pci_enable()
2535 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", in nvme_pci_enable()
2536 dev->q_depth); in nvme_pci_enable()
2538 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
2558 if (dev->bar) in nvme_dev_unmap()
2559 iounmap(dev->bar); in nvme_dev_unmap()
2560 pci_release_mem_regions(to_pci_dev(dev->dev)); in nvme_dev_unmap()
2565 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_ctrl_is_dead()
2570 if (pdev->error_state != pci_channel_io_normal) in nvme_pci_ctrl_is_dead()
2573 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_pci_ctrl_is_dead()
2579 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); in nvme_dev_disable()
2580 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_disable()
2583 mutex_lock(&dev->shutdown_lock); in nvme_dev_disable()
2587 nvme_start_freeze(&dev->ctrl); in nvme_dev_disable()
2589 * Give the controller a chance to complete all entered requests in nvme_dev_disable()
2593 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); in nvme_dev_disable()
2596 nvme_quiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2598 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
2600 nvme_disable_ctrl(&dev->ctrl, shutdown); in nvme_dev_disable()
2601 nvme_poll_irqdisable(&dev->queues[0]); in nvme_dev_disable()
2610 nvme_cancel_tagset(&dev->ctrl); in nvme_dev_disable()
2611 nvme_cancel_admin_tagset(&dev->ctrl); in nvme_dev_disable()
2616 * deadlocking blk-mq hot-cpu notifier. in nvme_dev_disable()
2619 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2620 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) in nvme_dev_disable()
2621 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_disable()
2623 mutex_unlock(&dev->shutdown_lock); in nvme_dev_disable()
2628 if (!nvme_wait_reset(&dev->ctrl)) in nvme_disable_prepare_reset()
2629 return -EBUSY; in nvme_disable_prepare_reset()
2636 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, in nvme_setup_prp_pools()
2639 if (!dev->prp_page_pool) in nvme_setup_prp_pools()
2640 return -ENOMEM; in nvme_setup_prp_pools()
2643 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, in nvme_setup_prp_pools()
2645 if (!dev->prp_small_pool) { in nvme_setup_prp_pools()
2646 dma_pool_destroy(dev->prp_page_pool); in nvme_setup_prp_pools()
2647 return -ENOMEM; in nvme_setup_prp_pools()
2654 dma_pool_destroy(dev->prp_page_pool); in nvme_release_prp_pools()
2655 dma_pool_destroy(dev->prp_small_pool); in nvme_release_prp_pools()
2662 dev->iod_mempool = mempool_create_node(1, in nvme_pci_alloc_iod_mempool()
2665 dev_to_node(dev->dev)); in nvme_pci_alloc_iod_mempool()
2666 if (!dev->iod_mempool) in nvme_pci_alloc_iod_mempool()
2667 return -ENOMEM; in nvme_pci_alloc_iod_mempool()
2673 if (dev->tagset.tags) in nvme_free_tagset()
2674 nvme_remove_io_tag_set(&dev->ctrl); in nvme_free_tagset()
2675 dev->ctrl.tagset = NULL; in nvme_free_tagset()
2684 put_device(dev->dev); in nvme_pci_free_ctrl()
2685 kfree(dev->queues); in nvme_pci_free_ctrl()
2693 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); in nvme_reset_work()
2696 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { in nvme_reset_work()
2697 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", in nvme_reset_work()
2698 dev->ctrl.state); in nvme_reset_work()
2699 result = -ENODEV; in nvme_reset_work()
2704 * If we're called to reset a live controller first shut it down before in nvme_reset_work()
2707 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) in nvme_reset_work()
2709 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2711 mutex_lock(&dev->shutdown_lock); in nvme_reset_work()
2715 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_reset_work()
2716 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2719 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the in nvme_reset_work()
2722 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_work()
2723 dev_warn(dev->ctrl.device, in nvme_reset_work()
2724 "failed to mark controller CONNECTING\n"); in nvme_reset_work()
2725 result = -EBUSY; in nvme_reset_work()
2729 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); in nvme_reset_work()
2744 * Freeze and update the number of I/O queues as those might have in nvme_reset_work()
2746 * controller around but remove all namespaces. in nvme_reset_work()
2748 if (dev->online_queues > 1) { in nvme_reset_work()
2750 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2751 nvme_wait_freeze(&dev->ctrl); in nvme_reset_work()
2753 nvme_unfreeze(&dev->ctrl); in nvme_reset_work()
2755 dev_warn(dev->ctrl.device, "IO queues lost\n"); in nvme_reset_work()
2756 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2757 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2758 nvme_remove_namespaces(&dev->ctrl); in nvme_reset_work()
2766 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_reset_work()
2767 dev_warn(dev->ctrl.device, in nvme_reset_work()
2768 "failed to mark controller live state\n"); in nvme_reset_work()
2769 result = -ENODEV; in nvme_reset_work()
2773 nvme_start_ctrl(&dev->ctrl); in nvme_reset_work()
2777 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2783 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", in nvme_reset_work()
2785 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_reset_work()
2787 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2788 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2789 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2790 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_reset_work()
2795 *val = readl(to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_read32()
2801 writel(val, to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_write32()
2807 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_read64()
2813 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_get_address()
2815 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); in nvme_pci_get_address()
2820 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_print_device_info()
2821 struct nvme_subsystem *subsys = ctrl->subsys; in nvme_pci_print_device_info()
2823 dev_err(ctrl->device, in nvme_pci_print_device_info()
2825 pdev->vendor, pdev->device, in nvme_pci_print_device_info()
2826 nvme_strlen(subsys->model, sizeof(subsys->model)), in nvme_pci_print_device_info()
2827 subsys->model, nvme_strlen(subsys->firmware_rev, in nvme_pci_print_device_info()
2828 sizeof(subsys->firmware_rev)), in nvme_pci_print_device_info()
2829 subsys->firmware_rev); in nvme_pci_print_device_info()
2836 return dma_pci_p2pdma_supported(dev->dev); in nvme_pci_supports_pci_p2pdma()
2856 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_map()
2859 return -ENODEV; in nvme_dev_map()
2867 return -ENODEV; in nvme_dev_map()
2872 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { in check_vendor_combination_bug()
2885 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { in check_vendor_combination_bug()
2888 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as in check_vendor_combination_bug()
2889 * within a few minutes after bootup on a Coffee Lake board - in check_vendor_combination_bug()
2890 * ASUS PRIME Z370-A in check_vendor_combination_bug()
2893 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || in check_vendor_combination_bug()
2894 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) in check_vendor_combination_bug()
2896 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || in check_vendor_combination_bug()
2897 pdev->device == 0xa808 || pdev->device == 0xa809)) || in check_vendor_combination_bug()
2898 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { in check_vendor_combination_bug()
2908 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || in check_vendor_combination_bug()
2909 pdev->device == 0x500f)) { in check_vendor_combination_bug()
2928 unsigned long quirks = id->driver_data; in nvme_pci_alloc_dev()
2929 int node = dev_to_node(&pdev->dev); in nvme_pci_alloc_dev()
2931 int ret = -ENOMEM; in nvme_pci_alloc_dev()
2935 return ERR_PTR(-ENOMEM); in nvme_pci_alloc_dev()
2936 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); in nvme_pci_alloc_dev()
2937 mutex_init(&dev->shutdown_lock); in nvme_pci_alloc_dev()
2939 dev->nr_write_queues = write_queues; in nvme_pci_alloc_dev()
2940 dev->nr_poll_queues = poll_queues; in nvme_pci_alloc_dev()
2941 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; in nvme_pci_alloc_dev()
2942 dev->queues = kcalloc_node(dev->nr_allocated_queues, in nvme_pci_alloc_dev()
2944 if (!dev->queues) in nvme_pci_alloc_dev()
2947 dev->dev = get_device(&pdev->dev); in nvme_pci_alloc_dev()
2952 acpi_storage_d3(&pdev->dev)) { in nvme_pci_alloc_dev()
2957 dev_info(&pdev->dev, in nvme_pci_alloc_dev()
2961 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, in nvme_pci_alloc_dev()
2966 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) in nvme_pci_alloc_dev()
2967 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); in nvme_pci_alloc_dev()
2969 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nvme_pci_alloc_dev()
2970 dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_alloc_dev()
2971 dma_set_max_seg_size(&pdev->dev, 0xffffffff); in nvme_pci_alloc_dev()
2974 * Limit the max command size to prevent iod->sg allocations going in nvme_pci_alloc_dev()
2977 dev->ctrl.max_hw_sectors = min_t(u32, in nvme_pci_alloc_dev()
2978 NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); in nvme_pci_alloc_dev()
2979 dev->ctrl.max_segments = NVME_MAX_SEGS; in nvme_pci_alloc_dev()
2985 dev->ctrl.max_integrity_segments = 1; in nvme_pci_alloc_dev()
2989 put_device(dev->dev); in nvme_pci_alloc_dev()
2990 kfree(dev->queues); in nvme_pci_alloc_dev()
2999 int result = -ENOMEM; in nvme_probe()
3017 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); in nvme_probe()
3023 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, in nvme_probe()
3029 * Mark the controller as connecting before sending admin commands to in nvme_probe()
3032 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_probe()
3033 dev_warn(dev->ctrl.device, in nvme_probe()
3034 "failed to mark controller CONNECTING\n"); in nvme_probe()
3035 result = -EBUSY; in nvme_probe()
3039 result = nvme_init_ctrl_finish(&dev->ctrl, false); in nvme_probe()
3053 if (dev->online_queues > 1) { in nvme_probe()
3054 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, in nvme_probe()
3059 if (!dev->ctrl.tagset) in nvme_probe()
3060 dev_warn(dev->ctrl.device, "IO queues not created\n"); in nvme_probe()
3062 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_probe()
3063 dev_warn(dev->ctrl.device, in nvme_probe()
3064 "failed to mark controller live state\n"); in nvme_probe()
3065 result = -ENODEV; in nvme_probe()
3071 nvme_start_ctrl(&dev->ctrl); in nvme_probe()
3072 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3073 flush_work(&dev->ctrl.scan_work); in nvme_probe()
3077 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_probe()
3084 mempool_destroy(dev->iod_mempool); in nvme_probe()
3090 nvme_uninit_ctrl(&dev->ctrl); in nvme_probe()
3091 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3102 * with ->remove(). in nvme_reset_prepare()
3105 nvme_sync_queues(&dev->ctrl); in nvme_reset_prepare()
3112 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_reset_done()
3113 flush_work(&dev->ctrl.reset_work); in nvme_reset_done()
3132 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_remove()
3136 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_remove()
3140 flush_work(&dev->ctrl.reset_work); in nvme_remove()
3141 nvme_stop_ctrl(&dev->ctrl); in nvme_remove()
3142 nvme_remove_namespaces(&dev->ctrl); in nvme_remove()
3148 mempool_destroy(dev->iod_mempool); in nvme_remove()
3151 nvme_uninit_ctrl(&dev->ctrl); in nvme_remove()
3168 struct nvme_ctrl *ctrl = &ndev->ctrl; in nvme_resume()
3170 if (ndev->last_ps == U32_MAX || in nvme_resume()
3171 nvme_set_power_state(ctrl, ndev->last_ps) != 0) in nvme_resume()
3173 if (ctrl->hmpre && nvme_setup_host_mem(ndev)) in nvme_resume()
3185 struct nvme_ctrl *ctrl = &ndev->ctrl; in nvme_suspend()
3186 int ret = -EBUSY; in nvme_suspend()
3188 ndev->last_ps = U32_MAX; in nvme_suspend()
3195 * device does not support any non-default power states, shut down the in nvme_suspend()
3200 * down, so as to allow the platform to achieve its minimum low-power in nvme_suspend()
3203 if (pm_suspend_via_firmware() || !ctrl->npss || in nvme_suspend()
3205 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) in nvme_suspend()
3217 * but the specification allows the controller to access memory in a in nvme_suspend()
3218 * non-operational power state. in nvme_suspend()
3220 if (ndev->hmb) { in nvme_suspend()
3226 ret = nvme_get_power_state(ctrl, &ndev->last_ps); in nvme_suspend()
3237 ret = nvme_set_power_state(ctrl, ctrl->npss); in nvme_suspend()
3246 * Clearing npss forces a controller reset on resume. The in nvme_suspend()
3250 ctrl->npss = 0; in nvme_suspend()
3269 return nvme_try_sched_reset(&ndev->ctrl); in nvme_simple_resume()
3275 .freeze = nvme_simple_suspend,
3289 * shut down the controller to quiesce. The controller will be restarted in nvme_error_detected()
3296 dev_warn(dev->ctrl.device, in nvme_error_detected()
3297 "frozen state error detected, reset controller\n"); in nvme_error_detected()
3298 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { in nvme_error_detected()
3305 dev_warn(dev->ctrl.device, in nvme_error_detected()
3316 dev_info(dev->ctrl.device, "restart after slot reset\n"); in nvme_slot_reset()
3318 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_slot_reset()
3319 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_slot_reset()
3327 flush_work(&dev->ctrl.reset_work); in nvme_error_resume()
3360 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
3364 { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
3476 { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
3478 { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */