14 * https://nvmexpress.org/developers/nvme-specification/
18 * ---------------------
24 * -----
28 * -drive file=<file>,if=none,id=<drive_id>
29 * -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
30 * -device nvme,serial=<serial>,id=<bus_name>, \
33 * max_ioqpairs=<N[optional]>, \
34 * aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
35 * mdts=<N[optional]>,vsl=<N[optional]>, \
36 * zoned.zasl=<N[optional]>, \
38 * sriov_max_vfs=<N[optional]> \
39 * sriov_vq_flexible=<N[optional]> \
40 * sriov_vi_flexible=<N[optional]> \
41 * sriov_max_vi_per_vf=<N[optional]> \
42 * sriov_max_vq_per_vf=<N[optional]> \
44 * atomic.awun=<N[optional]>, \
45 * atomic.awupf=<N[optional]>, \
47 * -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
51 * zoned.zone_size=<N[optional]>, \
52 * zoned.zone_capacity=<N[optional]>, \
53 * zoned.descr_ext_size=<N[optional]>, \
54 * zoned.max_active=<N[optional]>, \
55 * zoned.max_open=<N[optional]>, \
60 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
63 * Enabling PMR emulation can be achieved by pointing the `pmrdev` parameter at a memory-backend-file, for example:
65 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
66 * size=<size> .... -device nvme,...,pmrdev=<mem_id>
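 *
 * A complete minimal invocation assembled from the skeleton above may help;
 * every value here (image path, ids, serial number, queue count) is an
 * illustrative placeholder rather than a default:
 *
 *      -drive file=nvm.img,if=none,id=nvm-1
 *      -device nvme-subsys,id=subsys0,nqn=subsys0
 *      -device nvme,serial=deadbeef,id=nvme0,subsys=subsys0,max_ioqpairs=8
 *      -device nvme-ns,drive=nvm-1,bus=nvme0,nsid=1
 *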
71 * nvme-subsys device as above.
75 * - `nqn`
77 * `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
84 * - `subsys`
88 * Identify Controller data structure in CMIC (Controller Multi-path I/O and
91 * - `aerl`
96 * - `aer_max_queued`
101 * - `mdts`
103 * between host-accessible memory and the controller. The value is specified
104 * as a power of two (2^n) and is in units of the minimum memory page size
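 *
 *   A worked example (the 4096 byte minimum page size is an assumption for
 *   illustration; nvme_check_mdts() further down applies the same
 *   `page_size << mdts` comparison):
 *
 *     mdts = 7, minimum page size = 4096 bytes
 *     maximum transfer = 4096 << 7 = 524288 bytes (512 KiB)
 *     mdts = 0 advertises no limit
 *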
107 * - `vsl`
109 * this value is specified as a power of two (2^n) and is in units of the
113 * - `zoned.zasl`
115 * `mdts`, the value is specified as a power of two (2^n) and is in units of
119 * - `zoned.auto_transition`
124 * - `sriov_max_vfs`
126 * by the controller. The default value is 0. Specifying a non-zero value
127 * enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
128 * Virtual function controllers will not report SR-IOV capability.
133 * - `sriov_vq_flexible`
136 * controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
138 * - `sriov_vi_flexible`
141 * controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
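 *
 *   As a worked split, using made-up numbers rather than defaults: with
 *   `max_ioqpairs=26` and `sriov_vq_flexible=8` the primary controller keeps
 *   26 - 8 = 18 private queue pairs, and with `msix_qsize=17` and
 *   `sriov_vi_flexible=8` it keeps 17 - 8 = 9 private interrupt vectors; the
 *   8 flexible resources of each kind are what can be assigned to the VFs.
 *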
143 * - `sriov_max_vi_per_vf`
148 * - `sriov_max_vq_per_vf`
155 * - `shared`
158 * nvme-subsys device, the namespace will be attached to all controllers in
163 * - `detached`
166 * controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
174 * The number may be followed by K, M, G as in kilo-, mega- or giga-.
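 *
 * For example, `zoned.zone_size=128M` together with `zoned.zone_capacity=64M`
 * requests 128 MiB zones of which 64 MiB are writable capacity (both values
 * are purely illustrative).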
198 #include "qemu/error-report.h"
205 #include "system/block-backend.h"
209 #include "system/spdm-socket.h"
234 " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
318 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst);
319 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n);
323 return le16_to_cpu(req->sq->sqid); in nvme_sqid()
329 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_make_pid()
335 return (rg << (16 - rgif)) | ph; in nvme_make_pid()
340 return ph < ns->fdp.nphs; in nvme_ph_valid()
345 return rg < endgrp->fdp.nrg; in nvme_rg_valid()
350 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_pid2ph()
356 return pid & ((1 << (15 - rgif)) - 1); in nvme_pid2ph()
361 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_pid2rg()
367 return pid >> (16 - rgif); in nvme_pid2rg()
376 return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg); in nvme_parse_pid()
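/*
 * Example (illustrative values): the helpers above place the reclaim group in
 * the top `rgif` bits of the 16-bit placement identifier and keep the
 * placement handle in the low bits. With an arbitrarily chosen rgif of 4,
 * rg = 3 and ph = 5:
 *
 *   pid = (3 << (16 - 4)) | 5 = 0x3005
 *   rg  = 0x3005 >> (16 - 4)  = 3
 */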
385 QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); in nvme_assign_zone_state()
388 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); in nvme_assign_zone_state()
391 QTAILQ_REMOVE(&ns->closed_zones, zone, entry); in nvme_assign_zone_state()
394 QTAILQ_REMOVE(&ns->full_zones, zone, entry); in nvme_assign_zone_state()
404 QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); in nvme_assign_zone_state()
407 QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); in nvme_assign_zone_state()
410 QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); in nvme_assign_zone_state()
413 QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); in nvme_assign_zone_state()
417 zone->d.za = 0; in nvme_assign_zone_state()
424 if (ns->params.max_active_zones != 0 && in nvme_zns_check_resources()
425 ns->nr_active_zones + act > ns->params.max_active_zones) { in nvme_zns_check_resources()
426 trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); in nvme_zns_check_resources()
430 if (ns->params.max_open_zones != 0 && in nvme_zns_check_resources()
431 ns->nr_open_zones + opn > ns->params.max_open_zones) { in nvme_zns_check_resources()
432 trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); in nvme_zns_check_resources()
436 if (zrwa > ns->zns.numzrwa) { in nvme_zns_check_resources()
452 static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf) in nvme_fdp_alloc_event() argument
455 bool is_full = ebuf->next == ebuf->start && ebuf->nelems; in nvme_fdp_alloc_event()
457 ret = &ebuf->events[ebuf->next++]; in nvme_fdp_alloc_event()
458 if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) { in nvme_fdp_alloc_event()
459 ebuf->next = 0; in nvme_fdp_alloc_event()
462 ebuf->start = ebuf->next; in nvme_fdp_alloc_event()
464 ebuf->nelems++; in nvme_fdp_alloc_event()
468 ret->timestamp = nvme_get_timestamp(n); in nvme_fdp_alloc_event()
475 return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1; in log_event()
478 static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid) in nvme_update_ruh() argument
480 NvmeEnduranceGroup *endgrp = ns->endgrp; in nvme_update_ruh()
490 ruhid = ns->fdp.phs[ph]; in nvme_update_ruh()
492 ruh = &endgrp->fdp.ruhs[ruhid]; in nvme_update_ruh()
493 ru = &ruh->rus[rg]; in nvme_update_ruh()
495 if (ru->ruamw) { in nvme_update_ruh()
497 e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events); in nvme_update_ruh()
498 e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN; in nvme_update_ruh()
499 e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV; in nvme_update_ruh()
500 e->pid = cpu_to_le16(pid); in nvme_update_ruh()
501 e->nsid = cpu_to_le32(ns->params.nsid); in nvme_update_ruh()
502 e->rgid = cpu_to_le16(rg); in nvme_update_ruh()
503 e->ruhid = cpu_to_le16(ruhid); in nvme_update_ruh()
507 nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw)); in nvme_update_ruh()
510 ru->ruamw = ruh->ruamw; in nvme_update_ruh()
515 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) in nvme_addr_is_cmb() argument
519 if (!n->cmb.cmse) { in nvme_addr_is_cmb()
523 lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; in nvme_addr_is_cmb()
524 hi = lo + int128_get64(n->cmb.mem.size); in nvme_addr_is_cmb()
529 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr) in nvme_addr_to_cmb() argument
531 hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; in nvme_addr_to_cmb()
532 return &n->cmb.buf[addr - base]; in nvme_addr_to_cmb()
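/*
 * Illustration of the CMB check and translation above, with a made-up base
 * address and window size:
 *
 *   lo   = 0xfe000000, window = 8 MiB  ->  hi = 0xfe800000
 *   addr = 0xfe001000 lies in [lo, hi), so it is a CMB address and
 *   nvme_addr_to_cmb() returns &n->cmb.buf[0xfe001000 - 0xfe000000],
 *   i.e. &n->cmb.buf[0x1000].
 */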
535 static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr) in nvme_addr_is_pmr() argument
539 if (!n->pmr.cmse) { in nvme_addr_is_pmr()
543 hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); in nvme_addr_is_pmr()
545 return addr >= n->pmr.cba && addr < hi; in nvme_addr_is_pmr()
548 static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) in nvme_addr_to_pmr() argument
550 return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); in nvme_addr_to_pmr()
553 static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr) in nvme_addr_is_iomem() argument
561 * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however, in nvme_addr_is_iomem()
565 lo = n->bar0.addr; in nvme_addr_is_iomem()
566 hi = lo + int128_get64(n->bar0.size); in nvme_addr_is_iomem()
571 static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) in nvme_addr_read() argument
573 hwaddr hi = addr + size - 1; in nvme_addr_read()
578 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { in nvme_addr_read()
579 memcpy(buf, nvme_addr_to_cmb(n, addr), size); in nvme_addr_read()
583 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { in nvme_addr_read()
584 memcpy(buf, nvme_addr_to_pmr(n, addr), size); in nvme_addr_read()
588 return pci_dma_read(PCI_DEVICE(n), addr, buf, size); in nvme_addr_read()
591 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size) in nvme_addr_write() argument
593 hwaddr hi = addr + size - 1; in nvme_addr_write()
598 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { in nvme_addr_write()
599 memcpy(nvme_addr_to_cmb(n, addr), buf, size); in nvme_addr_write()
603 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { in nvme_addr_write()
604 memcpy(nvme_addr_to_pmr(n, addr), buf, size); in nvme_addr_write()
608 return pci_dma_write(PCI_DEVICE(n), addr, buf, size); in nvme_addr_write()
611 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) in nvme_nsid_valid() argument
617 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) in nvme_check_sqid() argument
619 return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; in nvme_check_sqid()
622 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid) in nvme_check_cqid() argument
624 return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1; in nvme_check_cqid()
629 cq->tail++; in nvme_inc_cq_tail()
630 if (cq->tail >= cq->size) { in nvme_inc_cq_tail()
631 cq->tail = 0; in nvme_inc_cq_tail()
632 cq->phase = !cq->phase; in nvme_inc_cq_tail()
638 sq->head = (sq->head + 1) % sq->size; in nvme_inc_sq_head()
643 return (cq->tail + 1) % cq->size == cq->head; in nvme_cq_full()
648 return sq->head == sq->tail; in nvme_sq_empty()
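/*
 * These helpers follow the usual circular queue conventions: a completion
 * queue is full when advancing the tail would collide with the head, and a
 * submission queue with equal head and tail is empty. For example, with
 * cq->size = 4, cq->head = 0 and cq->tail = 3, (3 + 1) % 4 == 0 == head, so
 * the CQ is full.
 */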
651 static void nvme_irq_check(NvmeCtrl *n) in nvme_irq_check() argument
653 PCIDevice *pci = PCI_DEVICE(n); in nvme_irq_check()
654 uint32_t intms = ldl_le_p(&n->bar.intms); in nvme_irq_check()
665 if (~intms & n->irq_status) { in nvme_irq_check()
672 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq) in nvme_irq_assert() argument
674 PCIDevice *pci = PCI_DEVICE(n); in nvme_irq_assert()
676 if (cq->irq_enabled) { in nvme_irq_assert()
678 trace_pci_nvme_irq_msix(cq->vector); in nvme_irq_assert()
679 msix_notify(pci, cq->vector); in nvme_irq_assert()
682 assert(cq->vector < 32); in nvme_irq_assert()
683 n->irq_status |= 1 << cq->vector; in nvme_irq_assert()
684 nvme_irq_check(n); in nvme_irq_assert()
691 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq) in nvme_irq_deassert() argument
693 if (cq->irq_enabled) { in nvme_irq_deassert()
694 if (msix_enabled(PCI_DEVICE(n))) { in nvme_irq_deassert()
697 assert(cq->vector < 32); in nvme_irq_deassert()
698 if (!n->cq_pending) { in nvme_irq_deassert()
699 n->irq_status &= ~(1 << cq->vector); in nvme_irq_deassert()
701 nvme_irq_check(n); in nvme_irq_deassert()
708 req->ns = NULL; in nvme_req_clear()
709 req->opaque = NULL; in nvme_req_clear()
710 req->aiocb = NULL; in nvme_req_clear()
711 memset(&req->cqe, 0x0, sizeof(req->cqe)); in nvme_req_clear()
712 req->status = NVME_SUCCESS; in nvme_req_clear()
715 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma) in nvme_sg_init() argument
718 pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0); in nvme_sg_init()
719 sg->flags = NVME_SG_DMA; in nvme_sg_init()
721 qemu_iovec_init(&sg->iov, 0); in nvme_sg_init()
724 sg->flags |= NVME_SG_ALLOC; in nvme_sg_init()
729 if (!(sg->flags & NVME_SG_ALLOC)) { in nvme_sg_unmap()
733 if (sg->flags & NVME_SG_DMA) { in nvme_sg_unmap()
734 qemu_sglist_destroy(&sg->qsg); in nvme_sg_unmap()
736 qemu_iovec_destroy(&sg->iov); in nvme_sg_unmap()
751 uint32_t trans_len, count = ns->lbasz; in nvme_sg_split()
753 bool dma = sg->flags & NVME_SG_DMA; in nvme_sg_split()
755 size_t sg_len = dma ? sg->qsg.size : sg->iov.size; in nvme_sg_split()
758 assert(sg->flags & NVME_SG_ALLOC); in nvme_sg_split()
761 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; in nvme_sg_split()
764 trans_len = MIN(trans_len, sge_len - offset); in nvme_sg_split()
768 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset, in nvme_sg_split()
771 qemu_iovec_add(&dst->iov, in nvme_sg_split()
772 sg->iov.iov[sg_idx].iov_base + offset, in nvme_sg_split()
777 sg_len -= trans_len; in nvme_sg_split()
778 count -= trans_len; in nvme_sg_split()
783 count = (dst == data) ? ns->lbasz : ns->lbaf.ms; in nvme_sg_split()
793 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, in nvme_map_addr_cmb() argument
802 if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) { in nvme_map_addr_cmb()
806 qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len); in nvme_map_addr_cmb()
811 static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, in nvme_map_addr_pmr() argument
818 if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) { in nvme_map_addr_pmr()
822 qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len); in nvme_map_addr_pmr()
827 static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len) in nvme_map_addr() argument
837 if (nvme_addr_is_iomem(n, addr)) { in nvme_map_addr()
841 if (nvme_addr_is_cmb(n, addr)) { in nvme_map_addr()
843 } else if (nvme_addr_is_pmr(n, addr)) { in nvme_map_addr()
848 if (sg->flags & NVME_SG_DMA) { in nvme_map_addr()
852 if (sg->iov.niov + 1 > IOV_MAX) { in nvme_map_addr()
857 return nvme_map_addr_cmb(n, &sg->iov, addr, len); in nvme_map_addr()
859 return nvme_map_addr_pmr(n, &sg->iov, addr, len); in nvme_map_addr()
863 if (!(sg->flags & NVME_SG_DMA)) { in nvme_map_addr()
867 if (sg->qsg.nsg + 1 > IOV_MAX) { in nvme_map_addr()
871 qemu_sglist_add(&sg->qsg, addr, len); in nvme_map_addr()
881 static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr) in nvme_addr_is_dma() argument
883 return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr)); in nvme_addr_is_dma()
886 static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1, in nvme_map_prp() argument
889 hwaddr trans_len = n->page_size - (prp1 % n->page_size); in nvme_map_prp()
891 int num_prps = (len >> n->page_bits) + 1; in nvme_map_prp()
897 nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1)); in nvme_map_prp()
899 status = nvme_map_addr(n, sg, prp1, trans_len); in nvme_map_prp()
904 len -= trans_len; in nvme_map_prp()
906 if (len > n->page_size) { in nvme_map_prp()
907 g_autofree uint64_t *prp_list = g_new(uint64_t, n->max_prp_ents); in nvme_map_prp()
916 nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; in nvme_map_prp()
917 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); in nvme_map_prp()
918 ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); in nvme_map_prp()
927 if (i == nents - 1 && len > n->page_size) { in nvme_map_prp()
928 if (unlikely(prp_ent & (n->page_size - 1))) { in nvme_map_prp()
935 nents = (len + n->page_size - 1) >> n->page_bits; in nvme_map_prp()
936 nents = MIN(nents, n->max_prp_ents); in nvme_map_prp()
938 ret = nvme_addr_read(n, prp_ent, (void *)prp_list, in nvme_map_prp()
948 if (unlikely(prp_ent & (n->page_size - 1))) { in nvme_map_prp()
954 trans_len = MIN(len, n->page_size); in nvme_map_prp()
955 status = nvme_map_addr(n, sg, prp_ent, trans_len); in nvme_map_prp()
960 len -= trans_len; in nvme_map_prp()
964 if (unlikely(prp2 & (n->page_size - 1))) { in nvme_map_prp()
969 status = nvme_map_addr(n, sg, prp2, len); in nvme_map_prp()
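/*
 * A sketch of the PRP walk above with assumed numbers: a 4 KiB controller
 * page size, a page-aligned PRP1 and a 10 KiB transfer.
 *
 *   trans_len for PRP1 = 4096 - (prp1 % 4096) = 4096
 *   remaining len      = 10240 - 4096 = 6144, which is > page_size, so PRP2
 *                        is interpreted as a pointer to a PRP list
 *   num_prps           = (10240 >> 12) + 1 = 3
 */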
987 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, in nvme_map_sgl_data() argument
1020 uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); in nvme_map_sgl_data()
1033 if (UINT64_MAX - addr < dlen) { in nvme_map_sgl_data()
1037 status = nvme_map_addr(n, sg, addr, trans_len); in nvme_map_sgl_data()
1042 *len -= trans_len; in nvme_map_sgl_data()
1048 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, in nvme_map_sgl() argument
1073 nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr)); in nvme_map_sgl()
1080 status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd); in nvme_map_sgl()
1089 switch (NVME_SGL_TYPE(sgld->type)) { in nvme_map_sgl()
1097 seg_len = le32_to_cpu(sgld->len); in nvme_map_sgl()
1104 if (UINT64_MAX - addr < seg_len) { in nvme_map_sgl()
1111 if (nvme_addr_read(n, addr, segment, sizeof(segment))) { in nvme_map_sgl()
1117 status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE, in nvme_map_sgl()
1123 nsgld -= SEG_CHUNK_SIZE; in nvme_map_sgl()
1127 ret = nvme_addr_read(n, addr, segment, nsgld * in nvme_map_sgl()
1135 last_sgld = &segment[nsgld - 1]; in nvme_map_sgl()
1140 if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { in nvme_map_sgl()
1141 status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); in nvme_map_sgl()
1153 if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { in nvme_map_sgl()
1159 addr = le64_to_cpu(sgld->addr); in nvme_map_sgl()
1165 status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); in nvme_map_sgl()
1185 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, in nvme_map_dptr() argument
1190 switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { in nvme_map_dptr()
1192 prp1 = le64_to_cpu(cmd->dptr.prp1); in nvme_map_dptr()
1193 prp2 = le64_to_cpu(cmd->dptr.prp2); in nvme_map_dptr()
1195 return nvme_map_prp(n, sg, prp1, prp2, len); in nvme_map_dptr()
1198 return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); in nvme_map_dptr()
1204 static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len, in nvme_map_mptr() argument
1207 int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); in nvme_map_mptr()
1208 hwaddr mptr = le64_to_cpu(cmd->mptr); in nvme_map_mptr()
1214 if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) { in nvme_map_mptr()
1218 status = nvme_map_sgl(n, sg, sgl, len, cmd); in nvme_map_mptr()
1226 nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr)); in nvme_map_mptr()
1227 status = nvme_map_addr(n, sg, mptr, len); in nvme_map_mptr()
1235 static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) in nvme_map_data() argument
1237 NvmeNamespace *ns = req->ns; in nvme_map_data()
1238 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_map_data()
1239 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); in nvme_map_data()
1240 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); in nvme_map_data()
1245 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { in nvme_map_data()
1250 status = nvme_map_dptr(n, &sg, len, &req->cmd); in nvme_map_data()
1255 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); in nvme_map_data()
1256 nvme_sg_split(&sg, ns, &req->sg, NULL); in nvme_map_data()
1262 return nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_map_data()
1265 static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) in nvme_map_mdata() argument
1267 NvmeNamespace *ns = req->ns; in nvme_map_mdata()
1276 status = nvme_map_dptr(n, &sg, len, &req->cmd); in nvme_map_mdata()
1281 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); in nvme_map_mdata()
1282 nvme_sg_split(&sg, ns, NULL, &req->sg); in nvme_map_mdata()
1288 return nvme_map_mptr(n, &req->sg, len, &req->cmd); in nvme_map_mdata()
1291 static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, in nvme_tx_interleaved() argument
1298 bool dma = sg->flags & NVME_SG_DMA; in nvme_tx_interleaved()
1303 assert(sg->flags & NVME_SG_ALLOC); in nvme_tx_interleaved()
1306 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; in nvme_tx_interleaved()
1308 if (sge_len - offset < 0) { in nvme_tx_interleaved()
1309 offset -= sge_len; in nvme_tx_interleaved()
1321 trans_len = MIN(trans_len, sge_len - offset); in nvme_tx_interleaved()
1324 addr = sg->qsg.sg[sg_idx].base + offset; in nvme_tx_interleaved()
1326 addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; in nvme_tx_interleaved()
1330 ret = nvme_addr_read(n, addr, ptr, trans_len); in nvme_tx_interleaved()
1332 ret = nvme_addr_write(n, addr, ptr, trans_len); in nvme_tx_interleaved()
1340 len -= trans_len; in nvme_tx_interleaved()
1341 count -= trans_len; in nvme_tx_interleaved()
1353 static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, void *ptr, uint32_t len, in nvme_tx() argument
1356 assert(sg->flags & NVME_SG_ALLOC); in nvme_tx()
1358 if (sg->flags & NVME_SG_DMA) { in nvme_tx()
1363 dma_buf_write(ptr, len, &residual, &sg->qsg, attrs); in nvme_tx()
1365 dma_buf_read(ptr, len, &residual, &sg->qsg, attrs); in nvme_tx()
1376 bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); in nvme_tx()
1378 bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); in nvme_tx()
1390 static inline uint16_t nvme_c2h(NvmeCtrl *n, void *ptr, uint32_t len, in nvme_c2h() argument
1395 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_c2h()
1400 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); in nvme_c2h()
1403 static inline uint16_t nvme_h2c(NvmeCtrl *n, void *ptr, uint32_t len, in nvme_h2c() argument
1408 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_h2c()
1413 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); in nvme_h2c()
1416 uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len, in nvme_bounce_data() argument
1419 NvmeNamespace *ns = req->ns; in nvme_bounce_data()
1420 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_bounce_data()
1421 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); in nvme_bounce_data()
1422 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); in nvme_bounce_data()
1425 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { in nvme_bounce_data()
1426 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, in nvme_bounce_data()
1427 ns->lbaf.ms, 0, dir); in nvme_bounce_data()
1430 return nvme_tx(n, &req->sg, ptr, len, dir); in nvme_bounce_data()
1433 uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len, in nvme_bounce_mdata() argument
1436 NvmeNamespace *ns = req->ns; in nvme_bounce_mdata()
1440 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, in nvme_bounce_mdata()
1441 ns->lbasz, ns->lbasz, dir); in nvme_bounce_mdata()
1444 nvme_sg_unmap(&req->sg); in nvme_bounce_mdata()
1446 status = nvme_map_mptr(n, &req->sg, len, &req->cmd); in nvme_bounce_mdata()
1451 return nvme_tx(n, &req->sg, ptr, len, dir); in nvme_bounce_mdata()
1458 assert(req->sg.flags & NVME_SG_ALLOC); in nvme_blk_read()
1460 if (req->sg.flags & NVME_SG_DMA) { in nvme_blk_read()
1461 req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); in nvme_blk_read()
1463 req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); in nvme_blk_read()
1471 assert(req->sg.flags & NVME_SG_ALLOC); in nvme_blk_write()
1473 if (req->sg.flags & NVME_SG_DMA) { in nvme_blk_write()
1474 req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); in nvme_blk_write()
1476 req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); in nvme_blk_write()
1482 trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head); in nvme_update_cq_eventidx()
1484 stl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->ei_addr, cq->head, in nvme_update_cq_eventidx()
1490 ldl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->db_addr, &cq->head, in nvme_update_cq_head()
1493 trace_pci_nvme_update_cq_head(cq->cqid, cq->head); in nvme_update_cq_head()
1499 NvmeCtrl *n = cq->ctrl; in nvme_post_cqes() local
1501 bool pending = cq->head != cq->tail; in nvme_post_cqes()
1504 QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { in nvme_post_cqes()
1508 if (n->dbbuf_enabled) { in nvme_post_cqes()
1517 sq = req->sq; in nvme_post_cqes()
1518 req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); in nvme_post_cqes()
1519 req->cqe.sq_id = cpu_to_le16(sq->sqid); in nvme_post_cqes()
1520 req->cqe.sq_head = cpu_to_le16(sq->head); in nvme_post_cqes()
1521 addr = cq->dma_addr + (cq->tail << NVME_CQES); in nvme_post_cqes()
1522 ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe, in nvme_post_cqes()
1523 sizeof(req->cqe)); in nvme_post_cqes()
1527 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_post_cqes()
1531 QTAILQ_REMOVE(&cq->req_list, req, entry); in nvme_post_cqes()
1534 nvme_sg_unmap(&req->sg); in nvme_post_cqes()
1536 if (QTAILQ_EMPTY(&sq->req_list) && !nvme_sq_empty(sq)) { in nvme_post_cqes()
1537 qemu_bh_schedule(sq->bh); in nvme_post_cqes()
1540 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); in nvme_post_cqes()
1542 if (cq->tail != cq->head) { in nvme_post_cqes()
1543 if (cq->irq_enabled && !pending) { in nvme_post_cqes()
1544 n->cq_pending++; in nvme_post_cqes()
1547 nvme_irq_assert(n, cq); in nvme_post_cqes()
1553 assert(cq->cqid == req->sq->cqid); in nvme_enqueue_req_completion()
1554 trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, in nvme_enqueue_req_completion()
1555 le32_to_cpu(req->cqe.result), in nvme_enqueue_req_completion()
1556 le32_to_cpu(req->cqe.dw1), in nvme_enqueue_req_completion()
1557 req->status); in nvme_enqueue_req_completion()
1559 if (req->status) { in nvme_enqueue_req_completion()
1560 trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), in nvme_enqueue_req_completion()
1561 req->status, req->cmd.opcode); in nvme_enqueue_req_completion()
1564 QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); in nvme_enqueue_req_completion()
1565 QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); in nvme_enqueue_req_completion()
1567 qemu_bh_schedule(cq->bh); in nvme_enqueue_req_completion()
1572 NvmeCtrl *n = opaque; in nvme_process_aers() local
1575 trace_pci_nvme_process_aers(n->aer_queued); in nvme_process_aers()
1577 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { in nvme_process_aers()
1582 if (!n->outstanding_aers) { in nvme_process_aers()
1588 if (n->aer_mask & (1 << event->result.event_type)) { in nvme_process_aers()
1589 trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); in nvme_process_aers()
1593 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_process_aers()
1594 n->aer_queued--; in nvme_process_aers()
1596 n->aer_mask |= 1 << event->result.event_type; in nvme_process_aers()
1597 n->outstanding_aers--; in nvme_process_aers()
1599 req = n->aer_reqs[n->outstanding_aers]; in nvme_process_aers()
1601 result = (NvmeAerResult *) &req->cqe.result; in nvme_process_aers()
1602 result->event_type = event->result.event_type; in nvme_process_aers()
1603 result->event_info = event->result.event_info; in nvme_process_aers()
1604 result->log_page = event->result.log_page; in nvme_process_aers()
1607 trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, in nvme_process_aers()
1608 result->log_page); in nvme_process_aers()
1610 nvme_enqueue_req_completion(&n->admin_cq, req); in nvme_process_aers()
1614 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type, in nvme_enqueue_event() argument
1621 if (n->aer_queued == n->params.aer_max_queued) { in nvme_enqueue_event()
1622 trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); in nvme_enqueue_event()
1627 event->result = (NvmeAerResult) { in nvme_enqueue_event()
1633 QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); in nvme_enqueue_event()
1634 n->aer_queued++; in nvme_enqueue_event()
1636 nvme_process_aers(n); in nvme_enqueue_event()
1639 static void nvme_smart_event(NvmeCtrl *n, uint8_t event) in nvme_smart_event() argument
1644 if (!(NVME_AEC_SMART(n->features.async_config) & event)) { in nvme_smart_event()
1665 nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO); in nvme_smart_event()
1668 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) in nvme_clear_events() argument
1672 n->aer_mask &= ~(1 << event_type); in nvme_clear_events()
1674 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { in nvme_clear_events()
1675 if (event->result.event_type == event_type) { in nvme_clear_events()
1676 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_clear_events()
1677 n->aer_queued--; in nvme_clear_events()
1683 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len) in nvme_check_mdts() argument
1685 uint8_t mdts = n->params.mdts; in nvme_check_mdts()
1687 if (mdts && len > n->page_size << mdts) { in nvme_check_mdts()
1698 uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); in nvme_check_bounds()
1700 if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { in nvme_check_bounds()
1711 BlockDriverState *bs = blk_bs(ns->blkconf.blk); in nvme_block_status_all()
1724 bytes -= pnum; in nvme_block_status_all()
1754 error_setg_errno(&err, -ret, "unable to get block status"); in nvme_check_dulbe()
1768 return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 : in nvme_zone_idx()
1769 slba / ns->zone_size; in nvme_zone_idx()
1776 if (zone_idx >= ns->num_zones) { in nvme_get_zone_by_slba()
1780 return &ns->zone_array[zone_idx]; in nvme_get_zone_by_slba()
1785 uint64_t zslba = zone->d.zslba; in nvme_check_zone_state_for_write()
1820 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_check_zone_write()
1821 uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas; in nvme_check_zone_write()
1823 if (slba < zone->w_ptr || slba + nlb > ezrwa) { in nvme_check_zone_write()
1824 trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr); in nvme_check_zone_write()
1828 if (unlikely(slba != zone->w_ptr)) { in nvme_check_zone_write()
1829 trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, in nvme_check_zone_write()
1830 zone->w_ptr); in nvme_check_zone_write()
1854 trace_pci_nvme_err_zone_is_offline(zone->d.zslba); in nvme_check_zone_state_for_read()
1880 if (!ns->params.cross_zone_read) { in nvme_check_zone_read()
1884 * Read across zone boundary - check that all subsequent in nvme_check_zone_read()
1913 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_zrm_finish()
1914 zone->d.za &= ~NVME_ZA_ZRWA_VALID; in nvme_zrm_finish()
1915 if (ns->params.numzrwa) { in nvme_zrm_finish()
1916 ns->zns.numzrwa++; in nvme_zrm_finish()
1956 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_zrm_reset()
1957 if (ns->params.numzrwa) { in nvme_zrm_reset()
1958 ns->zns.numzrwa++; in nvme_zrm_reset()
1964 zone->w_ptr = zone->d.zslba; in nvme_zrm_reset()
1965 zone->d.wp = zone->w_ptr; in nvme_zrm_reset()
1980 if (ns->params.max_open_zones && in nvme_zrm_auto_transition_zone()
1981 ns->nr_open_zones == ns->params.max_open_zones) { in nvme_zrm_auto_transition_zone()
1982 zone = QTAILQ_FIRST(&ns->imp_open_zones); in nvme_zrm_auto_transition_zone()
1987 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); in nvme_zrm_auto_transition_zone()
1998 static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, in nvme_zrm_open_flags() argument
2011 if (n->params.auto_transition_zones) { in nvme_zrm_open_flags()
2044 ns->zns.numzrwa--; in nvme_zrm_open_flags()
2046 zone->d.za |= NVME_ZA_ZRWA_VALID; in nvme_zrm_open_flags()
2056 static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns, in nvme_zrm_auto() argument
2059 return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO); in nvme_zrm_auto()
2065 zone->d.wp += nlb; in nvme_advance_zone_wp()
2067 if (zone->d.wp == nvme_zone_wr_boundary(zone)) { in nvme_advance_zone_wp()
2075 uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg); in nvme_zoned_zrwa_implicit_flush()
2077 nlbc = nzrwafgs * ns->zns.zrwafg; in nvme_zoned_zrwa_implicit_flush()
2079 trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc); in nvme_zoned_zrwa_implicit_flush()
2081 zone->w_ptr += nlbc; in nvme_zoned_zrwa_implicit_flush()
2088 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_finalize_zoned_write()
2093 slba = le64_to_cpu(rw->slba); in nvme_finalize_zoned_write()
2094 nlb = le16_to_cpu(rw->nlb) + 1; in nvme_finalize_zoned_write()
2098 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_finalize_zoned_write()
2099 uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1; in nvme_finalize_zoned_write()
2100 uint64_t elba = slba + nlb - 1; in nvme_finalize_zoned_write()
2103 nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa); in nvme_finalize_zoned_write()
2114 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_is_write()
2116 return rw->opcode == NVME_CMD_WRITE || in nvme_is_write()
2117 rw->opcode == NVME_CMD_ZONE_APPEND || in nvme_is_write()
2118 rw->opcode == NVME_CMD_WRITE_ZEROES; in nvme_is_write()
2129 if (!req->status) { in nvme_misc_cb()
2130 req->status = NVME_INTERNAL_DEV_ERROR; in nvme_misc_cb()
2133 trace_pci_nvme_err_aio(cid, strerror(-ret), req->status); in nvme_misc_cb()
2142 NvmeNamespace *ns = req->ns; in nvme_rw_complete_cb()
2143 BlockBackend *blk = ns->blkconf.blk; in nvme_rw_complete_cb()
2144 BlockAcctCookie *acct = &req->acct; in nvme_rw_complete_cb()
2154 switch (req->cmd.opcode) { in nvme_rw_complete_cb()
2156 req->status = NVME_UNRECOVERED_READ; in nvme_rw_complete_cb()
2162 req->status = NVME_WRITE_FAULT; in nvme_rw_complete_cb()
2166 req->status = NVME_INTERNAL_DEV_ERROR; in nvme_rw_complete_cb()
2170 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_rw_complete_cb()
2172 error_setg_errno(&err, -ret, "aio failed"); in nvme_rw_complete_cb()
2178 if (ns->params.zoned && nvme_is_write(req)) { in nvme_rw_complete_cb()
2188 NvmeNamespace *ns = req->ns; in nvme_rw_cb()
2190 BlockBackend *blk = ns->blkconf.blk; in nvme_rw_cb()
2198 if (ns->lbaf.ms) { in nvme_rw_cb()
2199 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_rw_cb()
2200 uint64_t slba = le64_to_cpu(rw->slba); in nvme_rw_cb()
2201 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_rw_cb()
2204 if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { in nvme_rw_cb()
2207 req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, in nvme_rw_cb()
2213 if (nvme_ns_ext(ns) || req->cmd.mptr) { in nvme_rw_cb()
2216 nvme_sg_unmap(&req->sg); in nvme_rw_cb()
2219 ret = -EFAULT; in nvme_rw_cb()
2223 if (req->cmd.opcode == NVME_CMD_READ) { in nvme_rw_cb()
2238 NvmeRequest *req = ctx->req; in nvme_verify_cb()
2239 NvmeNamespace *ns = req->ns; in nvme_verify_cb()
2240 BlockBackend *blk = ns->blkconf.blk; in nvme_verify_cb()
2241 BlockAcctCookie *acct = &req->acct; in nvme_verify_cb()
2243 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify_cb()
2244 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify_cb()
2245 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_verify_cb()
2246 uint16_t apptag = le16_to_cpu(rw->apptag); in nvme_verify_cb()
2247 uint16_t appmask = le16_to_cpu(rw->appmask); in nvme_verify_cb()
2248 uint64_t reftag = le32_to_cpu(rw->reftag); in nvme_verify_cb()
2249 uint64_t cdw3 = le32_to_cpu(rw->cdw3); in nvme_verify_cb()
2258 req->status = NVME_UNRECOVERED_READ; in nvme_verify_cb()
2260 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_verify_cb()
2267 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_verify_cb()
2268 status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, in nvme_verify_cb()
2269 ctx->mdata.iov.size, slba); in nvme_verify_cb()
2271 req->status = status; in nvme_verify_cb()
2275 req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, in nvme_verify_cb()
2276 ctx->mdata.bounce, ctx->mdata.iov.size, in nvme_verify_cb()
2281 qemu_iovec_destroy(&ctx->data.iov); in nvme_verify_cb()
2282 g_free(ctx->data.bounce); in nvme_verify_cb()
2284 qemu_iovec_destroy(&ctx->mdata.iov); in nvme_verify_cb()
2285 g_free(ctx->mdata.bounce); in nvme_verify_cb()
2296 NvmeRequest *req = ctx->req; in nvme_verify_mdata_in_cb()
2297 NvmeNamespace *ns = req->ns; in nvme_verify_mdata_in_cb()
2298 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify_mdata_in_cb()
2299 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify_mdata_in_cb()
2300 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_verify_mdata_in_cb()
2303 BlockBackend *blk = ns->blkconf.blk; in nvme_verify_mdata_in_cb()
2311 ctx->mdata.bounce = g_malloc(mlen); in nvme_verify_mdata_in_cb()
2313 qemu_iovec_reset(&ctx->mdata.iov); in nvme_verify_mdata_in_cb()
2314 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); in nvme_verify_mdata_in_cb()
2316 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, in nvme_verify_mdata_in_cb()
2339 NvmeNamespace *ns = req->ns; in nvme_compare_mdata_cb()
2340 NvmeCtrl *n = nvme_ctrl(req); in nvme_compare_mdata_cb() local
2341 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare_mdata_cb()
2342 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_compare_mdata_cb()
2343 uint16_t apptag = le16_to_cpu(rw->apptag); in nvme_compare_mdata_cb()
2344 uint16_t appmask = le16_to_cpu(rw->appmask); in nvme_compare_mdata_cb()
2345 uint64_t reftag = le32_to_cpu(rw->reftag); in nvme_compare_mdata_cb()
2346 uint64_t cdw3 = le32_to_cpu(rw->cdw3); in nvme_compare_mdata_cb()
2347 struct nvme_compare_ctx *ctx = req->opaque; in nvme_compare_mdata_cb()
2349 BlockBackend *blk = ns->blkconf.blk; in nvme_compare_mdata_cb()
2350 BlockAcctCookie *acct = &req->acct; in nvme_compare_mdata_cb()
2360 req->status = NVME_UNRECOVERED_READ; in nvme_compare_mdata_cb()
2362 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_compare_mdata_cb()
2367 buf = g_malloc(ctx->mdata.iov.size); in nvme_compare_mdata_cb()
2369 status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, in nvme_compare_mdata_cb()
2372 req->status = status; in nvme_compare_mdata_cb()
2376 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_compare_mdata_cb()
2377 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare_mdata_cb()
2379 uint8_t *mbufp = ctx->mdata.bounce; in nvme_compare_mdata_cb()
2380 uint8_t *end = mbufp + ctx->mdata.iov.size; in nvme_compare_mdata_cb()
2383 status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, in nvme_compare_mdata_cb()
2384 ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, in nvme_compare_mdata_cb()
2387 req->status = status; in nvme_compare_mdata_cb()
2395 if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { in nvme_compare_mdata_cb()
2396 pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); in nvme_compare_mdata_cb()
2399 for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { in nvme_compare_mdata_cb()
2400 if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { in nvme_compare_mdata_cb()
2401 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_mdata_cb()
2409 if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { in nvme_compare_mdata_cb()
2410 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_mdata_cb()
2417 qemu_iovec_destroy(&ctx->data.iov); in nvme_compare_mdata_cb()
2418 g_free(ctx->data.bounce); in nvme_compare_mdata_cb()
2420 qemu_iovec_destroy(&ctx->mdata.iov); in nvme_compare_mdata_cb()
2421 g_free(ctx->mdata.bounce); in nvme_compare_mdata_cb()
2431 NvmeCtrl *n = nvme_ctrl(req); in nvme_compare_data_cb() local
2432 NvmeNamespace *ns = req->ns; in nvme_compare_data_cb()
2433 BlockBackend *blk = ns->blkconf.blk; in nvme_compare_data_cb()
2434 BlockAcctCookie *acct = &req->acct; in nvme_compare_data_cb()
2437 struct nvme_compare_ctx *ctx = req->opaque; in nvme_compare_data_cb()
2445 req->status = NVME_UNRECOVERED_READ; in nvme_compare_data_cb()
2447 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_compare_data_cb()
2452 buf = g_malloc(ctx->data.iov.size); in nvme_compare_data_cb()
2454 status = nvme_bounce_data(n, buf, ctx->data.iov.size, in nvme_compare_data_cb()
2457 req->status = status; in nvme_compare_data_cb()
2461 if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { in nvme_compare_data_cb()
2462 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_data_cb()
2466 if (ns->lbaf.ms) { in nvme_compare_data_cb()
2467 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare_data_cb()
2468 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare_data_cb()
2469 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_compare_data_cb()
2473 ctx->mdata.bounce = g_malloc(mlen); in nvme_compare_data_cb()
2475 qemu_iovec_init(&ctx->mdata.iov, 1); in nvme_compare_data_cb()
2476 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); in nvme_compare_data_cb()
2478 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, in nvme_compare_data_cb()
2486 qemu_iovec_destroy(&ctx->data.iov); in nvme_compare_data_cb()
2487 g_free(ctx->data.bounce); in nvme_compare_data_cb()
2509 iocb->idx = iocb->nr; in nvme_dsm_cancel()
2510 iocb->ret = -ECANCELED; in nvme_dsm_cancel()
2512 if (iocb->aiocb) { in nvme_dsm_cancel()
2513 blk_aio_cancel_async(iocb->aiocb); in nvme_dsm_cancel()
2514 iocb->aiocb = NULL; in nvme_dsm_cancel()
2520 assert(iocb->idx == iocb->nr); in nvme_dsm_cancel()
2534 NvmeRequest *req = iocb->req; in nvme_dsm_md_cb()
2535 NvmeNamespace *ns = req->ns; in nvme_dsm_md_cb()
2540 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_dsm_md_cb()
2544 range = &iocb->range[iocb->idx - 1]; in nvme_dsm_md_cb()
2545 slba = le64_to_cpu(range->slba); in nvme_dsm_md_cb()
2546 nlb = le32_to_cpu(range->nlb); in nvme_dsm_md_cb()
2563 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), in nvme_dsm_md_cb()
2575 NvmeRequest *req = iocb->req; in nvme_dsm_cb()
2576 NvmeCtrl *n = nvme_ctrl(req); in nvme_dsm_cb() local
2577 NvmeNamespace *ns = req->ns; in nvme_dsm_cb()
2582 if (iocb->ret < 0) { in nvme_dsm_cb()
2585 iocb->ret = ret; in nvme_dsm_cb()
2590 if (iocb->idx == iocb->nr) { in nvme_dsm_cb()
2594 range = &iocb->range[iocb->idx++]; in nvme_dsm_cb()
2595 slba = le64_to_cpu(range->slba); in nvme_dsm_cb()
2596 nlb = le32_to_cpu(range->nlb); in nvme_dsm_cb()
2600 if (nlb > n->dmrsl) { in nvme_dsm_cb()
2601 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl); in nvme_dsm_cb()
2607 ns->id_ns.nsze); in nvme_dsm_cb()
2611 iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba), in nvme_dsm_cb()
2617 iocb->aiocb = NULL; in nvme_dsm_cb()
2618 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_dsm_cb()
2619 g_free(iocb->range); in nvme_dsm_cb()
2623 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) in nvme_dsm() argument
2625 NvmeNamespace *ns = req->ns; in nvme_dsm()
2626 NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd; in nvme_dsm()
2627 uint32_t attr = le32_to_cpu(dsm->attributes); in nvme_dsm()
2628 uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1; in nvme_dsm()
2634 NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk, in nvme_dsm()
2637 iocb->req = req; in nvme_dsm()
2638 iocb->ret = 0; in nvme_dsm()
2639 iocb->range = g_new(NvmeDsmRange, nr); in nvme_dsm()
2640 iocb->nr = nr; in nvme_dsm()
2641 iocb->idx = 0; in nvme_dsm()
2643 status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, in nvme_dsm()
2646 g_free(iocb->range); in nvme_dsm()
2652 req->aiocb = &iocb->common; in nvme_dsm()
2661 static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) in nvme_verify() argument
2663 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify()
2664 NvmeNamespace *ns = req->ns; in nvme_verify()
2665 BlockBackend *blk = ns->blkconf.blk; in nvme_verify()
2666 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify()
2667 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_verify()
2671 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_verify()
2672 uint32_t reftag = le32_to_cpu(rw->reftag); in nvme_verify()
2678 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_verify()
2689 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_verify()
2693 if (data_len > (n->page_size << n->params.vsl)) { in nvme_verify()
2702 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_verify()
2710 ctx->req = req; in nvme_verify()
2712 ctx->data.bounce = g_malloc(len); in nvme_verify()
2714 qemu_iovec_init(&ctx->data.iov, 1); in nvme_verify()
2715 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); in nvme_verify()
2717 block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, in nvme_verify()
2720 req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, in nvme_verify()
2729 NvmeCtrl *n; member
2756 iocb->ret = -ECANCELED; in nvme_copy_cancel()
2758 if (iocb->aiocb) { in nvme_copy_cancel()
2759 blk_aio_cancel_async(iocb->aiocb); in nvme_copy_cancel()
2760 iocb->aiocb = NULL; in nvme_copy_cancel()
2771 NvmeRequest *req = iocb->req; in nvme_copy_done()
2772 NvmeNamespace *ns = req->ns; in nvme_copy_done()
2773 BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); in nvme_copy_done()
2775 if (iocb->idx != iocb->nr) { in nvme_copy_done()
2776 req->cqe.result = cpu_to_le32(iocb->idx); in nvme_copy_done()
2779 qemu_iovec_destroy(&iocb->iov); in nvme_copy_done()
2780 g_free(iocb->bounce); in nvme_copy_done()
2782 if (iocb->ret < 0) { in nvme_copy_done()
2783 block_acct_failed(stats, &iocb->acct.read); in nvme_copy_done()
2784 block_acct_failed(stats, &iocb->acct.write); in nvme_copy_done()
2786 block_acct_done(stats, &iocb->acct.read); in nvme_copy_done()
2787 block_acct_done(stats, &iocb->acct.write); in nvme_copy_done()
2790 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_copy_done()
2903 nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, in nvme_check_copy_mcl()
2907 iocb->tcl = copy_len; in nvme_check_copy_mcl()
2908 if (copy_len > ns->id_ns.mcl) { in nvme_check_copy_mcl()
2918 NvmeRequest *req = iocb->req; in nvme_copy_out_completed_cb()
2919 NvmeNamespace *dns = req->ns; in nvme_copy_out_completed_cb()
2922 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_completed_cb()
2926 iocb->ret = ret; in nvme_copy_out_completed_cb()
2927 req->status = NVME_WRITE_FAULT; in nvme_copy_out_completed_cb()
2929 } else if (iocb->ret < 0) { in nvme_copy_out_completed_cb()
2933 if (dns->params.zoned) { in nvme_copy_out_completed_cb()
2934 nvme_advance_zone_wp(dns, iocb->zone, nlb); in nvme_copy_out_completed_cb()
2937 iocb->idx++; in nvme_copy_out_completed_cb()
2938 iocb->slba += nlb; in nvme_copy_out_completed_cb()
2946 NvmeRequest *req = iocb->req; in nvme_copy_out_cb()
2947 NvmeNamespace *dns = req->ns; in nvme_copy_out_cb()
2952 if (ret < 0 || iocb->ret < 0 || !dns->lbaf.ms) { in nvme_copy_out_cb()
2956 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_cb()
2960 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_out_cb()
2962 qemu_iovec_reset(&iocb->iov); in nvme_copy_out_cb()
2963 qemu_iovec_add(&iocb->iov, mbounce, mlen); in nvme_copy_out_cb()
2965 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_moff(dns, iocb->slba), in nvme_copy_out_cb()
2966 &iocb->iov, 0, nvme_copy_out_completed_cb, in nvme_copy_out_cb()
2978 NvmeRequest *req = iocb->req; in nvme_copy_in_completed_cb()
2979 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_completed_cb()
2980 NvmeNamespace *dns = req->ns; in nvme_copy_in_completed_cb()
2991 iocb->ret = ret; in nvme_copy_in_completed_cb()
2992 req->status = NVME_UNRECOVERED_READ; in nvme_copy_in_completed_cb()
2994 } else if (iocb->ret < 0) { in nvme_copy_in_completed_cb()
2998 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_completed_cb()
3001 trace_pci_nvme_copy_out(iocb->slba, nlb); in nvme_copy_in_completed_cb()
3005 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps)) { in nvme_copy_in_completed_cb()
3006 copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy_in_completed_cb()
3008 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); in nvme_copy_in_completed_cb()
3011 mbounce = iocb->bounce + nvme_l2b(sns, nlb); in nvme_copy_in_completed_cb()
3017 status = nvme_dif_check(sns, iocb->bounce, len, mbounce, mlen, prinfor, in nvme_copy_in_completed_cb()
3024 if (NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_copy_in_completed_cb()
3025 copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy_in_completed_cb()
3026 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); in nvme_copy_in_completed_cb()
3029 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_in_completed_cb()
3031 apptag = le16_to_cpu(copy->apptag); in nvme_copy_in_completed_cb()
3032 appmask = le16_to_cpu(copy->appmask); in nvme_copy_in_completed_cb()
3035 status = nvme_check_prinfo(dns, prinfow, iocb->slba, iocb->reftag); in nvme_copy_in_completed_cb()
3040 nvme_dif_pract_generate_dif(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3041 apptag, &iocb->reftag); in nvme_copy_in_completed_cb()
3043 status = nvme_dif_check(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3044 prinfow, iocb->slba, apptag, appmask, in nvme_copy_in_completed_cb()
3045 &iocb->reftag); in nvme_copy_in_completed_cb()
3052 status = nvme_check_bounds(dns, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3057 if (dns->params.zoned) { in nvme_copy_in_completed_cb()
3058 status = nvme_check_zone_write(dns, iocb->zone, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3063 if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_copy_in_completed_cb()
3064 iocb->zone->w_ptr += nlb; in nvme_copy_in_completed_cb()
3068 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_completed_cb()
3069 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_copy_in_completed_cb()
3071 block_acct_start(blk_get_stats(dns->blkconf.blk), &iocb->acct.write, 0, in nvme_copy_in_completed_cb()
3074 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_l2b(dns, iocb->slba), in nvme_copy_in_completed_cb()
3075 &iocb->iov, 0, nvme_copy_out_cb, iocb); in nvme_copy_in_completed_cb()
3080 req->status = status; in nvme_copy_in_completed_cb()
3081 iocb->ret = -1; in nvme_copy_in_completed_cb()
3089 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_cb()
3093 if (ret < 0 || iocb->ret < 0 || !sns->lbaf.ms) { in nvme_copy_in_cb()
3097 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_cb()
3100 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_cb()
3101 qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(sns, nlb), in nvme_copy_in_cb()
3104 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_moff(sns, slba), in nvme_copy_in_cb()
3105 &iocb->iov, 0, nvme_copy_in_completed_cb, in nvme_copy_in_cb()
3121 return sns->lbaf.ds == dns->lbaf.ds && sns->lbaf.ms == dns->lbaf.ms; in nvme_copy_ns_format_match()
3127 if (!nvme_csi_supports_copy(sns->csi) || in nvme_copy_matching_ns_format()
3128 !nvme_csi_supports_copy(dns->csi)) { in nvme_copy_matching_ns_format()
3137 sns->id_ns.dps != dns->id_ns.dps)) { in nvme_copy_matching_ns_format()
3147 return sns->lbaf.ms == 0 && in nvme_copy_corresp_pi_match()
3148 ((dns->lbaf.ms == 8 && dns->pif == 0) || in nvme_copy_corresp_pi_match()
3149 (dns->lbaf.ms == 16 && dns->pif == 1)); in nvme_copy_corresp_pi_match()
3155 if (!nvme_csi_supports_copy(sns->csi) || in nvme_copy_corresp_pi_format()
3156 !nvme_csi_supports_copy(dns->csi)) { in nvme_copy_corresp_pi_format()
3173 NvmeRequest *req = iocb->req; in nvme_do_copy()
3175 NvmeNamespace *dns = req->ns; in nvme_do_copy()
3176 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; in nvme_do_copy()
3177 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); in nvme_do_copy()
3178 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); in nvme_do_copy()
3183 uint32_t dnsid = le32_to_cpu(req->cmd.nsid); in nvme_do_copy()
3186 if (iocb->ret < 0) { in nvme_do_copy()
3190 if (iocb->idx == iocb->nr) { in nvme_do_copy()
3194 if (iocb->format == 2 || iocb->format == 3) { in nvme_do_copy()
3195 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3199 !nvme_nsid_valid(iocb->n, snsid)) { in nvme_do_copy()
3203 iocb->sns = nvme_ns(iocb->n, snsid); in nvme_do_copy()
3204 if (unlikely(!iocb->sns)) { in nvme_do_copy()
3209 if (((slba + nlb) > iocb->slba) && in nvme_do_copy()
3210 ((slba + nlb) < (iocb->slba + iocb->tcl))) { in nvme_do_copy()
3216 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3220 sns = iocb->sns; in nvme_do_copy()
3221 if ((snsid == dnsid) && NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3226 if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3227 !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3233 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3234 NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3247 if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3248 NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3260 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3261 !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3277 if (nlb > le16_to_cpu(sns->id_ns.mssrl)) { in nvme_do_copy()
3287 if (NVME_ERR_REC_DULBE(sns->features.err_rec)) { in nvme_do_copy()
3294 if (sns->params.zoned) { in nvme_do_copy()
3301 g_free(iocb->bounce); in nvme_do_copy()
3302 iocb->bounce = g_malloc_n(le16_to_cpu(sns->id_ns.mssrl), in nvme_do_copy()
3303 sns->lbasz + sns->lbaf.ms); in nvme_do_copy()
3305 qemu_iovec_reset(&iocb->iov); in nvme_do_copy()
3306 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_do_copy()
3308 block_acct_start(blk_get_stats(sns->blkconf.blk), &iocb->acct.read, 0, in nvme_do_copy()
3311 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba), in nvme_do_copy()
3312 &iocb->iov, 0, nvme_copy_in_cb, iocb); in nvme_do_copy()
3316 req->status = status; in nvme_do_copy()
3317 iocb->ret = -1; in nvme_do_copy()
3322 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) in nvme_copy() argument
3324 NvmeNamespace *ns = req->ns; in nvme_copy()
3325 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy()
3326 NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, in nvme_copy()
3328 uint16_t nr = copy->nr + 1; in nvme_copy()
3329 uint8_t format = copy->control[0] & 0xf; in nvme_copy()
3336 iocb->ranges = NULL; in nvme_copy()
3337 iocb->zone = NULL; in nvme_copy()
3339 if (!(n->id_ctrl.ocfs & (1 << format)) || in nvme_copy()
3341 !(n->features.hbs.cdfe & (1 << format)))) { in nvme_copy()
3347 if (nr > ns->id_ns.msrc + 1) { in nvme_copy()
3352 if ((ns->pif == 0x0 && (format != 0x0 && format != 0x2)) || in nvme_copy()
3353 (ns->pif != 0x0 && (format != 0x1 && format != 0x3))) { in nvme_copy()
3358 if (ns->pif) { in nvme_copy()
3362 iocb->format = format; in nvme_copy()
3363 iocb->ranges = g_malloc_n(nr, len); in nvme_copy()
3364 status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); in nvme_copy()
3369 iocb->slba = le64_to_cpu(copy->sdlba); in nvme_copy()
3371 if (ns->params.zoned) { in nvme_copy()
3372 iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); in nvme_copy()
3373 if (!iocb->zone) { in nvme_copy()
3378 status = nvme_zrm_auto(n, ns, iocb->zone); in nvme_copy()
3389 iocb->req = req; in nvme_copy()
3390 iocb->ret = 0; in nvme_copy()
3391 iocb->nr = nr; in nvme_copy()
3392 iocb->idx = 0; in nvme_copy()
3393 iocb->reftag = le32_to_cpu(copy->reftag); in nvme_copy()
3394 iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32; in nvme_copy()
3396 qemu_iovec_init(&iocb->iov, 1); in nvme_copy()
3398 req->aiocb = &iocb->common; in nvme_copy()
3399 iocb->sns = req->ns; in nvme_copy()
3400 iocb->n = n; in nvme_copy()
3401 iocb->bounce = NULL; in nvme_copy()
3407 g_free(iocb->ranges); in nvme_copy()
3412 static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) in nvme_compare() argument
3414 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare()
3415 NvmeNamespace *ns = req->ns; in nvme_compare()
3416 BlockBackend *blk = ns->blkconf.blk; in nvme_compare()
3417 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare()
3418 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_compare()
3419 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_compare()
3428 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) { in nvme_compare()
3436 if (NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt)) { in nvme_compare()
3437 status = nvme_check_mdts(n, data_len); in nvme_compare()
3439 status = nvme_check_mdts(n, len); in nvme_compare()
3450 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_compare()
3457 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_compare()
3463 ctx->data.bounce = g_malloc(data_len); in nvme_compare()
3465 req->opaque = ctx; in nvme_compare()
3467 qemu_iovec_init(&ctx->data.iov, 1); in nvme_compare()
3468 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); in nvme_compare()
3470 block_acct_start(blk_get_stats(blk), &req->acct, data_len, in nvme_compare()
3472 req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, in nvme_compare()
3493 iocb->ret = -ECANCELED; in nvme_flush_cancel()
3495 if (iocb->aiocb) { in nvme_flush_cancel()
3496 blk_aio_cancel_async(iocb->aiocb); in nvme_flush_cancel()
3497 iocb->aiocb = NULL; in nvme_flush_cancel()
3511 NvmeNamespace *ns = iocb->ns; in nvme_flush_ns_cb()
3514 iocb->ret = ret; in nvme_flush_ns_cb()
3515 iocb->req->status = NVME_WRITE_FAULT; in nvme_flush_ns_cb()
3517 } else if (iocb->ret < 0) { in nvme_flush_ns_cb()
3522 trace_pci_nvme_flush_ns(iocb->nsid); in nvme_flush_ns_cb()
3524 iocb->ns = NULL; in nvme_flush_ns_cb()
3525 iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); in nvme_flush_ns_cb()
3535 NvmeRequest *req = iocb->req; in nvme_do_flush()
3536 NvmeCtrl *n = nvme_ctrl(req); in nvme_do_flush() local
3539 if (iocb->ret < 0) { in nvme_do_flush()
3543 if (iocb->broadcast) { in nvme_do_flush()
3544 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_flush()
3545 iocb->ns = nvme_ns(n, i); in nvme_do_flush()
3546 if (iocb->ns) { in nvme_do_flush()
3547 iocb->nsid = i; in nvme_do_flush()
3553 if (!iocb->ns) { in nvme_do_flush()
3561 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_flush()
3565 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) in nvme_flush() argument
3568 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_flush()
3573 iocb->req = req; in nvme_flush()
3574 iocb->ret = 0; in nvme_flush()
3575 iocb->ns = NULL; in nvme_flush()
3576 iocb->nsid = 0; in nvme_flush()
3577 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_flush()
3579 if (!iocb->broadcast) { in nvme_flush()
3580 if (!nvme_nsid_valid(n, nsid)) { in nvme_flush()
3585 iocb->ns = nvme_ns(n, nsid); in nvme_flush()
3586 if (!iocb->ns) { in nvme_flush()
3591 iocb->nsid = nsid; in nvme_flush()
3594 req->aiocb = &iocb->common; in nvme_flush()
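/*
 * Illustrative sketch, not from the original source: a broadcast Flush
 * (NSID FFFFFFFFh) walks every possible namespace ID and flushes each
 * attached namespace in turn, much like the iocb->nsid cursor above. The
 * types and the flush_one callback are hypothetical stand-ins.
 */
#include <stddef.h>
#include <stdint.h>

struct example_ns { int attached; };

static void broadcast_flush(struct example_ns *ns_table, uint32_t max_nsid,
                            void (*flush_one)(struct example_ns *))
{
    for (uint32_t nsid = 1; nsid <= max_nsid; nsid++) {
        struct example_ns *ns = &ns_table[nsid - 1];

        if (ns->attached) {
            flush_one(ns);
        }
    }
}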
3605 static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) in nvme_read() argument
3607 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_read()
3608 NvmeNamespace *ns = req->ns; in nvme_read()
3609 uint64_t slba = le64_to_cpu(rw->slba); in nvme_read()
3610 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_read()
3611 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_read()
3615 BlockBackend *blk = ns->blkconf.blk; in nvme_read()
3618 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_read()
3621 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_read()
3624 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { in nvme_read()
3632 status = nvme_check_mdts(n, mapped_size); in nvme_read()
3642 if (ns->params.zoned) { in nvme_read()
3650 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_read()
3657 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_read()
3658 return nvme_dif_rw(n, req); in nvme_read()
3661 status = nvme_map_data(n, nlb, req); in nvme_read()
3668 block_acct_start(blk_get_stats(blk), &req->acct, data_size, in nvme_read()
3678 static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba, in nvme_do_write_fdp() argument
3681 NvmeNamespace *ns = req->ns; in nvme_do_write_fdp()
3682 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_do_write_fdp()
3684 uint32_t dw12 = le32_to_cpu(req->cmd.cdw12); in nvme_do_write_fdp()
3686 uint16_t pid = le16_to_cpu(rw->dspec); in nvme_do_write_fdp()
3696 ruhid = ns->fdp.phs[ph]; in nvme_do_write_fdp()
3697 ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg]; in nvme_do_write_fdp()
3699 nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size); in nvme_do_write_fdp()
3700 nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size); in nvme_do_write_fdp()
3703 if (nlb < ru->ruamw) { in nvme_do_write_fdp()
3704 ru->ruamw -= nlb; in nvme_do_write_fdp()
3708 nlb -= ru->ruamw; in nvme_do_write_fdp()
3709 nvme_update_ruh(n, ns, pid); in nvme_do_write_fdp()
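/*
 * Illustrative sketch, not from the original source: the FDP write
 * accounting idea used above. A reclaim unit tracks how many logical blocks
 * it can still accept (ruamw); when a write crosses that limit, the
 * placement handle is moved to a fresh reclaim unit and the remainder is
 * charged there. next_reclaim_unit() is a hypothetical stand-in assumed to
 * replenish ruamw with the size of the new unit.
 */
#include <stdint.h>

struct reclaim_unit { uint64_t ruamw; };

static void account_fdp_write(struct reclaim_unit *ru, uint64_t nlb,
                              void (*next_reclaim_unit)(struct reclaim_unit *))
{
    while (nlb) {
        if (nlb < ru->ruamw) {
            ru->ruamw -= nlb;
            break;
        }
        nlb -= ru->ruamw;          /* blocks spilling into the next unit */
        next_reclaim_unit(ru);     /* assumed to replenish ru->ruamw */
    }
}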
3713 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, in nvme_do_write() argument
3716 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_do_write()
3717 NvmeNamespace *ns = req->ns; in nvme_do_write()
3718 uint64_t slba = le64_to_cpu(rw->slba); in nvme_do_write()
3719 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_do_write()
3720 uint16_t ctrl = le16_to_cpu(rw->control); in nvme_do_write()
3726 NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; in nvme_do_write()
3727 BlockBackend *blk = ns->blkconf.blk; in nvme_do_write()
3730 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_do_write()
3733 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3736 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { in nvme_do_write()
3737 mapped_size -= nvme_m2b(ns, nlb); in nvme_do_write()
3742 trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), in nvme_do_write()
3746 status = nvme_check_mdts(n, mapped_size); in nvme_do_write()
3757 if (ns->params.zoned) { in nvme_do_write()
3764 if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_do_write()
3768 if (unlikely(slba != zone->d.zslba)) { in nvme_do_write()
3769 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); in nvme_do_write()
3774 if (n->params.zasl && in nvme_do_write()
3775 data_size > (uint64_t)n->page_size << n->params.zasl) { in nvme_do_write()
3780 slba = zone->w_ptr; in nvme_do_write()
3781 rw->slba = cpu_to_le64(slba); in nvme_do_write()
3782 res->slba = cpu_to_le64(slba); in nvme_do_write()
3784 switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3794 uint32_t reftag = le32_to_cpu(rw->reftag); in nvme_do_write()
3795 rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); in nvme_do_write()
3814 status = nvme_zrm_auto(n, ns, zone); in nvme_do_write()
3819 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_do_write()
3820 zone->w_ptr += nlb; in nvme_do_write()
3822 } else if (ns->endgrp && ns->endgrp->fdp.enabled) { in nvme_do_write()
3823 nvme_do_write_fdp(n, req, slba, nlb); in nvme_do_write()
3828 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3829 return nvme_dif_rw(n, req); in nvme_do_write()
3833 status = nvme_map_data(n, nlb, req); in nvme_do_write()
3838 block_acct_start(blk_get_stats(blk), &req->acct, data_size, in nvme_do_write()
3842 req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, in nvme_do_write()
3854 static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req) in nvme_write() argument
3856 return nvme_do_write(n, req, false, false); in nvme_write()
3859 static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req) in nvme_write_zeroes() argument
3861 return nvme_do_write(n, req, false, true); in nvme_write_zeroes()
3864 static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req) in nvme_zone_append() argument
3866 return nvme_do_write(n, req, true, false); in nvme_zone_append()
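/*
 * Illustrative sketch, not from the original source: the steps specific to
 * Zone Append in the write path above. The transfer size is bounded by ZASL
 * (a power of two in units of the minimum page size), the command must
 * target the zone start LBA, and the LBA actually written (and returned to
 * the host) is the zone's current write pointer.
 */
#include <stdbool.h>
#include <stdint.h>

struct zone_example { uint64_t zslba; uint64_t w_ptr; };

static bool zone_append_resolve(const struct zone_example *z, uint64_t slba,
                                uint64_t data_size, uint64_t page_size,
                                uint8_t zasl, uint64_t *out_slba)
{
    if (slba != z->zslba) {
        return false;                    /* must point at the zone start */
    }
    if (zasl && data_size > (page_size << zasl)) {
        return false;                    /* exceeds Zone Append Size Limit */
    }
    *out_slba = z->w_ptr;                /* LBA reported in the completion */
    return true;
}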
3872 uint32_t dw10 = le32_to_cpu(c->cdw10); in nvme_get_mgmt_zone_slba_idx()
3873 uint32_t dw11 = le32_to_cpu(c->cdw11); in nvme_get_mgmt_zone_slba_idx()
3875 if (!ns->params.zoned) { in nvme_get_mgmt_zone_slba_idx()
3876 trace_pci_nvme_err_invalid_opc(c->opcode); in nvme_get_mgmt_zone_slba_idx()
3881 if (unlikely(*slba >= ns->id_ns.nsze)) { in nvme_get_mgmt_zone_slba_idx()
3882 trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); in nvme_get_mgmt_zone_slba_idx()
3888 assert(*zone_idx < ns->num_zones); in nvme_get_mgmt_zone_slba_idx()
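/*
 * Illustrative sketch, not from the original source: resolving a starting
 * LBA to a zone index, assuming all zones share one size. With a
 * power-of-two zone size the division collapses to a shift.
 */
#include <stdint.h>

static uint32_t zone_index(uint64_t slba, uint64_t zone_size,
                           int zone_size_log2)
{
    return zone_size_log2 > 0 ? (uint32_t)(slba >> zone_size_log2)
                              : (uint32_t)(slba / zone_size);
}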
3907 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; in nvme_open_zone()
3910 if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) { in nvme_open_zone()
3911 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); in nvme_open_zone()
3917 if (zone->w_ptr % ns->zns.zrwafg) { in nvme_open_zone()
3964 zone->d.za |= NVME_ZA_ZD_EXT_VALID; in nvme_set_zd_ext()
4017 QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { in nvme_do_zone_op()
4026 QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { in nvme_do_zone_op()
4034 QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { in nvme_do_zone_op()
4043 QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { in nvme_do_zone_op()
4053 for (i = 0; i < ns->num_zones; i++, zone++) { in nvme_do_zone_op()
4081 NvmeRequest *req = iocb->req; in nvme_zone_reset_cancel()
4082 NvmeNamespace *ns = req->ns; in nvme_zone_reset_cancel()
4084 iocb->idx = ns->num_zones; in nvme_zone_reset_cancel()
4086 iocb->ret = -ECANCELED; in nvme_zone_reset_cancel()
4088 if (iocb->aiocb) { in nvme_zone_reset_cancel()
4089 blk_aio_cancel_async(iocb->aiocb); in nvme_zone_reset_cancel()
4090 iocb->aiocb = NULL; in nvme_zone_reset_cancel()
4104 NvmeRequest *req = iocb->req; in nvme_zone_reset_epilogue_cb()
4105 NvmeNamespace *ns = req->ns; in nvme_zone_reset_epilogue_cb()
4109 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_zone_reset_epilogue_cb()
4113 moff = nvme_moff(ns, iocb->zone->d.zslba); in nvme_zone_reset_epilogue_cb()
4114 count = nvme_m2b(ns, ns->zone_size); in nvme_zone_reset_epilogue_cb()
4116 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, in nvme_zone_reset_epilogue_cb()
4128 NvmeRequest *req = iocb->req; in nvme_zone_reset_cb()
4129 NvmeNamespace *ns = req->ns; in nvme_zone_reset_cb()
4131 if (iocb->ret < 0) { in nvme_zone_reset_cb()
4134 iocb->ret = ret; in nvme_zone_reset_cb()
4138 if (iocb->zone) { in nvme_zone_reset_cb()
4139 nvme_zrm_reset(ns, iocb->zone); in nvme_zone_reset_cb()
4141 if (!iocb->all) { in nvme_zone_reset_cb()
4146 while (iocb->idx < ns->num_zones) { in nvme_zone_reset_cb()
4147 NvmeZone *zone = &ns->zone_array[iocb->idx++]; in nvme_zone_reset_cb()
4151 if (!iocb->all) { in nvme_zone_reset_cb()
4161 iocb->zone = zone; in nvme_zone_reset_cb()
4168 trace_pci_nvme_zns_zone_reset(zone->d.zslba); in nvme_zone_reset_cb()
4170 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, in nvme_zone_reset_cb()
4171 nvme_l2b(ns, zone->d.zslba), in nvme_zone_reset_cb()
4172 nvme_l2b(ns, ns->zone_size), in nvme_zone_reset_cb()
4180 iocb->aiocb = NULL; in nvme_zone_reset_cb()
4182 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_zone_reset_cb()
4186 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone, in nvme_zone_mgmt_send_zrwa_flush() argument
4189 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_send_zrwa_flush()
4190 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); in nvme_zone_mgmt_send_zrwa_flush()
4191 uint64_t wp = zone->d.wp; in nvme_zone_mgmt_send_zrwa_flush()
4192 uint32_t nlb = elba - wp + 1; in nvme_zone_mgmt_send_zrwa_flush()
4200 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_zone_mgmt_send_zrwa_flush()
4204 if (elba < wp || elba > wp + ns->zns.zrwas) { in nvme_zone_mgmt_send_zrwa_flush()
4208 if (nlb % ns->zns.zrwafg) { in nvme_zone_mgmt_send_zrwa_flush()
4212 status = nvme_zrm_auto(n, ns, zone); in nvme_zone_mgmt_send_zrwa_flush()
4217 zone->w_ptr += nlb; in nvme_zone_mgmt_send_zrwa_flush()
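/*
 * Illustrative sketch, not from the original source: the ZRWA explicit
 * flush checks mirrored from above. The end LBA must fall inside the zone
 * random write area that starts at the write pointer, and the flushed
 * length must be a multiple of the ZRWA flush granularity.
 */
#include <stdbool.h>
#include <stdint.h>

static bool zrwa_flush_ok(uint64_t wp, uint64_t elba,
                          uint32_t zrwas, uint32_t zrwafg)
{
    if (elba < wp || elba > wp + zrwas) {
        return false;                    /* outside the ZRWA window */
    }
    return ((elba - wp + 1) % zrwafg) == 0;
}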
4224 static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) in nvme_zone_mgmt_send() argument
4226 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; in nvme_zone_mgmt_send()
4227 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_send()
4234 uint8_t action = cmd->zsa; in nvme_zone_mgmt_send()
4238 all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL; in nvme_zone_mgmt_send()
4240 req->status = NVME_SUCCESS; in nvme_zone_mgmt_send()
4243 status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx); in nvme_zone_mgmt_send()
4249 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_send()
4250 if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) { in nvme_zone_mgmt_send()
4251 trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); in nvme_zone_mgmt_send()
4284 iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, in nvme_zone_mgmt_send()
4287 iocb->req = req; in nvme_zone_mgmt_send()
4288 iocb->ret = 0; in nvme_zone_mgmt_send()
4289 iocb->all = all; in nvme_zone_mgmt_send()
4290 iocb->idx = zone_idx; in nvme_zone_mgmt_send()
4291 iocb->zone = NULL; in nvme_zone_mgmt_send()
4293 req->aiocb = &iocb->common; in nvme_zone_mgmt_send()
4308 if (all || !ns->params.zd_extension_size) { in nvme_zone_mgmt_send()
4312 status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); in nvme_zone_mgmt_send()
4330 return nvme_zone_mgmt_send_zrwa_flush(n, zone, slba, req); in nvme_zone_mgmt_send()
4339 zone->d.za); in nvme_zone_mgmt_send()
4374 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) in nvme_zone_mgmt_recv() argument
4376 NvmeCmd *cmd = &req->cmd; in nvme_zone_mgmt_recv()
4377 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_recv()
4378 /* cdw12 is zero-based number of dwords to return. Convert to bytes */ in nvme_zone_mgmt_recv()
4379 uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; in nvme_zone_mgmt_recv()
4380 uint32_t dw13 = le32_to_cpu(cmd->cdw13); in nvme_zone_mgmt_recv()
4392 req->status = NVME_SUCCESS; in nvme_zone_mgmt_recv()
4403 if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { in nvme_zone_mgmt_recv()
4416 status = nvme_check_mdts(n, data_size); in nvme_zone_mgmt_recv()
4425 zone_entry_sz += ns->params.zd_extension_size; in nvme_zone_mgmt_recv()
4428 max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; in nvme_zone_mgmt_recv()
4431 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_recv()
4432 for (i = zone_idx; i < ns->num_zones; i++) { in nvme_zone_mgmt_recv()
4441 header->nr_zones = cpu_to_le64(nr_zones); in nvme_zone_mgmt_recv()
4444 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { in nvme_zone_mgmt_recv()
4445 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_recv()
4450 z->zt = zone->d.zt; in nvme_zone_mgmt_recv()
4451 z->zs = zone->d.zs; in nvme_zone_mgmt_recv()
4452 z->zcap = cpu_to_le64(zone->d.zcap); in nvme_zone_mgmt_recv()
4453 z->zslba = cpu_to_le64(zone->d.zslba); in nvme_zone_mgmt_recv()
4454 z->za = zone->d.za; in nvme_zone_mgmt_recv()
4457 z->wp = cpu_to_le64(zone->d.wp); in nvme_zone_mgmt_recv()
4459 z->wp = cpu_to_le64(~0ULL); in nvme_zone_mgmt_recv()
4463 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { in nvme_zone_mgmt_recv()
4465 ns->params.zd_extension_size); in nvme_zone_mgmt_recv()
4467 buf_p += ns->params.zd_extension_size; in nvme_zone_mgmt_recv()
4470 max_zones--; in nvme_zone_mgmt_recv()
4474 status = nvme_c2h(n, (uint8_t *)buf, data_size, req); in nvme_zone_mgmt_recv()
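/*
 * Illustrative sketch, not from the original source: the buffer math behind
 * the zone report above. CDW12 carries a zero-based dword count, extended
 * reports grow each entry by the zone descriptor extension size, and the
 * header is carved off before computing how many zones fit. The struct
 * sizes below are placeholders, not taken from the file.
 */
#include <stdint.h>

#define EX_ZONE_DESCR_SIZE  64u   /* assumed size of one zone descriptor */
#define EX_REPORT_HDR_SIZE  64u   /* assumed size of the report header   */

static uint32_t max_report_zones(uint32_t cdw12, int extended,
                                 uint32_t zd_ext_size)
{
    uint32_t data_size = (cdw12 + 1) << 2;        /* dwords -> bytes */
    uint32_t entry_sz = EX_ZONE_DESCR_SIZE + (extended ? zd_ext_size : 0);

    if (data_size <= EX_REPORT_HDR_SIZE) {
        return 0;
    }
    return (data_size - EX_REPORT_HDR_SIZE) / entry_sz;
}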
4481 static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req, in nvme_io_mgmt_recv_ruhs() argument
4484 NvmeNamespace *ns = req->ns; in nvme_io_mgmt_recv_ruhs()
4493 if (!n->subsys) { in nvme_io_mgmt_recv_ruhs()
4497 if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) { in nvme_io_mgmt_recv_ruhs()
4501 if (!n->subsys->endgrp.fdp.enabled) { in nvme_io_mgmt_recv_ruhs()
4505 endgrp = ns->endgrp; in nvme_io_mgmt_recv_ruhs()
4507 nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; in nvme_io_mgmt_recv_ruhs()
4516 hdr->nruhsd = cpu_to_le16(nruhsd); in nvme_io_mgmt_recv_ruhs()
4518 ruhid = ns->fdp.phs; in nvme_io_mgmt_recv_ruhs()
4520 for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) { in nvme_io_mgmt_recv_ruhs()
4521 NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid]; in nvme_io_mgmt_recv_ruhs()
4523 for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) { in nvme_io_mgmt_recv_ruhs()
4526 ruhsd->pid = cpu_to_le16(pid); in nvme_io_mgmt_recv_ruhs()
4527 ruhsd->ruhid = *ruhid; in nvme_io_mgmt_recv_ruhs()
4528 ruhsd->earutr = 0; in nvme_io_mgmt_recv_ruhs()
4529 ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw); in nvme_io_mgmt_recv_ruhs()
4533 return nvme_c2h(n, buf, trans_len, req); in nvme_io_mgmt_recv_ruhs()
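/*
 * Illustrative sketch, not from the original source: sizing the I/O
 * Management Receive (reclaim unit handle status) reply seen above - one
 * descriptor per (placement handle, reclaim group) pair, preceded by a
 * fixed header. The sizes are placeholders.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_RUHS_HDR_SIZE    8u    /* assumed header size     */
#define EX_RUHS_DESCR_SIZE 16u    /* assumed descriptor size */

static size_t ruhs_reply_size(uint16_t nphs, uint16_t nrg)
{
    uint32_t nruhsd = (uint32_t)nphs * nrg;   /* one entry per PH x RG */

    return EX_RUHS_HDR_SIZE + (size_t)nruhsd * EX_RUHS_DESCR_SIZE;
}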
4536 static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) in nvme_io_mgmt_recv() argument
4538 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_recv()
4539 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_recv()
4540 uint32_t numd = le32_to_cpu(cmd->cdw11); in nvme_io_mgmt_recv()
4548 return nvme_io_mgmt_recv_ruhs(n, req, len); in nvme_io_mgmt_recv()
4554 static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl *n, NvmeRequest *req) in nvme_io_mgmt_send_ruh_update() argument
4556 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_send_ruh_update()
4557 NvmeNamespace *ns = req->ns; in nvme_io_mgmt_send_ruh_update()
4558 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_send_ruh_update()
4565 if (!ns->endgrp || !ns->endgrp->fdp.enabled) { in nvme_io_mgmt_send_ruh_update()
4569 maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh; in nvme_io_mgmt_send_ruh_update()
4577 ret = nvme_h2c(n, pids, npid * sizeof(uint16_t), req); in nvme_io_mgmt_send_ruh_update()
4583 if (!nvme_update_ruh(n, ns, pids[i])) { in nvme_io_mgmt_send_ruh_update()
4591 static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req) in nvme_io_mgmt_send() argument
4593 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_send()
4594 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_send()
4601 return nvme_io_mgmt_send_ruh_update(n, req); in nvme_io_mgmt_send()
4607 static uint16_t __nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req) in __nvme_io_cmd_nvm() argument
4609 switch (req->cmd.opcode) { in __nvme_io_cmd_nvm()
4611 return nvme_write(n, req); in __nvme_io_cmd_nvm()
4613 return nvme_read(n, req); in __nvme_io_cmd_nvm()
4615 return nvme_compare(n, req); in __nvme_io_cmd_nvm()
4617 return nvme_write_zeroes(n, req); in __nvme_io_cmd_nvm()
4619 return nvme_dsm(n, req); in __nvme_io_cmd_nvm()
4621 return nvme_verify(n, req); in __nvme_io_cmd_nvm()
4623 return nvme_copy(n, req); in __nvme_io_cmd_nvm()
4625 return nvme_io_mgmt_recv(n, req); in __nvme_io_cmd_nvm()
4627 return nvme_io_mgmt_send(n, req); in __nvme_io_cmd_nvm()
4633 static uint16_t nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req) in nvme_io_cmd_nvm() argument
4635 if (!(n->cse.iocs.nvm[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_io_cmd_nvm()
4636 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); in nvme_io_cmd_nvm()
4640 return __nvme_io_cmd_nvm(n, req); in nvme_io_cmd_nvm()
4643 static uint16_t nvme_io_cmd_zoned(NvmeCtrl *n, NvmeRequest *req) in nvme_io_cmd_zoned() argument
4645 if (!(n->cse.iocs.zoned[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_io_cmd_zoned()
4646 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); in nvme_io_cmd_zoned()
4650 switch (req->cmd.opcode) { in nvme_io_cmd_zoned()
4652 return nvme_zone_append(n, req); in nvme_io_cmd_zoned()
4654 return nvme_zone_mgmt_send(n, req); in nvme_io_cmd_zoned()
4656 return nvme_zone_mgmt_recv(n, req); in nvme_io_cmd_zoned()
4659 return __nvme_io_cmd_nvm(n, req); in nvme_io_cmd_zoned()
4662 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) in nvme_io_cmd() argument
4665 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_io_cmd()
4668 req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); in nvme_io_cmd()
4679 * semantics in some other command set - does an NSID of FFFFFFFFh then in nvme_io_cmd()
4690 if (req->cmd.opcode == NVME_CMD_FLUSH) { in nvme_io_cmd()
4691 return nvme_flush(n, req); in nvme_io_cmd()
4694 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_io_cmd()
4698 ns = nvme_ns(n, nsid); in nvme_io_cmd()
4703 if (ns->status) { in nvme_io_cmd()
4704 return ns->status; in nvme_io_cmd()
4707 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { in nvme_io_cmd()
4711 req->ns = ns; in nvme_io_cmd()
4713 switch (ns->csi) { in nvme_io_cmd()
4715 return nvme_io_cmd_nvm(n, req); in nvme_io_cmd()
4717 return nvme_io_cmd_zoned(n, req); in nvme_io_cmd()
4726 NvmeCtrl *n = cq->ctrl; in nvme_cq_notifier() local
4734 if (cq->tail == cq->head) { in nvme_cq_notifier()
4735 if (cq->irq_enabled) { in nvme_cq_notifier()
4736 n->cq_pending--; in nvme_cq_notifier()
4739 nvme_irq_deassert(n, cq); in nvme_cq_notifier()
4742 qemu_bh_schedule(cq->bh); in nvme_cq_notifier()
4747 NvmeCtrl *n = cq->ctrl; in nvme_init_cq_ioeventfd() local
4748 uint16_t offset = (cq->cqid << 3) + (1 << 2); in nvme_init_cq_ioeventfd()
4751 ret = event_notifier_init(&cq->notifier, 0); in nvme_init_cq_ioeventfd()
4756 event_notifier_set_handler(&cq->notifier, nvme_cq_notifier); in nvme_init_cq_ioeventfd()
4757 memory_region_add_eventfd(&n->iomem, in nvme_init_cq_ioeventfd()
4758 0x1000 + offset, 4, false, 0, &cq->notifier); in nvme_init_cq_ioeventfd()
4776 NvmeCtrl *n = sq->ctrl; in nvme_init_sq_ioeventfd() local
4777 uint16_t offset = sq->sqid << 3; in nvme_init_sq_ioeventfd()
4780 ret = event_notifier_init(&sq->notifier, 0); in nvme_init_sq_ioeventfd()
4785 event_notifier_set_handler(&sq->notifier, nvme_sq_notifier); in nvme_init_sq_ioeventfd()
4786 memory_region_add_eventfd(&n->iomem, in nvme_init_sq_ioeventfd()
4787 0x1000 + offset, 4, false, 0, &sq->notifier); in nvme_init_sq_ioeventfd()
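/*
 * Illustrative sketch, not from the original source: the BAR0 offsets
 * registered for the queue ioeventfds above, assuming a doorbell stride
 * (CAP.DSTRD) of zero so each doorbell is 4 bytes and every queue pair owns
 * an 8-byte slot starting at offset 0x1000.
 */
#include <stdint.h>

static uint64_t sq_tail_doorbell_offset(uint16_t sqid)
{
    return 0x1000 + ((uint64_t)sqid << 3);
}

static uint64_t cq_head_doorbell_offset(uint16_t cqid)
{
    return 0x1000 + ((uint64_t)cqid << 3) + (1 << 2);
}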
4792 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) in nvme_free_sq() argument
4794 uint16_t offset = sq->sqid << 3; in nvme_free_sq()
4796 n->sq[sq->sqid] = NULL; in nvme_free_sq()
4797 qemu_bh_delete(sq->bh); in nvme_free_sq()
4798 if (sq->ioeventfd_enabled) { in nvme_free_sq()
4799 memory_region_del_eventfd(&n->iomem, in nvme_free_sq()
4800 0x1000 + offset, 4, false, 0, &sq->notifier); in nvme_free_sq()
4801 event_notifier_set_handler(&sq->notifier, NULL); in nvme_free_sq()
4802 event_notifier_cleanup(&sq->notifier); in nvme_free_sq()
4804 g_free(sq->io_req); in nvme_free_sq()
4805 if (sq->sqid) { in nvme_free_sq()
4810 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) in nvme_del_sq() argument
4812 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; in nvme_del_sq()
4816 uint16_t qid = le16_to_cpu(c->qid); in nvme_del_sq()
4818 if (unlikely(!qid || nvme_check_sqid(n, qid))) { in nvme_del_sq()
4825 sq = n->sq[qid]; in nvme_del_sq()
4826 while (!QTAILQ_EMPTY(&sq->out_req_list)) { in nvme_del_sq()
4827 r = QTAILQ_FIRST(&sq->out_req_list); in nvme_del_sq()
4828 assert(r->aiocb); in nvme_del_sq()
4829 r->status = NVME_CMD_ABORT_SQ_DEL; in nvme_del_sq()
4830 blk_aio_cancel(r->aiocb); in nvme_del_sq()
4833 assert(QTAILQ_EMPTY(&sq->out_req_list)); in nvme_del_sq()
4835 if (!nvme_check_cqid(n, sq->cqid)) { in nvme_del_sq()
4836 cq = n->cq[sq->cqid]; in nvme_del_sq()
4837 QTAILQ_REMOVE(&cq->sq_list, sq, entry); in nvme_del_sq()
4840 QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { in nvme_del_sq()
4841 if (r->sq == sq) { in nvme_del_sq()
4842 QTAILQ_REMOVE(&cq->req_list, r, entry); in nvme_del_sq()
4843 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); in nvme_del_sq()
4848 nvme_free_sq(sq, n); in nvme_del_sq()
4852 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, in nvme_init_sq() argument
4858 sq->ctrl = n; in nvme_init_sq()
4859 sq->dma_addr = dma_addr; in nvme_init_sq()
4860 sq->sqid = sqid; in nvme_init_sq()
4861 sq->size = size; in nvme_init_sq()
4862 sq->cqid = cqid; in nvme_init_sq()
4863 sq->head = sq->tail = 0; in nvme_init_sq()
4864 sq->io_req = g_new0(NvmeRequest, sq->size); in nvme_init_sq()
4866 QTAILQ_INIT(&sq->req_list); in nvme_init_sq()
4867 QTAILQ_INIT(&sq->out_req_list); in nvme_init_sq()
4868 for (i = 0; i < sq->size; i++) { in nvme_init_sq()
4869 sq->io_req[i].sq = sq; in nvme_init_sq()
4870 QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); in nvme_init_sq()
4873 sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq, in nvme_init_sq()
4874 &DEVICE(sq->ctrl)->mem_reentrancy_guard); in nvme_init_sq()
4876 if (n->dbbuf_enabled) { in nvme_init_sq()
4877 sq->db_addr = n->dbbuf_dbs + (sqid << 3); in nvme_init_sq()
4878 sq->ei_addr = n->dbbuf_eis + (sqid << 3); in nvme_init_sq()
4880 if (n->params.ioeventfd && sq->sqid != 0) { in nvme_init_sq()
4882 sq->ioeventfd_enabled = true; in nvme_init_sq()
4887 assert(n->cq[cqid]); in nvme_init_sq()
4888 cq = n->cq[cqid]; in nvme_init_sq()
4889 QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); in nvme_init_sq()
4890 n->sq[sqid] = sq; in nvme_init_sq()
4893 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) in nvme_create_sq() argument
4896 NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; in nvme_create_sq()
4898 uint16_t cqid = le16_to_cpu(c->cqid); in nvme_create_sq()
4899 uint16_t sqid = le16_to_cpu(c->sqid); in nvme_create_sq()
4900 uint16_t qsize = le16_to_cpu(c->qsize); in nvme_create_sq()
4901 uint16_t qflags = le16_to_cpu(c->sq_flags); in nvme_create_sq()
4902 uint64_t prp1 = le64_to_cpu(c->prp1); in nvme_create_sq()
4906 if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { in nvme_create_sq()
4910 if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) { in nvme_create_sq()
4914 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { in nvme_create_sq()
4918 if (unlikely(prp1 & (n->page_size - 1))) { in nvme_create_sq()
4927 nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); in nvme_create_sq()
4940 BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); in nvme_set_blk_stats()
4942 stats->units_read += s->nr_bytes[BLOCK_ACCT_READ]; in nvme_set_blk_stats()
4943 stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE]; in nvme_set_blk_stats()
4944 stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; in nvme_set_blk_stats()
4945 stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; in nvme_set_blk_stats()
4948 static uint16_t nvme_ocp_extended_smart_info(NvmeCtrl *n, uint8_t rae, in nvme_ocp_extended_smart_info() argument
4963 ns = nvme_ns(n, i); in nvme_ocp_extended_smart_info()
4980 nvme_clear_events(n, NVME_AER_TYPE_SMART); in nvme_ocp_extended_smart_info()
4983 trans_len = MIN(sizeof(smart_l) - off, buf_len); in nvme_ocp_extended_smart_info()
4984 return nvme_c2h(n, (uint8_t *) &smart_l + off, trans_len, req); in nvme_ocp_extended_smart_info()
4987 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, in nvme_smart_info() argument
4990 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_smart_info()
5003 ns = nvme_ns(n, nsid); in nvme_smart_info()
5012 ns = nvme_ns(n, i); in nvme_smart_info()
5020 trans_len = MIN(sizeof(smart) - off, buf_len); in nvme_smart_info()
5021 smart.critical_warning = n->smart_critical_warning; in nvme_smart_info()
5031 smart.temperature = cpu_to_le16(n->temperature); in nvme_smart_info()
5033 if ((n->temperature >= n->features.temp_thresh_hi) || in nvme_smart_info()
5034 (n->temperature <= n->features.temp_thresh_low)) { in nvme_smart_info()
5040 cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); in nvme_smart_info()
5043 nvme_clear_events(n, NVME_AER_TYPE_SMART); in nvme_smart_info()
5046 return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); in nvme_smart_info()
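/*
 * Illustrative sketch, not from the original source: the Get Log Page
 * slicing used by the SMART/firmware/error log handlers above. The
 * host-supplied offset is applied to the in-memory log structure and the
 * transfer is clamped to whatever of the page remains.
 */
#include <stddef.h>
#include <string.h>

static size_t copy_log_slice(void *dst, size_t buf_len,
                             const void *page, size_t page_size, size_t off)
{
    size_t trans_len;

    if (off > page_size) {
        return 0;                        /* offset beyond the log page */
    }
    trans_len = page_size - off;
    if (trans_len > buf_len) {
        trans_len = buf_len;
    }
    memcpy(dst, (const char *)page + off, trans_len);
    return trans_len;
}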
5049 static uint16_t nvme_endgrp_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, in nvme_endgrp_info() argument
5052 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_endgrp_info()
5058 if (!n->subsys || endgrpid != 0x1) { in nvme_endgrp_info()
5067 NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); in nvme_endgrp_info()
5085 buf_len = MIN(sizeof(info) - off, buf_len); in nvme_endgrp_info()
5087 return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req); in nvme_endgrp_info()
5091 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, in nvme_fw_log_info() argument
5104 trans_len = MIN(sizeof(fw_log) - off, buf_len); in nvme_fw_log_info()
5106 return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req); in nvme_fw_log_info()
5109 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, in nvme_error_info() argument
5120 nvme_clear_events(n, NVME_AER_TYPE_ERROR); in nvme_error_info()
5124 trans_len = MIN(sizeof(errlog) - off, buf_len); in nvme_error_info()
5126 return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req); in nvme_error_info()
5129 static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, in nvme_changed_nslist() argument
5142 trans_len = MIN(sizeof(nslist) - off, buf_len); in nvme_changed_nslist()
5144 while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != in nvme_changed_nslist()
5157 clear_bit(nsid, n->changed_nsids); in nvme_changed_nslist()
5165 bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); in nvme_changed_nslist()
5169 nvme_clear_events(n, NVME_AER_TYPE_NOTICE); in nvme_changed_nslist()
5172 return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req); in nvme_changed_nslist()
5175 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, in nvme_cmd_effects() argument
5187 switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { in nvme_cmd_effects()
5189 iocs = n->cse.iocs.nvm; in nvme_cmd_effects()
5195 iocs = n->cse.iocs.nvm; in nvme_cmd_effects()
5198 iocs = n->cse.iocs.zoned; in nvme_cmd_effects()
5205 memcpy(log.acs, n->cse.acs, sizeof(log.acs)); in nvme_cmd_effects()
5211 trans_len = MIN(sizeof(log) - off, buf_len); in nvme_cmd_effects()
5213 return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); in nvme_cmd_effects()
5216 static uint16_t nvme_vendor_specific_log(NvmeCtrl *n, uint8_t rae, in nvme_vendor_specific_log() argument
5222 if (n->params.ocp) { in nvme_vendor_specific_log()
5223 return nvme_ocp_extended_smart_info(n, rae, buf_len, off, req); in nvme_vendor_specific_log()
5240 static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, in nvme_fdp_confs() argument
5252 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_confs()
5256 endgrp = &n->subsys->endgrp; in nvme_fdp_confs()
5258 if (endgrp->fdp.enabled) { in nvme_fdp_confs()
5259 nruh = endgrp->fdp.nruh; in nvme_fdp_confs()
5271 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_confs()
5278 log->num_confs = cpu_to_le16(0); in nvme_fdp_confs()
5279 log->size = cpu_to_le32(log_size); in nvme_fdp_confs()
5281 hdr->descr_size = cpu_to_le16(fdp_descr_size); in nvme_fdp_confs()
5282 if (endgrp->fdp.enabled) { in nvme_fdp_confs()
5283 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1); in nvme_fdp_confs()
5284 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif); in nvme_fdp_confs()
5285 hdr->nrg = cpu_to_le16(endgrp->fdp.nrg); in nvme_fdp_confs()
5286 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); in nvme_fdp_confs()
5287 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); in nvme_fdp_confs()
5288 hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES); in nvme_fdp_confs()
5289 hdr->runs = cpu_to_le64(endgrp->fdp.runs); in nvme_fdp_confs()
5292 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; in nvme_fdp_confs()
5296 /* 1 bit for RUH in PIF -> 2 RUHs max. */ in nvme_fdp_confs()
5297 hdr->nrg = cpu_to_le16(1); in nvme_fdp_confs()
5298 hdr->nruh = cpu_to_le16(1); in nvme_fdp_confs()
5299 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); in nvme_fdp_confs()
5300 hdr->nnss = cpu_to_le32(1); in nvme_fdp_confs()
5301 hdr->runs = cpu_to_le64(96 * MiB); in nvme_fdp_confs()
5303 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; in nvme_fdp_confs()
5306 return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); in nvme_fdp_confs()
5309 static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid, in nvme_fdp_ruh_usage() argument
5322 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_ruh_usage()
5326 endgrp = &n->subsys->endgrp; in nvme_fdp_ruh_usage()
5328 if (!endgrp->fdp.enabled) { in nvme_fdp_ruh_usage()
5332 log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr); in nvme_fdp_ruh_usage()
5338 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_ruh_usage()
5344 ruh = endgrp->fdp.ruhs; in nvme_fdp_ruh_usage()
5345 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); in nvme_fdp_ruh_usage()
5347 for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) { in nvme_fdp_ruh_usage()
5348 ruhud->ruha = ruh->ruha; in nvme_fdp_ruh_usage()
5351 return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); in nvme_fdp_ruh_usage()
5354 static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, in nvme_fdp_stats() argument
5365 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_stats()
5369 if (!n->subsys->endgrp.fdp.enabled) { in nvme_fdp_stats()
5373 endgrp = &n->subsys->endgrp; in nvme_fdp_stats()
5375 trans_len = MIN(sizeof(log) - off, buf_len); in nvme_fdp_stats()
5378 log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw); in nvme_fdp_stats()
5379 log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw); in nvme_fdp_stats()
5380 log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe); in nvme_fdp_stats()
5382 return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req); in nvme_fdp_stats()
5385 static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid, in nvme_fdp_events() argument
5390 NvmeCmd *cmd = &req->cmd; in nvme_fdp_events()
5391 bool host_events = (cmd->cdw10 >> 8) & 0x1; in nvme_fdp_events()
5397 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_events()
5401 endgrp = &n->subsys->endgrp; in nvme_fdp_events()
5403 if (!endgrp->fdp.enabled) { in nvme_fdp_events()
5408 ebuf = &endgrp->fdp.host_events; in nvme_fdp_events()
5410 ebuf = &endgrp->fdp.ctrl_events; in nvme_fdp_events()
5413 log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent); in nvme_fdp_events()
5419 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_events()
5421 elog->num_events = cpu_to_le32(ebuf->nelems); in nvme_fdp_events()
5424 if (ebuf->nelems && ebuf->start == ebuf->next) { in nvme_fdp_events()
5425 unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start); in nvme_fdp_events()
5427 memcpy(event, &ebuf->events[ebuf->start], in nvme_fdp_events()
5429 memcpy(event + nelems, ebuf->events, in nvme_fdp_events()
5430 sizeof(NvmeFdpEvent) * ebuf->next); in nvme_fdp_events()
5431 } else if (ebuf->start < ebuf->next) { in nvme_fdp_events()
5432 memcpy(event, &ebuf->events[ebuf->start], in nvme_fdp_events()
5433 sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start)); in nvme_fdp_events()
5436 return nvme_c2h(n, (uint8_t *)elog + off, trans_len, req); in nvme_fdp_events()
5439 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) in nvme_get_log() argument
5441 NvmeCmd *cmd = &req->cmd; in nvme_get_log()
5443 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_get_log()
5444 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_get_log()
5445 uint32_t dw12 = le32_to_cpu(cmd->cdw12); in nvme_get_log()
5446 uint32_t dw13 = le32_to_cpu(cmd->cdw13); in nvme_get_log()
5450 uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; in nvme_get_log()
5471 status = nvme_check_mdts(n, len); in nvme_get_log()
5478 return nvme_error_info(n, rae, len, off, req); in nvme_get_log()
5480 return nvme_smart_info(n, rae, len, off, req); in nvme_get_log()
5482 return nvme_fw_log_info(n, len, off, req); in nvme_get_log()
5484 return nvme_vendor_specific_log(n, rae, len, off, req, lid); in nvme_get_log()
5486 return nvme_changed_nslist(n, rae, len, off, req); in nvme_get_log()
5488 return nvme_cmd_effects(n, csi, len, off, req); in nvme_get_log()
5490 return nvme_endgrp_info(n, rae, len, off, req); in nvme_get_log()
5492 return nvme_fdp_confs(n, lspi, len, off, req); in nvme_get_log()
5494 return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req); in nvme_get_log()
5496 return nvme_fdp_stats(n, lspi, len, off, req); in nvme_get_log()
5498 return nvme_fdp_events(n, lspi, len, off, req); in nvme_get_log()
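/*
 * Illustrative sketch, not from the original source: one plausible decode of
 * the Get Log Page dwords read above into log id, RAE, transfer length and
 * offset. The bit positions follow my reading of the NVMe specification and
 * should be treated as an assumption, not as the file's exact layout.
 */
#include <stdint.h>

struct get_log_fields {
    uint8_t  lid;
    uint8_t  rae;
    uint32_t len;    /* bytes */
    uint64_t off;    /* bytes */
};

static struct get_log_fields decode_get_log(uint32_t dw10, uint32_t dw11,
                                            uint32_t dw12, uint32_t dw13)
{
    struct get_log_fields f;
    uint32_t numdl = (dw10 >> 16) & 0xffff;   /* lower dword count bits */
    uint32_t numdu = dw11 & 0xffff;           /* upper dword count bits */

    f.lid = dw10 & 0xff;
    f.rae = (dw10 >> 15) & 0x1;
    f.len = (((numdu << 16) | numdl) + 1) << 2;   /* zero-based dwords */
    f.off = ((uint64_t)dw13 << 32) | dw12;
    return f;
}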
5505 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) in nvme_free_cq() argument
5507 PCIDevice *pci = PCI_DEVICE(n); in nvme_free_cq()
5508 uint16_t offset = (cq->cqid << 3) + (1 << 2); in nvme_free_cq()
5510 n->cq[cq->cqid] = NULL; in nvme_free_cq()
5511 qemu_bh_delete(cq->bh); in nvme_free_cq()
5512 if (cq->ioeventfd_enabled) { in nvme_free_cq()
5513 memory_region_del_eventfd(&n->iomem, in nvme_free_cq()
5514 0x1000 + offset, 4, false, 0, &cq->notifier); in nvme_free_cq()
5515 event_notifier_set_handler(&cq->notifier, NULL); in nvme_free_cq()
5516 event_notifier_cleanup(&cq->notifier); in nvme_free_cq()
5518 if (msix_enabled(pci) && cq->irq_enabled) { in nvme_free_cq()
5519 msix_vector_unuse(pci, cq->vector); in nvme_free_cq()
5521 if (cq->cqid) { in nvme_free_cq()
5526 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req) in nvme_del_cq() argument
5528 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; in nvme_del_cq()
5530 uint16_t qid = le16_to_cpu(c->qid); in nvme_del_cq()
5532 if (unlikely(!qid || nvme_check_cqid(n, qid))) { in nvme_del_cq()
5537 cq = n->cq[qid]; in nvme_del_cq()
5538 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { in nvme_del_cq()
5543 if (cq->irq_enabled && cq->tail != cq->head) { in nvme_del_cq()
5544 n->cq_pending--; in nvme_del_cq()
5547 nvme_irq_deassert(n, cq); in nvme_del_cq()
5549 nvme_free_cq(cq, n); in nvme_del_cq()
5553 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, in nvme_init_cq() argument
5557 PCIDevice *pci = PCI_DEVICE(n); in nvme_init_cq()
5563 cq->ctrl = n; in nvme_init_cq()
5564 cq->cqid = cqid; in nvme_init_cq()
5565 cq->size = size; in nvme_init_cq()
5566 cq->dma_addr = dma_addr; in nvme_init_cq()
5567 cq->phase = 1; in nvme_init_cq()
5568 cq->irq_enabled = irq_enabled; in nvme_init_cq()
5569 cq->vector = vector; in nvme_init_cq()
5570 cq->head = cq->tail = 0; in nvme_init_cq()
5571 QTAILQ_INIT(&cq->req_list); in nvme_init_cq()
5572 QTAILQ_INIT(&cq->sq_list); in nvme_init_cq()
5573 if (n->dbbuf_enabled) { in nvme_init_cq()
5574 cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2); in nvme_init_cq()
5575 cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2); in nvme_init_cq()
5577 if (n->params.ioeventfd && cqid != 0) { in nvme_init_cq()
5579 cq->ioeventfd_enabled = true; in nvme_init_cq()
5583 n->cq[cqid] = cq; in nvme_init_cq()
5584 cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq, in nvme_init_cq()
5585 &DEVICE(cq->ctrl)->mem_reentrancy_guard); in nvme_init_cq()
5588 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) in nvme_create_cq() argument
5591 NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; in nvme_create_cq()
5592 uint16_t cqid = le16_to_cpu(c->cqid); in nvme_create_cq()
5593 uint16_t vector = le16_to_cpu(c->irq_vector); in nvme_create_cq()
5594 uint16_t qsize = le16_to_cpu(c->qsize); in nvme_create_cq()
5595 uint16_t qflags = le16_to_cpu(c->cq_flags); in nvme_create_cq()
5596 uint64_t prp1 = le64_to_cpu(c->prp1); in nvme_create_cq()
5597 uint32_t cc = ldq_le_p(&n->bar.cc); in nvme_create_cq()
5609 if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) { in nvme_create_cq()
5613 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { in nvme_create_cq()
5617 if (unlikely(prp1 & (n->page_size - 1))) { in nvme_create_cq()
5621 if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) { in nvme_create_cq()
5625 if (unlikely(vector >= n->conf_msix_qsize)) { in nvme_create_cq()
5635 nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1, in nvme_create_cq()
5643 n->qs_created = true; in nvme_create_cq()
5647 static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req) in nvme_rpt_empty_id_struct() argument
5651 return nvme_c2h(n, id, sizeof(id), req); in nvme_rpt_empty_id_struct()
5654 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_ctrl() argument
5658 return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); in nvme_identify_ctrl()
5661 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_ctrl_csi() argument
5663 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ctrl_csi()
5667 trace_pci_nvme_identify_ctrl_csi(c->csi); in nvme_identify_ctrl_csi()
5669 switch (c->csi) { in nvme_identify_ctrl_csi()
5671 id_nvm->vsl = n->params.vsl; in nvme_identify_ctrl_csi()
5672 id_nvm->dmrl = NVME_ID_CTRL_NVM_DMRL_MAX; in nvme_identify_ctrl_csi()
5673 id_nvm->dmrsl = cpu_to_le32(n->dmrsl); in nvme_identify_ctrl_csi()
5674 id_nvm->dmsl = NVME_ID_CTRL_NVM_DMRL_MAX * n->dmrsl; in nvme_identify_ctrl_csi()
5678 ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; in nvme_identify_ctrl_csi()
5685 return nvme_c2h(n, id, sizeof(id), req); in nvme_identify_ctrl_csi()
5688 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) in nvme_identify_ns() argument
5691 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns()
5692 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns()
5696 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_identify_ns()
5700 ns = nvme_ns(n, nsid); in nvme_identify_ns()
5703 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns()
5705 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns()
5708 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns()
5712 if (active || ns->csi == NVME_CSI_NVM) { in nvme_identify_ns()
5713 return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); in nvme_identify_ns()
5719 static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, in nvme_identify_ctrl_list() argument
5722 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ctrl_list()
5723 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ctrl_list()
5724 uint16_t min_id = le16_to_cpu(c->ctrlid); in nvme_identify_ctrl_list()
5731 trace_pci_nvme_identify_ctrl_list(c->cns, min_id); in nvme_identify_ctrl_list()
5733 if (!n->subsys) { in nvme_identify_ctrl_list()
5742 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ctrl_list()
5748 for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { in nvme_identify_ctrl_list()
5749 ctrl = nvme_subsys_ctrl(n->subsys, cntlid); in nvme_identify_ctrl_list()
5763 return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); in nvme_identify_ctrl_list()
5766 static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_pri_ctrl_cap() argument
5768 trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid)); in nvme_identify_pri_ctrl_cap()
5770 return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap, in nvme_identify_pri_ctrl_cap()
5774 static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_sec_ctrl_list() argument
5776 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_sec_ctrl_list()
5777 uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid); in nvme_identify_sec_ctrl_list()
5778 uint16_t min_id = le16_to_cpu(c->ctrlid); in nvme_identify_sec_ctrl_list()
5779 uint8_t num_sec_ctrl = n->nr_sec_ctrls; in nvme_identify_sec_ctrl_list()
5784 if (n->sec_ctrl_list[i].scid >= min_id) { in nvme_identify_sec_ctrl_list()
5785 list.numcntl = MIN(num_sec_ctrl - i, 127); in nvme_identify_sec_ctrl_list()
5786 memcpy(&list.sec, n->sec_ctrl_list + i, in nvme_identify_sec_ctrl_list()
5794 return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); in nvme_identify_sec_ctrl_list()
5797 static uint16_t nvme_identify_ns_ind(NvmeCtrl *n, NvmeRequest *req, bool alloc) in nvme_identify_ns_ind() argument
5800 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_ind()
5801 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_ind()
5805 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_identify_ns_ind()
5809 ns = nvme_ns(n, nsid); in nvme_identify_ns_ind()
5812 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns_ind()
5814 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns_ind()
5817 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns_ind()
5821 return nvme_c2h(n, (uint8_t *)&ns->id_ns_ind, sizeof(NvmeIdNsInd), req); in nvme_identify_ns_ind()
5824 static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, in nvme_identify_ns_csi() argument
5828 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_csi()
5829 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_csi()
5831 trace_pci_nvme_identify_ns_csi(nsid, c->csi); in nvme_identify_ns_csi()
5833 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_identify_ns_csi()
5837 ns = nvme_ns(n, nsid); in nvme_identify_ns_csi()
5840 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns_csi()
5842 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns_csi()
5845 return nvme_rpt_empty_id_struct(n, req); in nvme_identify_ns_csi()
5849 if (c->csi == NVME_CSI_NVM) { in nvme_identify_ns_csi()
5850 return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm), in nvme_identify_ns_csi()
5852 } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { in nvme_identify_ns_csi()
5853 return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), in nvme_identify_ns_csi()
5860 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req, in nvme_identify_nslist() argument
5864 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_nslist()
5865 uint32_t min_nsid = le32_to_cpu(c->nsid); in nvme_identify_nslist()
5879 if (min_nsid >= NVME_NSID_BROADCAST - 1) { in nvme_identify_nslist()
5884 ns = nvme_ns(n, i); in nvme_identify_nslist()
5887 ns = nvme_subsys_ns(n->subsys, i); in nvme_identify_nslist()
5895 if (ns->params.nsid <= min_nsid) { in nvme_identify_nslist()
5898 list_ptr[j++] = cpu_to_le32(ns->params.nsid); in nvme_identify_nslist()
5904 return nvme_c2h(n, list, data_len, req); in nvme_identify_nslist()
5907 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req, in nvme_identify_nslist_csi() argument
5911 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_nslist_csi()
5912 uint32_t min_nsid = le32_to_cpu(c->nsid); in nvme_identify_nslist_csi()
5918 trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); in nvme_identify_nslist_csi()
5923 if (min_nsid >= NVME_NSID_BROADCAST - 1) { in nvme_identify_nslist_csi()
5927 if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { in nvme_identify_nslist_csi()
5932 ns = nvme_ns(n, i); in nvme_identify_nslist_csi()
5935 ns = nvme_subsys_ns(n->subsys, i); in nvme_identify_nslist_csi()
5943 if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { in nvme_identify_nslist_csi()
5946 list_ptr[j++] = cpu_to_le32(ns->params.nsid); in nvme_identify_nslist_csi()
5952 return nvme_c2h(n, list, data_len, req); in nvme_identify_nslist_csi()
5955 static uint16_t nvme_endurance_group_list(NvmeCtrl *n, NvmeRequest *req) in nvme_endurance_group_list() argument
5960 uint16_t endgid = le32_to_cpu(req->cmd.cdw11) & 0xffff; in nvme_endurance_group_list()
5963 * The current nvme-subsys only supports Endurance Group #1. in nvme_endurance_group_list()
5972 return nvme_c2h(n, list, sizeof(list), req); in nvme_endurance_group_list()
5975 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_ns_descr_list() argument
5978 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_descr_list()
5979 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_descr_list()
6001 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_identify_ns_descr_list()
6005 ns = nvme_ns(n, nsid); in nvme_identify_ns_descr_list()
6010 if (!qemu_uuid_is_null(&ns->params.uuid)) { in nvme_identify_ns_descr_list()
6013 memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); in nvme_identify_ns_descr_list()
6018 if (!nvme_nguid_is_null(&ns->params.nguid)) { in nvme_identify_ns_descr_list()
6021 memcpy(nguid.v, ns->params.nguid.data, NVME_NIDL_NGUID); in nvme_identify_ns_descr_list()
6026 if (ns->params.eui64) { in nvme_identify_ns_descr_list()
6029 eui64.v = cpu_to_be64(ns->params.eui64); in nvme_identify_ns_descr_list()
6036 csi.v = ns->csi; in nvme_identify_ns_descr_list()
6040 return nvme_c2h(n, list, sizeof(list), req); in nvme_identify_ns_descr_list()
6043 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req) in nvme_identify_cmd_set() argument
6053 return nvme_c2h(n, list, data_len, req); in nvme_identify_cmd_set()
6056 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) in nvme_identify() argument
6058 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify()
6060 trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), in nvme_identify()
6061 c->csi); in nvme_identify()
6063 switch (c->cns) { in nvme_identify()
6065 return nvme_identify_ns(n, req, true); in nvme_identify()
6067 return nvme_identify_ns(n, req, false); in nvme_identify()
6069 return nvme_identify_ctrl_list(n, req, true); in nvme_identify()
6071 return nvme_identify_ctrl_list(n, req, false); in nvme_identify()
6073 return nvme_identify_pri_ctrl_cap(n, req); in nvme_identify()
6075 return nvme_identify_sec_ctrl_list(n, req); in nvme_identify()
6077 return nvme_identify_ns_csi(n, req, true); in nvme_identify()
6079 return nvme_identify_ns_ind(n, req, false); in nvme_identify()
6081 return nvme_identify_ns_ind(n, req, true); in nvme_identify()
6083 return nvme_identify_ns_csi(n, req, false); in nvme_identify()
6085 return nvme_identify_ctrl(n, req); in nvme_identify()
6087 return nvme_identify_ctrl_csi(n, req); in nvme_identify()
6089 return nvme_identify_nslist(n, req, true); in nvme_identify()
6091 return nvme_identify_nslist(n, req, false); in nvme_identify()
6093 return nvme_identify_nslist_csi(n, req, true); in nvme_identify()
6095 return nvme_endurance_group_list(n, req); in nvme_identify()
6097 return nvme_identify_nslist_csi(n, req, false); in nvme_identify()
6099 return nvme_identify_ns_descr_list(n, req); in nvme_identify()
6101 return nvme_identify_cmd_set(n, req); in nvme_identify()
6103 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); in nvme_identify()
6108 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) in nvme_abort() argument
6110 uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; in nvme_abort()
6111 uint16_t cid = (le32_to_cpu(req->cmd.cdw10) >> 16) & 0xffff; in nvme_abort()
6112 NvmeSQueue *sq = n->sq[sqid]; in nvme_abort()
6116 req->cqe.result = 1; in nvme_abort()
6117 if (nvme_check_sqid(n, sqid)) { in nvme_abort()
6122 for (i = 0; i < n->outstanding_aers; i++) { in nvme_abort()
6123 NvmeRequest *re = n->aer_reqs[i]; in nvme_abort()
6124 if (re->cqe.cid == cid) { in nvme_abort()
6125 memmove(n->aer_reqs + i, n->aer_reqs + i + 1, in nvme_abort()
6126 (n->outstanding_aers - i - 1) * sizeof(NvmeRequest *)); in nvme_abort()
6127 n->outstanding_aers--; in nvme_abort()
6128 re->status = NVME_CMD_ABORT_REQ; in nvme_abort()
6129 req->cqe.result = 0; in nvme_abort()
6130 nvme_enqueue_req_completion(&n->admin_cq, re); in nvme_abort()
6136 QTAILQ_FOREACH_SAFE(r, &sq->out_req_list, entry, next) { in nvme_abort()
6137 if (r->cqe.cid == cid) { in nvme_abort()
6138 if (r->aiocb) { in nvme_abort()
6139 r->status = NVME_CMD_ABORT_REQ; in nvme_abort()
6140 blk_aio_cancel_async(r->aiocb); in nvme_abort()
6149 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) in nvme_set_timestamp() argument
6153 n->host_timestamp = le64_to_cpu(ts); in nvme_set_timestamp()
6154 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); in nvme_set_timestamp()
6157 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) in nvme_get_timestamp() argument
6160 uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; in nvme_get_timestamp()
6174 ts.timestamp = n->host_timestamp + elapsed_time; in nvme_get_timestamp()
6176 /* If the host timestamp is non-zero, set the timestamp origin */ in nvme_get_timestamp()
6177 ts.origin = n->host_timestamp ? 0x01 : 0x00; in nvme_get_timestamp()
6184 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) in nvme_get_feature_timestamp() argument
6186 uint64_t timestamp = nvme_get_timestamp(n); in nvme_get_feature_timestamp()
6188 return nvme_c2h(n, (uint8_t *)×tamp, sizeof(timestamp), req); in nvme_get_feature_timestamp()
6191 static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid, in nvme_get_feature_fdp() argument
6196 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_get_feature_fdp()
6206 static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, in nvme_get_feature_fdp_events() argument
6209 NvmeCmd *cmd = &req->cmd; in nvme_get_feature_fdp_events()
6210 uint32_t cdw11 = le32_to_cpu(cmd->cdw11); in nvme_get_feature_fdp_events()
6221 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_get_feature_fdp_events()
6229 ruhid = ns->fdp.phs[ph]; in nvme_get_feature_fdp_events()
6230 ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; in nvme_get_feature_fdp_events()
6251 s_event->evt = event_type; in nvme_get_feature_fdp_events()
6252 s_event->evta = (ruh->event_filter >> shift) & 0x1; in nvme_get_feature_fdp_events()
6260 ret = nvme_c2h(n, s_events, s_events_siz, req); in nvme_get_feature_fdp_events()
6269 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) in nvme_get_feature() argument
6271 NvmeCmd *cmd = &req->cmd; in nvme_get_feature()
6272 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_get_feature()
6273 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_get_feature()
6274 uint32_t nsid = le32_to_cpu(cmd->nsid); in nvme_get_feature()
6294 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { in nvme_get_feature()
6305 if (!nvme_ns(n, nsid)) { in nvme_get_feature()
6336 result = n->features.temp_thresh_hi; in nvme_get_feature()
6339 result = n->features.temp_thresh_low; in nvme_get_feature()
6345 if (!nvme_nsid_valid(n, nsid)) { in nvme_get_feature()
6349 ns = nvme_ns(n, nsid); in nvme_get_feature()
6354 result = ns->features.err_rec; in nvme_get_feature()
6359 ns = nvme_ns(n, i); in nvme_get_feature()
6364 result = blk_enable_write_cache(ns->blkconf.blk); in nvme_get_feature()
6372 result = n->features.async_config; in nvme_get_feature()
6375 return nvme_get_feature_timestamp(n, req); in nvme_get_feature()
6377 return nvme_c2h(n, (uint8_t *)&n->features.hbs, in nvme_get_feature()
6378 sizeof(n->features.hbs), req); in nvme_get_feature()
6386 ret = nvme_get_feature_fdp(n, endgrpid, &result); in nvme_get_feature()
6392 if (!nvme_nsid_valid(n, nsid)) { in nvme_get_feature()
6396 ns = nvme_ns(n, nsid); in nvme_get_feature()
6401 ret = nvme_get_feature_fdp_events(n, ns, req, &result); in nvme_get_feature()
6425 result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16); in nvme_get_feature()
6430 if (iv >= n->conf_ioqpairs + 1) { in nvme_get_feature()
6435 if (iv == n->admin_cq.vector) { in nvme_get_feature()
6446 ret = nvme_get_feature_fdp(n, endgrpid, &result); in nvme_get_feature()
6453 result = n->dn; in nvme_get_feature()
6461 req->cqe.result = cpu_to_le32(result); in nvme_get_feature()
6465 static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) in nvme_set_feature_timestamp() argument
6470 ret = nvme_h2c(n, (uint8_t *)×tamp, sizeof(timestamp), req); in nvme_set_feature_timestamp()
6475 nvme_set_timestamp(n, timestamp); in nvme_set_feature_timestamp()
6480 static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, in nvme_set_feature_fdp_events() argument
6483 NvmeCmd *cmd = &req->cmd; in nvme_set_feature_fdp_events()
6484 uint32_t cdw11 = le32_to_cpu(cmd->cdw11); in nvme_set_feature_fdp_events()
6488 uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1; in nvme_set_feature_fdp_events()
6496 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_set_feature_fdp_events()
6504 ruhid = ns->fdp.phs[ph]; in nvme_set_feature_fdp_events()
6505 ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; in nvme_set_feature_fdp_events()
6507 ret = nvme_h2c(n, events, noet, req); in nvme_set_feature_fdp_events()
6517 ruh->event_filter |= event_mask; in nvme_set_feature_fdp_events()
6519 ruh->event_filter = ruh->event_filter & ~event_mask; in nvme_set_feature_fdp_events()
6525 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) in nvme_set_feature() argument
6529 NvmeCmd *cmd = &req->cmd; in nvme_set_feature()
6530 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_set_feature()
6531 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_set_feature()
6532 uint32_t nsid = le32_to_cpu(cmd->nsid); in nvme_set_feature()
6537 NvmeIdCtrl *id = &n->id_ctrl; in nvme_set_feature()
6538 NvmeAtomic *atomic = &n->atomic; in nvme_set_feature()
6552 if (!nvme_nsid_valid(n, nsid)) { in nvme_set_feature()
6556 ns = nvme_ns(n, nsid); in nvme_set_feature()
6562 if (!nvme_nsid_valid(n, nsid)) { in nvme_set_feature()
6581 n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); in nvme_set_feature()
6584 n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); in nvme_set_feature()
6590 if ((n->temperature >= n->features.temp_thresh_hi) || in nvme_set_feature()
6591 (n->temperature <= n->features.temp_thresh_low)) { in nvme_set_feature()
6592 nvme_smart_event(n, NVME_SMART_TEMPERATURE); in nvme_set_feature()
6599 ns = nvme_ns(n, i); in nvme_set_feature()
6605 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { in nvme_set_feature()
6606 ns->features.err_rec = dw11; in nvme_set_feature()
6614 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { in nvme_set_feature()
6615 ns->features.err_rec = dw11; in nvme_set_feature()
6620 ns = nvme_ns(n, i); in nvme_set_feature()
6625 if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { in nvme_set_feature()
6626 blk_flush(ns->blkconf.blk); in nvme_set_feature()
6629 blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); in nvme_set_feature()
6635 if (n->qs_created) { in nvme_set_feature()
6649 n->conf_ioqpairs, in nvme_set_feature()
6650 n->conf_ioqpairs); in nvme_set_feature()
6651 req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) | in nvme_set_feature()
6652 ((n->conf_ioqpairs - 1) << 16)); in nvme_set_feature()
6655 n->features.async_config = dw11; in nvme_set_feature()
6658 return nvme_set_feature_timestamp(n, req); in nvme_set_feature()
6660 status = nvme_h2c(n, (uint8_t *)&n->features.hbs, in nvme_set_feature()
6661 sizeof(n->features.hbs), req); in nvme_set_feature()
6667 ns = nvme_ns(n, i); in nvme_set_feature()
6673 ns->id_ns.nlbaf = ns->nlbaf - 1; in nvme_set_feature()
6674 if (!n->features.hbs.lbafee) { in nvme_set_feature()
6675 ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15); in nvme_set_feature()
6690 return nvme_set_feature_fdp_events(n, ns, req); in nvme_set_feature()
6693 n->dn = 0x1 & dw11; in nvme_set_feature()
6695 if (n->dn) { in nvme_set_feature()
6696 atomic->atomic_max_write_size = le16_to_cpu(id->awupf) + 1; in nvme_set_feature()
6698 atomic->atomic_max_write_size = le16_to_cpu(id->awun) + 1; in nvme_set_feature()
6701 if (atomic->atomic_max_write_size == 1) { in nvme_set_feature()
6702 atomic->atomic_writes = 0; in nvme_set_feature()
6704 atomic->atomic_writes = 1; in nvme_set_feature()
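/*
 * Hedged sketch of the Number of Queues result written above: both counts
 * are 0's based, NSQA in bits 15:0 and NCQA in bits 31:16, and both are set
 * to n->conf_ioqpairs regardless of what the host asked for (the feature is
 * guarded by the n->qs_created check once I/O queues exist). A host would
 * decode the completion roughly as:
 *
 *     uint32_t result = le32_to_cpu(cqe.result);
 *     uint16_t nsqa = (result & 0xffff) + 1;
 *     uint16_t ncqa = (result >> 16) + 1;
 */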
6713 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) in nvme_aer() argument
6717 if (n->outstanding_aers > n->params.aerl) { in nvme_aer()
6722 n->aer_reqs[n->outstanding_aers] = req; in nvme_aer()
6723 n->outstanding_aers++; in nvme_aer()
6725 if (!QTAILQ_EMPTY(&n->aer_queue)) { in nvme_aer()
6726 nvme_process_aers(n); in nvme_aer()
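/*
 * Hedged summary: Asynchronous Event Request commands are not completed
 * inline. The request is parked in n->aer_reqs[] and n->outstanding_aers is
 * bumped, bounded by the aerl parameter; if events are already sitting in
 * n->aer_queue, nvme_process_aers() delivers one right away using the newly
 * posted slot.
 */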
6732 static void nvme_update_dsm_limits(NvmeCtrl *n, NvmeNamespace *ns) in nvme_update_dsm_limits() argument
6735 n->dmrsl = in nvme_update_dsm_limits()
6736 MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); in nvme_update_dsm_limits()
6742 ns = nvme_ns(n, nsid); in nvme_update_dsm_limits()
6747 n->dmrsl = in nvme_update_dsm_limits()
6748 MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); in nvme_update_dsm_limits()
6752 static bool nvme_csi_supported(NvmeCtrl *n, uint8_t csi) in nvme_csi_supported() argument
6761 cc = ldl_le_p(&n->bar.cc); in nvme_csi_supported()
6769 static void nvme_detach_ns(NvmeCtrl *n, NvmeNamespace *ns) in nvme_detach_ns() argument
6771 assert(ns->attached > 0); in nvme_detach_ns()
6773 n->namespaces[ns->params.nsid] = NULL; in nvme_detach_ns()
6774 ns->attached--; in nvme_detach_ns()
6777 static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) in nvme_ns_attachment() argument
6782 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_ns_attachment()
6783 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_ns_attachment()
6792 if (!nvme_nsid_valid(n, nsid)) { in nvme_ns_attachment()
6796 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_ns_attachment()
6801 ret = nvme_h2c(n, (uint8_t *)list, 4096, req); in nvme_ns_attachment()
6810 *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); in nvme_ns_attachment()
6812 ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); in nvme_ns_attachment()
6819 if (nvme_ns(n, nsid)) { in nvme_ns_attachment()
6823 if (ns->attached && !ns->params.shared) { in nvme_ns_attachment()
6827 if (!nvme_csi_supported(n, ns->csi)) { in nvme_ns_attachment()
6850 if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { in nvme_ns_attachment()
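/*
 * Hedged summary of the Namespace Attachment flow above: a controller list
 * of at most NVME_CONTROLLER_LIST_SIZE - 1 entries is transferred from the
 * host, and each listed controller (resolved via nvme_subsys_ctrl()) has the
 * namespace attached or detached according to the select field in dw10.
 * Attaching an NSID already mapped on the target, attaching a private
 * namespace that is already in use, or attaching a namespace whose command
 * set (ns->csi) is not enabled on that controller is rejected. Affected
 * controllers record the NSID in changed_nsids so a Namespace Attribute
 * Changed event can be raised.
 */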
6881 iocb->ret = -ECANCELED; in nvme_format_cancel()
6883 if (iocb->aiocb) { in nvme_format_cancel()
6884 blk_aio_cancel_async(iocb->aiocb); in nvme_format_cancel()
6885 iocb->aiocb = NULL; in nvme_format_cancel()
6900 trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); in nvme_format_set()
6902 ns->id_ns.dps = (pil << 3) | pi; in nvme_format_set()
6903 ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl; in nvme_format_set()
6913 NvmeNamespace *ns = iocb->ns; in nvme_format_ns_cb()
6916 if (iocb->ret < 0) { in nvme_format_ns_cb()
6919 iocb->ret = ret; in nvme_format_ns_cb()
6925 if (iocb->offset < ns->size) { in nvme_format_ns_cb()
6926 bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); in nvme_format_ns_cb()
6928 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, in nvme_format_ns_cb()
6932 iocb->offset += bytes; in nvme_format_ns_cb()
6936 nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); in nvme_format_ns_cb()
6937 ns->status = 0x0; in nvme_format_ns_cb()
6938 iocb->ns = NULL; in nvme_format_ns_cb()
6939 iocb->offset = 0; in nvme_format_ns_cb()
6947 if (ns->params.zoned) { in nvme_format_check()
6951 if (lbaf > ns->id_ns.nlbaf) { in nvme_format_check()
6955 if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) { in nvme_format_check()
6968 NvmeRequest *req = iocb->req; in nvme_do_format()
6969 NvmeCtrl *n = nvme_ctrl(req); in nvme_do_format() local
6970 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_do_format()
6976 if (iocb->ret < 0) { in nvme_do_format()
6980 if (iocb->broadcast) { in nvme_do_format()
6981 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_format()
6982 iocb->ns = nvme_ns(n, i); in nvme_do_format()
6983 if (iocb->ns) { in nvme_do_format()
6984 iocb->nsid = i; in nvme_do_format()
6990 if (!iocb->ns) { in nvme_do_format()
6994 status = nvme_format_check(iocb->ns, lbaf, pi); in nvme_do_format()
6996 req->status = status; in nvme_do_format()
7000 iocb->ns->status = NVME_FORMAT_IN_PROGRESS; in nvme_do_format()
7005 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_format()
7009 static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) in nvme_format() argument
7012 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_format()
7013 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_format()
7023 iocb->req = req; in nvme_format()
7024 iocb->ret = 0; in nvme_format()
7025 iocb->ns = NULL; in nvme_format()
7026 iocb->nsid = 0; in nvme_format()
7027 iocb->lbaf = lbaf; in nvme_format()
7028 iocb->mset = mset; in nvme_format()
7029 iocb->pi = pi; in nvme_format()
7030 iocb->pil = pil; in nvme_format()
7031 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_format()
7032 iocb->offset = 0; in nvme_format()
7034 if (n->features.hbs.lbafee) { in nvme_format()
7035 iocb->lbaf |= lbafu << 4; in nvme_format()
7038 if (!iocb->broadcast) { in nvme_format()
7039 if (!nvme_nsid_valid(n, nsid)) { in nvme_format()
7044 iocb->ns = nvme_ns(n, nsid); in nvme_format()
7045 if (!iocb->ns) { in nvme_format()
7051 req->aiocb = &iocb->common; in nvme_format()
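/*
 * Hedged summary of the Format NVM flow set up above: lbaf/mset/pi/pil are
 * taken from dw10 (plus the upper LBA format bits when the host behavior
 * lbafee flag is set), and the work runs asynchronously through the iocb.
 * With a broadcast NSID, nvme_do_format() walks all attached namespaces;
 * each one is marked NVME_FORMAT_IN_PROGRESS, its backing image is zeroed
 * chunk-wise via blk_aio_pwrite_zeroes() in nvme_format_ns_cb(), and
 * nvme_format_set() finally rewrites FLBAS/DPS to the requested LBA format
 * and protection settings.
 */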
7062 static void nvme_get_virt_res_num(NvmeCtrl *n, uint8_t rt, int *num_total, in nvme_get_virt_res_num() argument
7066 n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt); in nvme_get_virt_res_num()
7068 n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap); in nvme_get_virt_res_num()
7069 *num_sec = le16_to_cpu(rt ? n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa); in nvme_get_virt_res_num()
7072 static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl *n, NvmeRequest *req, in nvme_assign_virt_res_to_prim() argument
7078 if (cntlid != n->cntlid) { in nvme_assign_virt_res_to_prim()
7082 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); in nvme_assign_virt_res_to_prim()
7088 if (nr > num_total - num_sec) { in nvme_assign_virt_res_to_prim()
7093 n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr); in nvme_assign_virt_res_to_prim()
7095 n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr); in nvme_assign_virt_res_to_prim()
7098 req->cqe.result = cpu_to_le32(nr); in nvme_assign_virt_res_to_prim()
7099 return req->status; in nvme_assign_virt_res_to_prim()
7102 static void nvme_update_virt_res(NvmeCtrl *n, NvmeSecCtrlEntry *sctrl, in nvme_update_virt_res() argument
7108 prev_nr = le16_to_cpu(sctrl->nvi); in nvme_update_virt_res()
7109 prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa); in nvme_update_virt_res()
7110 sctrl->nvi = cpu_to_le16(nr); in nvme_update_virt_res()
7111 n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr); in nvme_update_virt_res()
7113 prev_nr = le16_to_cpu(sctrl->nvq); in nvme_update_virt_res()
7114 prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa); in nvme_update_virt_res()
7115 sctrl->nvq = cpu_to_le16(nr); in nvme_update_virt_res()
7116 n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr); in nvme_update_virt_res()
7120 static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req, in nvme_assign_virt_res_to_sec() argument
7126 sctrl = nvme_sctrl_for_cntlid(n, cntlid); in nvme_assign_virt_res_to_sec()
7131 if (sctrl->scs) { in nvme_assign_virt_res_to_sec()
7135 limit = le16_to_cpu(rt ? n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm); in nvme_assign_virt_res_to_sec()
7140 nvme_get_virt_res_num(n, rt, &num_total, &num_prim, &num_sec); in nvme_assign_virt_res_to_sec()
7141 num_free = num_total - num_prim - num_sec; in nvme_assign_virt_res_to_sec()
7142 diff = nr - le16_to_cpu(rt ? sctrl->nvi : sctrl->nvq); in nvme_assign_virt_res_to_sec()
7148 nvme_update_virt_res(n, sctrl, rt, nr); in nvme_assign_virt_res_to_sec()
7149 req->cqe.result = cpu_to_le32(nr); in nvme_assign_virt_res_to_sec()
7151 return req->status; in nvme_assign_virt_res_to_sec()
7154 static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online) in nvme_virt_set_state() argument
7156 PCIDevice *pci = PCI_DEVICE(n); in nvme_virt_set_state()
7161 sctrl = nvme_sctrl_for_cntlid(n, cntlid); in nvme_virt_set_state()
7167 vf_index = le16_to_cpu(sctrl->vfn) - 1; in nvme_virt_set_state()
7172 if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) { in nvme_virt_set_state()
7176 if (!sctrl->scs) { in nvme_virt_set_state()
7177 sctrl->scs = 0x1; in nvme_virt_set_state()
7181 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_INTERRUPT, 0); in nvme_virt_set_state()
7182 nvme_update_virt_res(n, sctrl, NVME_VIRT_RES_QUEUE, 0); in nvme_virt_set_state()
7184 if (sctrl->scs) { in nvme_virt_set_state()
7185 sctrl->scs = 0x0; in nvme_virt_set_state()
7195 static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req) in nvme_virt_mngmt() argument
7197 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_virt_mngmt()
7198 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_virt_mngmt()
7212 return nvme_assign_virt_res_to_sec(n, req, cntlid, rt, nr); in nvme_virt_mngmt()
7214 return nvme_assign_virt_res_to_prim(n, req, cntlid, rt, nr); in nvme_virt_mngmt()
7216 return nvme_virt_set_state(n, cntlid, true); in nvme_virt_mngmt()
7218 return nvme_virt_set_state(n, cntlid, false); in nvme_virt_mngmt()
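/*
 * Hedged summary of Virtualization Management: the action in dw10 selects
 * whether flexible VQ or VI resources (per the resource type field) are
 * assigned to the primary controller (nvme_assign_virt_res_to_prim(), only
 * valid for the controller's own cntlid and limited to what secondary
 * controllers have not consumed) or to an offline secondary controller
 * (nvme_assign_virt_res_to_sec(), bounded by vqfrsm/vifrsm and the free
 * flexible pool), or whether a secondary controller is brought online or
 * offline via nvme_virt_set_state(). Going online requires at least one VI
 * and two VQ resources; going offline hands the resources back through
 * nvme_update_virt_res().
 */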
7224 static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) in nvme_dbbuf_config() argument
7226 PCIDevice *pci = PCI_DEVICE(n); in nvme_dbbuf_config()
7227 uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); in nvme_dbbuf_config()
7228 uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); in nvme_dbbuf_config()
7232 if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) { in nvme_dbbuf_config()
7237 n->dbbuf_dbs = dbs_addr; in nvme_dbbuf_config()
7238 n->dbbuf_eis = eis_addr; in nvme_dbbuf_config()
7239 n->dbbuf_enabled = true; in nvme_dbbuf_config()
7241 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_dbbuf_config()
7242 NvmeSQueue *sq = n->sq[i]; in nvme_dbbuf_config()
7243 NvmeCQueue *cq = n->cq[i]; in nvme_dbbuf_config()
 7248              * nvme_process_db() uses this hard-coded way to calculate doorbell offsets; keep this layout consistent with it. in nvme_dbbuf_config()
7251 sq->db_addr = dbs_addr + (i << 3); in nvme_dbbuf_config()
7252 sq->ei_addr = eis_addr + (i << 3); in nvme_dbbuf_config()
7253 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); in nvme_dbbuf_config()
7255 if (n->params.ioeventfd && sq->sqid != 0) { in nvme_dbbuf_config()
7257 sq->ioeventfd_enabled = true; in nvme_dbbuf_config()
7264 cq->db_addr = dbs_addr + (i << 3) + (1 << 2); in nvme_dbbuf_config()
7265 cq->ei_addr = eis_addr + (i << 3) + (1 << 2); in nvme_dbbuf_config()
7266 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); in nvme_dbbuf_config()
7268 if (n->params.ioeventfd && cq->cqid != 0) { in nvme_dbbuf_config()
7270 cq->ioeventfd_enabled = true; in nvme_dbbuf_config()
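/*
 * Hedged sketch of the shadow doorbell layout implied by the assignments
 * above, assuming the 4-byte doorbell stride (CAP.DSTRD = 0) this model
 * advertises: each queue pair occupies 8 bytes in both the doorbell page
 * (prp1) and the EventIdx page (prp2):
 *
 *     SQ tail for qid:  dbs_addr + (qid << 3)
 *     CQ head for qid:  dbs_addr + (qid << 3) + 4
 *
 * and likewise for eis_addr. The admin queue gets shadow doorbells as well,
 * but ioeventfd acceleration is only enabled for I/O queues.
 */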
7281 static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req) in nvme_directive_send() argument
7286 static uint16_t nvme_directive_receive(NvmeCtrl *n, NvmeRequest *req) in nvme_directive_receive() argument
7289 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_directive_receive()
7290 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_directive_receive()
7291 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_directive_receive()
7310 ns = nvme_ns(n, nsid); in nvme_directive_receive()
7319 if (ns->endgrp && ns->endgrp->fdp.enabled) { in nvme_directive_receive()
7325 return nvme_c2h(n, (uint8_t *)&id, trans_len, req); in nvme_directive_receive()
7336 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) in nvme_admin_cmd() argument
7338 trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, in nvme_admin_cmd()
7339 nvme_adm_opc_str(req->cmd.opcode)); in nvme_admin_cmd()
7341 if (!(n->cse.acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_admin_cmd()
7342 trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); in nvme_admin_cmd()
7347 if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { in nvme_admin_cmd()
7351 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { in nvme_admin_cmd()
7355 switch (req->cmd.opcode) { in nvme_admin_cmd()
7357 return nvme_del_sq(n, req); in nvme_admin_cmd()
7359 return nvme_create_sq(n, req); in nvme_admin_cmd()
7361 return nvme_get_log(n, req); in nvme_admin_cmd()
7363 return nvme_del_cq(n, req); in nvme_admin_cmd()
7365 return nvme_create_cq(n, req); in nvme_admin_cmd()
7367 return nvme_identify(n, req); in nvme_admin_cmd()
7369 return nvme_abort(n, req); in nvme_admin_cmd()
7371 return nvme_set_feature(n, req); in nvme_admin_cmd()
7373 return nvme_get_feature(n, req); in nvme_admin_cmd()
7375 return nvme_aer(n, req); in nvme_admin_cmd()
7377 return nvme_ns_attachment(n, req); in nvme_admin_cmd()
7379 return nvme_virt_mngmt(n, req); in nvme_admin_cmd()
7381 return nvme_dbbuf_config(n, req); in nvme_admin_cmd()
7383 return nvme_format(n, req); in nvme_admin_cmd()
7385 return nvme_directive_send(n, req); in nvme_admin_cmd()
7387 return nvme_directive_receive(n, req); in nvme_admin_cmd()
7397 trace_pci_nvme_update_sq_eventidx(sq->sqid, sq->tail); in nvme_update_sq_eventidx()
7399 stl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->ei_addr, sq->tail, in nvme_update_sq_eventidx()
7405 ldl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->db_addr, &sq->tail, in nvme_update_sq_tail()
7408 trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail); in nvme_update_sq_tail()
7415 static int nvme_atomic_write_check(NvmeCtrl *n, NvmeCmd *cmd, in nvme_atomic_write_check() argument
7419 uint64_t slba = le64_to_cpu(rw->slba); in nvme_atomic_write_check()
7420 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb); in nvme_atomic_write_check()
7425 if ((cmd->opcode == NVME_CMD_READ) || ((cmd->opcode == NVME_CMD_WRITE) && in nvme_atomic_write_check()
7426 ((rw->nlb + 1) > atomic->atomic_max_write_size))) { in nvme_atomic_write_check()
7433 for (i = 1; i < n->params.max_ioqpairs + 1; i++) { in nvme_atomic_write_check()
7441 sq = n->sq[i]; in nvme_atomic_write_check()
7449 QTAILQ_FOREACH(req, &sq->out_req_list, entry) { in nvme_atomic_write_check()
7450 req_rw = (NvmeRwCmd *)&req->cmd; in nvme_atomic_write_check()
7452 if (((req_rw->opcode == NVME_CMD_WRITE) || in nvme_atomic_write_check()
7453 (req_rw->opcode == NVME_CMD_READ)) && in nvme_atomic_write_check()
7454 (cmd->nsid == req->ns->params.nsid)) { in nvme_atomic_write_check()
7455 req_slba = le64_to_cpu(req_rw->slba); in nvme_atomic_write_check()
7456 req_nlb = (uint32_t)le16_to_cpu(req_rw->nlb); in nvme_atomic_write_check()
7464 if (req->atomic_write && ((elba >= req_slba) && in nvme_atomic_write_check()
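/*
 * Hedged summary: this check is what gives AWUN/AWUPF atomic semantics in
 * the model. Writes no larger than atomic_max_write_size are treated as
 * atomic, and before a read or write is started, every I/O submission
 * queue's out_req_list is scanned for an in-flight atomic write on the same
 * namespace whose LBA range overlaps [slba, slba + nlb]. On a conflict the
 * caller reschedules the submission queue bottom half so the command is
 * retried after the conflicting write completes.
 */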
7478 static NvmeAtomic *nvme_get_atomic(NvmeCtrl *n, NvmeCmd *cmd) in nvme_get_atomic() argument
7480 if (n->atomic.atomic_writes) { in nvme_get_atomic()
7481 return &n->atomic; in nvme_get_atomic()
7489 NvmeCtrl *n = sq->ctrl; in nvme_process_sq() local
7490 NvmeCQueue *cq = n->cq[sq->cqid]; in nvme_process_sq()
7497 if (n->dbbuf_enabled) { in nvme_process_sq()
7501 while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { in nvme_process_sq()
7505 addr = sq->dma_addr + (sq->head << NVME_SQES); in nvme_process_sq()
7506 if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { in nvme_process_sq()
7509 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_process_sq()
7513 atomic = nvme_get_atomic(n, &cmd); in nvme_process_sq()
7516 if (sq->sqid && atomic) { in nvme_process_sq()
7519 ret = nvme_atomic_write_check(n, &cmd, atomic); in nvme_process_sq()
7522 qemu_bh_schedule(sq->bh); in nvme_process_sq()
7534 req = QTAILQ_FIRST(&sq->req_list); in nvme_process_sq()
7535 QTAILQ_REMOVE(&sq->req_list, req, entry); in nvme_process_sq()
7536 QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); in nvme_process_sq()
7538 req->cqe.cid = cmd.cid; in nvme_process_sq()
7539 memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); in nvme_process_sq()
7541 if (sq->sqid && atomic) { in nvme_process_sq()
7542 req->atomic_write = cmd_is_atomic; in nvme_process_sq()
7545 status = sq->sqid ? nvme_io_cmd(n, req) : in nvme_process_sq()
7546 nvme_admin_cmd(n, req); in nvme_process_sq()
7548 req->status = status; in nvme_process_sq()
7552 if (n->dbbuf_enabled) { in nvme_process_sq()
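/*
 * Hedged summary of the submission queue bottom half: commands are fetched
 * one at a time from sq->dma_addr + (head << NVME_SQES); a failed fetch
 * marks the controller failed (CSTS.CFS). On I/O queues a command may be
 * deferred by the atomic-write conflict check above; otherwise a free
 * NvmeRequest is moved from req_list to out_req_list and dispatched to
 * nvme_io_cmd() or nvme_admin_cmd() depending on whether this is the admin
 * queue. With shadow doorbells enabled, the eventidx and tail are
 * re-synchronised around the loop.
 */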
7567 assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr); in nvme_update_msixcap_ts()
7569 config = pci_dev->config + pci_dev->msix_cap; in nvme_update_msixcap_ts()
7571 table_size - 1); in nvme_update_msixcap_ts()
7574 static void nvme_activate_virt_res(NvmeCtrl *n) in nvme_activate_virt_res() argument
7576 PCIDevice *pci_dev = PCI_DEVICE(n); in nvme_activate_virt_res()
7577 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_activate_virt_res()
7580 /* -1 to account for the admin queue */ in nvme_activate_virt_res()
7582 sctrl = nvme_sctrl(n); in nvme_activate_virt_res()
7583 cap->vqprt = sctrl->nvq; in nvme_activate_virt_res()
7584 cap->viprt = sctrl->nvi; in nvme_activate_virt_res()
7585 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; in nvme_activate_virt_res()
7586 n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; in nvme_activate_virt_res()
7588 cap->vqrfap = n->next_pri_ctrl_cap.vqrfap; in nvme_activate_virt_res()
7589 cap->virfap = n->next_pri_ctrl_cap.virfap; in nvme_activate_virt_res()
7590 n->conf_ioqpairs = le16_to_cpu(cap->vqprt) + in nvme_activate_virt_res()
7591 le16_to_cpu(cap->vqrfap) - 1; in nvme_activate_virt_res()
7592 n->conf_msix_qsize = le16_to_cpu(cap->viprt) + in nvme_activate_virt_res()
7593 le16_to_cpu(cap->virfap); in nvme_activate_virt_res()
7597 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) in nvme_ctrl_reset() argument
7599 PCIDevice *pci_dev = PCI_DEVICE(n); in nvme_ctrl_reset()
7605 ns = nvme_ns(n, i); in nvme_ctrl_reset()
7613 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_ctrl_reset()
7614 if (n->sq[i] != NULL) { in nvme_ctrl_reset()
7615 nvme_free_sq(n->sq[i], n); in nvme_ctrl_reset()
7618 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_ctrl_reset()
7619 if (n->cq[i] != NULL) { in nvme_ctrl_reset()
7620 nvme_free_cq(n->cq[i], n); in nvme_ctrl_reset()
7624 while (!QTAILQ_EMPTY(&n->aer_queue)) { in nvme_ctrl_reset()
7625 NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); in nvme_ctrl_reset()
7626 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_ctrl_reset()
7630 if (n->params.sriov_max_vfs) { in nvme_ctrl_reset()
7632 for (i = 0; i < n->nr_sec_ctrls; i++) { in nvme_ctrl_reset()
7633 sctrl = &n->sec_ctrl_list[i]; in nvme_ctrl_reset()
7634 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); in nvme_ctrl_reset()
7639 nvme_activate_virt_res(n); in nvme_ctrl_reset()
7643 n->aer_queued = 0; in nvme_ctrl_reset()
7644 n->aer_mask = 0; in nvme_ctrl_reset()
7645 n->outstanding_aers = 0; in nvme_ctrl_reset()
7646 n->qs_created = false; in nvme_ctrl_reset()
7648 n->dn = n->params.atomic_dn; /* Set Disable Normal */ in nvme_ctrl_reset()
7650 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); in nvme_ctrl_reset()
7653 sctrl = nvme_sctrl(n); in nvme_ctrl_reset()
7655 stl_le_p(&n->bar.csts, sctrl->scs ? 0 : NVME_CSTS_FAILED); in nvme_ctrl_reset()
7657 stl_le_p(&n->bar.csts, 0); in nvme_ctrl_reset()
7660 stl_le_p(&n->bar.intms, 0); in nvme_ctrl_reset()
7661 stl_le_p(&n->bar.intmc, 0); in nvme_ctrl_reset()
7662 stl_le_p(&n->bar.cc, 0); in nvme_ctrl_reset()
7664 n->dbbuf_dbs = 0; in nvme_ctrl_reset()
7665 n->dbbuf_eis = 0; in nvme_ctrl_reset()
7666 n->dbbuf_enabled = false; in nvme_ctrl_reset()
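/*
 * Hedged summary of controller reset: namespaces are quiesced, every
 * submission and completion queue is freed, queued asynchronous events and
 * outstanding AER slots are dropped, and CC/CSTS/INTMS/INTMC plus the shadow
 * doorbell state are cleared. With SR-IOV enabled, resetting the physical
 * function forces all secondary controllers offline, and a function-level
 * reset re-applies the virtualization resources recorded in
 * next_pri_ctrl_cap via nvme_activate_virt_res(). CSTS stays FAILED for a VF
 * whose secondary controller is not online.
 */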
7669 static void nvme_ctrl_shutdown(NvmeCtrl *n) in nvme_ctrl_shutdown() argument
7674 if (n->pmr.dev) { in nvme_ctrl_shutdown()
7675 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); in nvme_ctrl_shutdown()
7679 ns = nvme_ns(n, i); in nvme_ctrl_shutdown()
7688 static int nvme_start_ctrl(NvmeCtrl *n) in nvme_start_ctrl() argument
7690 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_start_ctrl()
7691 uint32_t cc = ldl_le_p(&n->bar.cc); in nvme_start_ctrl()
7692 uint32_t aqa = ldl_le_p(&n->bar.aqa); in nvme_start_ctrl()
7693 uint64_t asq = ldq_le_p(&n->bar.asq); in nvme_start_ctrl()
7694 uint64_t acq = ldq_le_p(&n->bar.acq); in nvme_start_ctrl()
7697 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); in nvme_start_ctrl()
7699 if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) { in nvme_start_ctrl()
7700 trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), in nvme_start_ctrl()
7701 le16_to_cpu(sctrl->nvq)); in nvme_start_ctrl()
7702 return -1; in nvme_start_ctrl()
7704 if (unlikely(n->cq[0])) { in nvme_start_ctrl()
7706 return -1; in nvme_start_ctrl()
7708 if (unlikely(n->sq[0])) { in nvme_start_ctrl()
7710 return -1; in nvme_start_ctrl()
7712 if (unlikely(asq & (page_size - 1))) { in nvme_start_ctrl()
7714 return -1; in nvme_start_ctrl()
7716 if (unlikely(acq & (page_size - 1))) { in nvme_start_ctrl()
7718 return -1; in nvme_start_ctrl()
7722 return -1; in nvme_start_ctrl()
7728 return -1; in nvme_start_ctrl()
7735 return -1; in nvme_start_ctrl()
7739 return -1; in nvme_start_ctrl()
7743 return -1; in nvme_start_ctrl()
7746 n->page_bits = page_bits; in nvme_start_ctrl()
7747 n->page_size = page_size; in nvme_start_ctrl()
7748 n->max_prp_ents = n->page_size / sizeof(uint64_t); in nvme_start_ctrl()
7749 nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1); in nvme_start_ctrl()
7750 nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1); in nvme_start_ctrl()
7752 nvme_set_timestamp(n, 0ULL); in nvme_start_ctrl()
7756 NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); in nvme_start_ctrl()
7758 if (!ns || (!ns->params.shared && ns->ctrl != n)) { in nvme_start_ctrl()
7762 if (nvme_csi_supported(n, ns->csi) && !ns->params.detached) { in nvme_start_ctrl()
7763 if (!ns->attached || ns->params.shared) { in nvme_start_ctrl()
7764 nvme_attach_ns(n, ns); in nvme_start_ctrl()
7769 nvme_update_dsm_limits(n, NULL); in nvme_start_ctrl()
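/*
 * Hedged summary of controller enable (CC.EN 0 -> 1): a VF must have an
 * online secondary controller, no admin queues may exist yet, and ASQ/ACQ
 * must be page aligned; the controller page size is then derived from
 * CC.MPS and the admin queue pair is created with the 0's based sizes from
 * AQA (hence the "+ 1" on ACQS/ASQS). Finally, the subsystem is walked and
 * every namespace that is shared or owned by this controller, not flagged
 * detached, and whose command set is enabled by CC.CSS is auto-attached,
 * before the DSM limits are refreshed.
 */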
7774 static void nvme_cmb_enable_regs(NvmeCtrl *n) in nvme_cmb_enable_regs() argument
7776 uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc); in nvme_cmb_enable_regs()
7777 uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz); in nvme_cmb_enable_regs()
7782 stl_le_p(&n->bar.cmbloc, cmbloc); in nvme_cmb_enable_regs()
7790 NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb); in nvme_cmb_enable_regs()
7791 stl_le_p(&n->bar.cmbsz, cmbsz); in nvme_cmb_enable_regs()
7794 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, in nvme_write_bar() argument
7797 PCIDevice *pci = PCI_DEVICE(n); in nvme_write_bar()
7798 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_write_bar()
7799 uint32_t cc = ldl_le_p(&n->bar.cc); in nvme_write_bar()
7800 uint32_t intms = ldl_le_p(&n->bar.intms); in nvme_write_bar()
7801 uint32_t csts = ldl_le_p(&n->bar.csts); in nvme_write_bar()
7802 uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts); in nvme_write_bar()
7804 if (unlikely(offset & (sizeof(uint32_t) - 1))) { in nvme_write_bar()
7806 "MMIO write not 32-bit aligned," in nvme_write_bar()
7813 "MMIO write smaller than 32-bits," in nvme_write_bar()
7824 " when MSI-X is enabled"); in nvme_write_bar()
7828 stl_le_p(&n->bar.intms, intms); in nvme_write_bar()
7829 n->bar.intmc = n->bar.intms; in nvme_write_bar()
7831 nvme_irq_check(n); in nvme_write_bar()
7837 " when MSI-X is enabled"); in nvme_write_bar()
7841 stl_le_p(&n->bar.intms, intms); in nvme_write_bar()
7842 n->bar.intmc = n->bar.intms; in nvme_write_bar()
7844 nvme_irq_check(n); in nvme_write_bar()
7847 stl_le_p(&n->bar.cc, data); in nvme_write_bar()
7853 nvme_ctrl_shutdown(n); in nvme_write_bar()
7862 if (unlikely(nvme_start_ctrl(n))) { in nvme_write_bar()
7871 nvme_ctrl_reset(n, NVME_RESET_CONTROLLER); in nvme_write_bar()
7876 stl_le_p(&n->bar.csts, csts); in nvme_write_bar()
7899 stl_le_p(&n->bar.aqa, data); in nvme_write_bar()
7903 stn_le_p(&n->bar.asq, size, data); in nvme_write_bar()
7907 stl_le_p((uint8_t *)&n->bar.asq + 4, data); in nvme_write_bar()
7908 trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq)); in nvme_write_bar()
7912 stn_le_p(&n->bar.acq, size, data); in nvme_write_bar()
7915 stl_le_p((uint8_t *)&n->bar.acq + 4, data); in nvme_write_bar()
7916 trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq)); in nvme_write_bar()
7932 stn_le_p(&n->bar.cmbmsc, size, data); in nvme_write_bar()
7933 n->cmb.cmse = false; in nvme_write_bar()
7936 nvme_cmb_enable_regs(n); in nvme_write_bar()
7939 uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc); in nvme_write_bar()
7941 if (cba + int128_get64(n->cmb.mem.size) < cba) { in nvme_write_bar()
7942 uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts); in nvme_write_bar()
7944 stl_le_p(&n->bar.cmbsts, cmbsts); in nvme_write_bar()
7948 n->cmb.cba = cba; in nvme_write_bar()
7949 n->cmb.cmse = true; in nvme_write_bar()
7952 n->bar.cmbsz = 0; in nvme_write_bar()
7953 n->bar.cmbloc = 0; in nvme_write_bar()
7958 stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data); in nvme_write_bar()
7970 stl_le_p(&n->bar.pmrctl, data); in nvme_write_bar()
7972 memory_region_set_enabled(&n->pmr.dev->mr, true); in nvme_write_bar()
7975 memory_region_set_enabled(&n->pmr.dev->mr, false); in nvme_write_bar()
7977 n->pmr.cmse = false; in nvme_write_bar()
7979 stl_le_p(&n->bar.pmrsts, pmrsts); in nvme_write_bar()
7998 stl_le_p(&n->bar.pmrmscl, data); in nvme_write_bar()
7999 n->pmr.cmse = false; in nvme_write_bar()
8002 uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu); in nvme_write_bar()
8005 if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { in nvme_write_bar()
8007 stl_le_p(&n->bar.pmrsts, pmrsts); in nvme_write_bar()
8011 n->pmr.cmse = true; in nvme_write_bar()
8012 n->pmr.cba = cba; in nvme_write_bar()
8021 stl_le_p(&n->bar.pmrmscu, data); in nvme_write_bar()
8034 NvmeCtrl *n = (NvmeCtrl *)opaque; in nvme_mmio_read() local
8035 uint8_t *ptr = (uint8_t *)&n->bar; in nvme_mmio_read()
8039 if (unlikely(addr & (sizeof(uint32_t) - 1))) { in nvme_mmio_read()
8041 "MMIO read not 32-bit aligned," in nvme_mmio_read()
8046 "MMIO read smaller than 32-bits," in nvme_mmio_read()
8051 if (addr > sizeof(n->bar) - size) { in nvme_mmio_read()
8059 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && in nvme_mmio_read()
8071 (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) { in nvme_mmio_read()
8072 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); in nvme_mmio_read()
8078 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) in nvme_process_db() argument
8080 PCIDevice *pci = PCI_DEVICE(n); in nvme_process_db()
8083 if (unlikely(addr & ((1 << 2) - 1))) { in nvme_process_db()
8085 "doorbell write not 32-bit aligned," in nvme_process_db()
8090 if (((addr - 0x1000) >> 2) & 1) { in nvme_process_db()
8096 qid = (addr - (0x1000 + (1 << 2))) >> 3; in nvme_process_db()
8097 if (unlikely(nvme_check_cqid(n, qid))) { in nvme_process_db()
8116 if (n->outstanding_aers) { in nvme_process_db()
8117 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, in nvme_process_db()
8125 cq = n->cq[qid]; in nvme_process_db()
8126 if (unlikely(new_head >= cq->size)) { in nvme_process_db()
8133 if (n->outstanding_aers) { in nvme_process_db()
8134 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, in nvme_process_db()
8142 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); in nvme_process_db()
8146 qemu_bh_schedule(cq->bh); in nvme_process_db()
8149 cq->head = new_head; in nvme_process_db()
8150 if (!qid && n->dbbuf_enabled) { in nvme_process_db()
8151 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); in nvme_process_db()
8154 if (cq->tail == cq->head) { in nvme_process_db()
8155 if (cq->irq_enabled) { in nvme_process_db()
8156 n->cq_pending--; in nvme_process_db()
8159 nvme_irq_deassert(n, cq); in nvme_process_db()
8167 qid = (addr - 0x1000) >> 3; in nvme_process_db()
8168 if (unlikely(nvme_check_sqid(n, qid))) { in nvme_process_db()
8174 if (n->outstanding_aers) { in nvme_process_db()
8175 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, in nvme_process_db()
8183 sq = n->sq[qid]; in nvme_process_db()
8184 if (unlikely(new_tail >= sq->size)) { in nvme_process_db()
8191 if (n->outstanding_aers) { in nvme_process_db()
8192 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, in nvme_process_db()
8200 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); in nvme_process_db()
8202 sq->tail = new_tail; in nvme_process_db()
8203 if (!qid && n->dbbuf_enabled) { in nvme_process_db()
8217 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); in nvme_process_db()
8220 qemu_bh_schedule(sq->bh); in nvme_process_db()
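/*
 * A minimal host-side sketch of the doorbell layout decoded above, assuming
 * the 4-byte stride (CAP.DSTRD = 0) used by this model; the helper names are
 * illustrative, not part of the device code. Even 32-bit words from offset
 * 0x1000 are SQ tail doorbells, odd words are CQ head doorbells:
 */
static inline unsigned long nvme_sq_doorbell_off(unsigned int qid)
{
    return 0x1000 + (2 * qid) * 4;      /* SQyTDBL */
}

static inline unsigned long nvme_cq_doorbell_off(unsigned int qid)
{
    return 0x1000 + (2 * qid + 1) * 4;  /* CQyHDBL */
}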
8227 NvmeCtrl *n = (NvmeCtrl *)opaque; in nvme_mmio_write() local
8231 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && in nvme_mmio_write()
8237 if (addr < sizeof(n->bar)) { in nvme_mmio_write()
8238 nvme_write_bar(n, addr, data, size); in nvme_mmio_write()
8240 nvme_process_db(n, addr, data); in nvme_mmio_write()
8257 NvmeCtrl *n = (NvmeCtrl *)opaque; in nvme_cmb_write() local
8258 stn_le_p(&n->cmb.buf[addr], size, data); in nvme_cmb_write()
8263 NvmeCtrl *n = (NvmeCtrl *)opaque; in nvme_cmb_read() local
8264 return ldn_le_p(&n->cmb.buf[addr], size); in nvme_cmb_read()
8277 static bool nvme_check_params(NvmeCtrl *n, Error **errp) in nvme_check_params() argument
8279 NvmeParams *params = &n->params; in nvme_check_params()
8281 if (params->num_queues) { in nvme_check_params()
8285 params->max_ioqpairs = params->num_queues - 1; in nvme_check_params()
8288 if (n->namespace.blkconf.blk && n->subsys) { in nvme_check_params()
8294 if (params->max_ioqpairs < 1 || in nvme_check_params()
8295 params->max_ioqpairs > NVME_MAX_IOQPAIRS) { in nvme_check_params()
8301 if (params->msix_qsize < 1 || in nvme_check_params()
8302 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { in nvme_check_params()
8308 if (!params->serial) { in nvme_check_params()
8313 if (params->mqes < 1) { in nvme_check_params()
8318 if (n->pmr.dev) { in nvme_check_params()
8319 if (params->msix_exclusive_bar) { in nvme_check_params()
8324 if (host_memory_backend_is_mapped(n->pmr.dev)) { in nvme_check_params()
8326 object_get_canonical_path_component(OBJECT(n->pmr.dev))); in nvme_check_params()
8330 if (!is_power_of_2(n->pmr.dev->size)) { in nvme_check_params()
8335 host_memory_backend_set_mapped(n->pmr.dev, true); in nvme_check_params()
8338 if (n->params.zasl > n->params.mdts) { in nvme_check_params()
8344 if (!n->params.vsl) { in nvme_check_params()
8345 error_setg(errp, "vsl must be non-zero"); in nvme_check_params()
8349 if (params->sriov_max_vfs) { in nvme_check_params()
8350 if (!n->subsys) { in nvme_check_params()
8351 error_setg(errp, "subsystem is required for the use of SR-IOV"); in nvme_check_params()
8355 if (params->cmb_size_mb) { in nvme_check_params()
8356 error_setg(errp, "CMB is not supported with SR-IOV"); in nvme_check_params()
8360 if (n->pmr.dev) { in nvme_check_params()
8361 error_setg(errp, "PMR is not supported with SR-IOV"); in nvme_check_params()
8365 if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) { in nvme_check_params()
8367 " must be set for the use of SR-IOV"); in nvme_check_params()
8371 if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) { in nvme_check_params()
8373 " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2); in nvme_check_params()
8377 if (params->max_ioqpairs < params->sriov_vq_flexible + 2) { in nvme_check_params()
8378 error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be" in nvme_check_params()
8383 if (params->sriov_vi_flexible < params->sriov_max_vfs) { in nvme_check_params()
8385 " to %d (sriov_max_vfs)", params->sriov_max_vfs); in nvme_check_params()
8389 if (params->msix_qsize < params->sriov_vi_flexible + 1) { in nvme_check_params()
8390 error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be" in nvme_check_params()
8395 if (params->sriov_max_vi_per_vf && in nvme_check_params()
8396 (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) { in nvme_check_params()
8398 " (sriov_max_vi_per_vf - 1) %% %d == 0 and" in nvme_check_params()
8403 if (params->sriov_max_vq_per_vf && in nvme_check_params()
8404 (params->sriov_max_vq_per_vf < 2 || in nvme_check_params()
8405 (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) { in nvme_check_params()
8407 " (sriov_max_vq_per_vf - 1) %% %d == 0 and" in nvme_check_params()
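/*
 * Hedged summary of the SR-IOV parameter constraints enforced above (in
 * addition to requiring a subsystem and forbidding CMB/PMR):
 *
 *     sriov_vq_flexible >= 2 * sriov_max_vfs
 *     max_ioqpairs      >= sriov_vq_flexible + 2
 *     sriov_vi_flexible >= sriov_max_vfs
 *     msix_qsize        >= sriov_vi_flexible + 1
 *     sriov_max_vq_per_vf, if set: >= 2 and (value - 1) % NVME_VF_RES_GRANULARITY == 0
 *     sriov_max_vi_per_vf, if set: (value - 1) % NVME_VF_RES_GRANULARITY == 0
 *
 * i.e. the flexible pools must cover every VF's minimum share while leaving
 * the PF at least two private I/O queue pairs and one private vector.
 */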
8416 static void nvme_init_state(NvmeCtrl *n) in nvme_init_state() argument
8418 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_init_state()
8419 NvmeSecCtrlEntry *list = n->sec_ctrl_list; in nvme_init_state()
8421 PCIDevice *pci = PCI_DEVICE(n); in nvme_init_state()
8422 NvmeAtomic *atomic = &n->atomic; in nvme_init_state()
8423 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_state()
8428 sctrl = nvme_sctrl(n); in nvme_init_state()
8430 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; in nvme_init_state()
8431 n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; in nvme_init_state()
8433 max_vfs = n->params.sriov_max_vfs; in nvme_init_state()
8434 n->conf_ioqpairs = n->params.max_ioqpairs; in nvme_init_state()
8435 n->conf_msix_qsize = n->params.msix_qsize; in nvme_init_state()
8438 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); in nvme_init_state()
8439 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); in nvme_init_state()
8440 n->temperature = NVME_TEMPERATURE; in nvme_init_state()
8441 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; in nvme_init_state()
8442 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); in nvme_init_state()
8443 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); in nvme_init_state()
8444 QTAILQ_INIT(&n->aer_queue); in nvme_init_state()
8446 n->nr_sec_ctrls = max_vfs; in nvme_init_state()
8449 sctrl->pcid = cpu_to_le16(n->cntlid); in nvme_init_state()
8450 sctrl->vfn = cpu_to_le16(i + 1); in nvme_init_state()
8453 cap->cntlid = cpu_to_le16(n->cntlid); in nvme_init_state()
8454 cap->crt = NVME_CRT_VQ | NVME_CRT_VI; in nvme_init_state()
8457 cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); in nvme_init_state()
8459 cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - in nvme_init_state()
8460 n->params.sriov_vq_flexible); in nvme_init_state()
8461 cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible); in nvme_init_state()
8462 cap->vqrfap = cap->vqfrt; in nvme_init_state()
8463 cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY); in nvme_init_state()
8464 cap->vqfrsm = n->params.sriov_max_vq_per_vf ? in nvme_init_state()
8465 cpu_to_le16(n->params.sriov_max_vq_per_vf) : in nvme_init_state()
8466 cap->vqfrt / MAX(max_vfs, 1); in nvme_init_state()
8470 cap->viprt = cpu_to_le16(n->conf_msix_qsize); in nvme_init_state()
8472 cap->viprt = cpu_to_le16(n->params.msix_qsize - in nvme_init_state()
8473 n->params.sriov_vi_flexible); in nvme_init_state()
8474 cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible); in nvme_init_state()
8475 cap->virfap = cap->vifrt; in nvme_init_state()
8476 cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY); in nvme_init_state()
8477 cap->vifrsm = n->params.sriov_max_vi_per_vf ? in nvme_init_state()
8478 cpu_to_le16(n->params.sriov_max_vi_per_vf) : in nvme_init_state()
8479 cap->vifrt / MAX(max_vfs, 1); in nvme_init_state()
8483 id->awun = cpu_to_le16(n->params.atomic_awun); in nvme_init_state()
8484 id->awupf = cpu_to_le16(n->params.atomic_awupf); in nvme_init_state()
8485 n->dn = n->params.atomic_dn; in nvme_init_state()
8487 if (id->awun || id->awupf) { in nvme_init_state()
8488 if (id->awupf > id->awun) { in nvme_init_state()
8489 id->awupf = 0; in nvme_init_state()
8492 if (n->dn) { in nvme_init_state()
8493 atomic->atomic_max_write_size = id->awupf + 1; in nvme_init_state()
8495 atomic->atomic_max_write_size = id->awun + 1; in nvme_init_state()
8498 if (atomic->atomic_max_write_size == 1) { in nvme_init_state()
8499 atomic->atomic_writes = 0; in nvme_init_state()
8501 atomic->atomic_writes = 1; in nvme_init_state()
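/*
 * Hedged summary of the state initialised above: the primary controller
 * capabilities structure splits resources into private (vqprt/viprt) and
 * flexible (vqfrt/vifrt) pools. Without SR-IOV everything is private; with
 * SR-IOV the sriov_vq_flexible/sriov_vi_flexible pools are carved out of
 * max_ioqpairs/msix_qsize, initially assigned back to the primary
 * (vqrfap/virfap = vqfrt/vifrt), and the per-VF maxima vqfrsm/vifrsm default
 * to an even split across sriov_max_vfs unless sriov_max_vq_per_vf /
 * sriov_max_vi_per_vf override them. AWUN/AWUPF seed the atomic write limit,
 * using AWUPF when "disable normal" (atomic_dn) is set and AWUN otherwise;
 * an AWUPF larger than AWUN is clamped to 0.
 */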
8506 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) in nvme_init_cmb() argument
8508 uint64_t cmb_size = n->params.cmb_size_mb * MiB; in nvme_init_cmb()
8509 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_init_cmb()
8511 n->cmb.buf = g_malloc0(cmb_size); in nvme_init_cmb()
8512 memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, in nvme_init_cmb()
8513 "nvme-cmb", cmb_size); in nvme_init_cmb()
8517 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); in nvme_init_cmb()
8520 stq_le_p(&n->bar.cap, cap); in nvme_init_cmb()
8522 if (n->params.legacy_cmb) { in nvme_init_cmb()
8523 nvme_cmb_enable_regs(n); in nvme_init_cmb()
8524 n->cmb.cmse = true; in nvme_init_cmb()
8528 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) in nvme_init_pmr() argument
8530 uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap); in nvme_init_pmr()
8538 stl_le_p(&n->bar.pmrcap, pmrcap); in nvme_init_pmr()
8543 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); in nvme_init_pmr()
8545 memory_region_set_enabled(&n->pmr.dev->mr, false); in nvme_init_pmr()
8580 static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset, in nvme_init_sriov() argument
8583 uint16_t vf_dev_id = n->params.use_intel_id ? in nvme_init_sriov()
8585 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_init_sriov()
8586 uint64_t bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), in nvme_init_sriov()
8587 le16_to_cpu(cap->vifrsm), in nvme_init_sriov()
8591 n->params.sriov_max_vfs, n->params.sriov_max_vfs, in nvme_init_sriov()
8613 pci_set_word(pci_dev->config + offset + PCI_PM_PMC, in nvme_add_pm_capability()
8615 pci_set_word(pci_dev->config + offset + PCI_PM_CTRL, in nvme_add_pm_capability()
8617 pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL, in nvme_add_pm_capability()
8627 void *rsp = doe_cap->read_mbox; in pcie_doe_spdm_rsp()
8630 uint32_t recvd = spdm_socket_rsp(doe_cap->spdm_socket, in pcie_doe_spdm_rsp()
8633 doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4); in pcie_doe_spdm_rsp()
8644 static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) in nvme_init_pci() argument
8647 uint8_t *pci_conf = pci_dev->config; in nvme_init_pci()
8656 if (n->params.use_intel_id) { in nvme_init_pci()
8668 if (n->params.sriov_max_vfs) { in nvme_init_pci()
8672 if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { in nvme_init_pci()
8673 bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, 0, NULL, NULL); in nvme_init_pci()
8674 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", in nvme_init_pci()
8677 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem); in nvme_init_pci()
8678 ret = msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp); in nvme_init_pci()
8680 assert(n->params.msix_qsize >= 1); in nvme_init_pci()
8684 nr_vectors = n->params.msix_qsize; in nvme_init_pci()
8685 bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, in nvme_init_pci()
8690 NvmePriCtrlCap *cap = &pn->pri_ctrl_cap; in nvme_init_pci()
8692 nr_vectors = le16_to_cpu(cap->vifrsm); in nvme_init_pci()
8693 bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), nr_vectors, in nvme_init_pci()
8697 memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); in nvme_init_pci()
8698 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", in nvme_init_pci()
8700 memory_region_add_subregion(&n->bar0, 0, &n->iomem); in nvme_init_pci()
8703 pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); in nvme_init_pci()
8706 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); in nvme_init_pci()
8710 &n->bar0, 0, msix_table_offset, in nvme_init_pci()
8711 &n->bar0, 0, msix_pba_offset, 0, errp); in nvme_init_pci()
8714 if (ret == -ENOTSUP) { in nvme_init_pci()
8723 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs && in nvme_init_pci()
8724 !nvme_init_sriov(n, pci_dev, 0x120, errp)) { in nvme_init_pci()
8728 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); in nvme_init_pci()
8733 if (pci_dev->spdm_port) { in nvme_init_pci()
8734 uint16_t doe_offset = n->params.sriov_max_vfs ? in nvme_init_pci()
8738 pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset, in nvme_init_pci()
8741 pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port, in nvme_init_pci()
8744 if (pci_dev->doe_spdm.spdm_socket < 0) { in nvme_init_pci()
8749 if (n->params.cmb_size_mb) { in nvme_init_pci()
8750 nvme_init_cmb(n, pci_dev); in nvme_init_pci()
8753 if (n->pmr.dev) { in nvme_init_pci()
8754 nvme_init_pmr(n, pci_dev); in nvme_init_pci()
8760 static void nvme_init_subnqn(NvmeCtrl *n) in nvme_init_subnqn() argument
8762 NvmeSubsystem *subsys = n->subsys; in nvme_init_subnqn()
8763 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_subnqn()
8766 snprintf((char *)id->subnqn, sizeof(id->subnqn), in nvme_init_subnqn()
8767 "nqn.2019-08.org.qemu:%s", n->params.serial); in nvme_init_subnqn()
8769 pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn); in nvme_init_subnqn()
8773 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) in nvme_init_ctrl() argument
8775 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_ctrl()
8776 uint8_t *pci_conf = pci_dev->config; in nvme_init_ctrl()
8777 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_init_ctrl()
8778 NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); in nvme_init_ctrl()
8782 memcpy(n->cse.acs, nvme_cse_acs_default, sizeof(n->cse.acs)); in nvme_init_ctrl()
8783 memcpy(n->cse.iocs.nvm, nvme_cse_iocs_nvm_default, sizeof(n->cse.iocs.nvm)); in nvme_init_ctrl()
8784 memcpy(n->cse.iocs.zoned, nvme_cse_iocs_zoned_default, in nvme_init_ctrl()
8785 sizeof(n->cse.iocs.zoned)); in nvme_init_ctrl()
8787 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); in nvme_init_ctrl()
8788 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); in nvme_init_ctrl()
8789 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); in nvme_init_ctrl()
8790 strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' '); in nvme_init_ctrl()
8791 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); in nvme_init_ctrl()
8793 id->cntlid = cpu_to_le16(n->cntlid); in nvme_init_ctrl()
8795 id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); in nvme_init_ctrl()
8798 if (n->params.ctratt.mem) { in nvme_init_ctrl()
8802 id->rab = 6; in nvme_init_ctrl()
8804 if (n->params.use_intel_id) { in nvme_init_ctrl()
8805 id->ieee[0] = 0xb3; in nvme_init_ctrl()
8806 id->ieee[1] = 0x02; in nvme_init_ctrl()
8807 id->ieee[2] = 0x00; in nvme_init_ctrl()
8809 id->ieee[0] = 0x00; in nvme_init_ctrl()
8810 id->ieee[1] = 0x54; in nvme_init_ctrl()
8811 id->ieee[2] = 0x52; in nvme_init_ctrl()
8814 id->mdts = n->params.mdts; in nvme_init_ctrl()
8815 id->ver = cpu_to_le32(NVME_SPEC_VER); in nvme_init_ctrl()
8819 if (n->params.dbcs) { in nvme_init_ctrl()
8822 n->cse.acs[NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP; in nvme_init_ctrl()
8825 if (n->params.sriov_max_vfs) { in nvme_init_ctrl()
8828 n->cse.acs[NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP; in nvme_init_ctrl()
8831 id->oacs = cpu_to_le16(oacs); in nvme_init_ctrl()
8833 id->cntrltype = 0x1; in nvme_init_ctrl()
8846 id->acl = 3; in nvme_init_ctrl()
8847 id->aerl = n->params.aerl; in nvme_init_ctrl()
8848 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; in nvme_init_ctrl()
8849 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; in nvme_init_ctrl()
8852 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); in nvme_init_ctrl()
8853 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); in nvme_init_ctrl()
8855 id->sqes = (NVME_SQES << 4) | NVME_SQES; in nvme_init_ctrl()
8856 id->cqes = (NVME_CQES << 4) | NVME_CQES; in nvme_init_ctrl()
8857 id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); in nvme_init_ctrl()
8858 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | in nvme_init_ctrl()
8865 * as a Flush-equivalent operation, support for the broadcast NSID in Flush in nvme_init_ctrl()
8870 id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; in nvme_init_ctrl()
8872 id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1 | in nvme_init_ctrl()
8874 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | in nvme_init_ctrl()
8877 nvme_init_subnqn(n); in nvme_init_ctrl()
8879 id->psd[0].mp = cpu_to_le16(0x9c4); in nvme_init_ctrl()
8880 id->psd[0].enlat = cpu_to_le32(0x10); in nvme_init_ctrl()
8881 id->psd[0].exlat = cpu_to_le32(0x4); in nvme_init_ctrl()
8883 id->cmic |= NVME_CMIC_MULTI_CTRL; in nvme_init_ctrl()
8886 id->endgidmax = cpu_to_le16(0x1); in nvme_init_ctrl()
8888 if (n->subsys->endgrp.fdp.enabled) { in nvme_init_ctrl()
8892 id->ctratt = cpu_to_le32(ctratt); in nvme_init_ctrl()
8894 NVME_CAP_SET_MQES(cap, n->params.mqes); in nvme_init_ctrl()
8900 NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); in nvme_init_ctrl()
8901 NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); in nvme_init_ctrl()
8902 stq_le_p(&n->bar.cap, cap); in nvme_init_ctrl()
8904 stl_le_p(&n->bar.vs, NVME_SPEC_VER); in nvme_init_ctrl()
8905 n->bar.intmc = n->bar.intms = 0; in nvme_init_ctrl()
8907 if (pci_is_vf(pci_dev) && !sctrl->scs) { in nvme_init_ctrl()
8908 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_init_ctrl()
8912 static int nvme_init_subsys(NvmeCtrl *n, Error **errp) in nvme_init_subsys() argument
8916 if (!n->subsys) { in nvme_init_subsys()
8919 qdev_prop_set_string(dev, "nqn", n->params.serial); in nvme_init_subsys()
8922 return -1; in nvme_init_subsys()
8925 n->subsys = NVME_SUBSYS(dev); in nvme_init_subsys()
8928 cntlid = nvme_subsys_register_ctrl(n, errp); in nvme_init_subsys()
8930 return -1; in nvme_init_subsys()
8933 n->cntlid = cntlid; in nvme_init_subsys()
8938 void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) in nvme_attach_ns() argument
8940 uint32_t nsid = ns->params.nsid; in nvme_attach_ns()
8943 n->namespaces[nsid] = ns; in nvme_attach_ns()
8944 ns->attached++; in nvme_attach_ns()
8949 NvmeCtrl *n = NVME(pci_dev); in nvme_realize() local
8959 memcpy(&n->params, &pn->params, sizeof(NvmeParams)); in nvme_realize()
8965 n->params.serial = g_strdup(pn->params.serial); in nvme_realize()
8966 n->subsys = pn->subsys; in nvme_realize()
8973 object_ref(OBJECT(pn->subsys)); in nvme_realize()
8976 if (!nvme_check_params(n, errp)) { in nvme_realize()
8980 qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); in nvme_realize()
8982 if (nvme_init_subsys(n, errp)) { in nvme_realize()
8985 nvme_init_state(n); in nvme_realize()
8986 if (!nvme_init_pci(n, pci_dev, errp)) { in nvme_realize()
8989 nvme_init_ctrl(n, pci_dev); in nvme_realize()
8992 if (n->namespace.blkconf.blk) { in nvme_realize()
8993 ns = &n->namespace; in nvme_realize()
8994 ns->params.nsid = 1; in nvme_realize()
8995 ns->ctrl = n; in nvme_realize()
9001 n->subsys->namespaces[ns->params.nsid] = ns; in nvme_realize()
9007 NvmeCtrl *n = NVME(pci_dev); in nvme_exit() local
9011 nvme_ctrl_reset(n, NVME_RESET_FUNCTION); in nvme_exit()
9014 ns = nvme_ns(n, i); in nvme_exit()
9016 ns->attached--; in nvme_exit()
9020 nvme_subsys_unregister_ctrl(n->subsys, n); in nvme_exit()
9022 g_free(n->cq); in nvme_exit()
9023 g_free(n->sq); in nvme_exit()
9024 g_free(n->aer_reqs); in nvme_exit()
9026 if (n->params.cmb_size_mb) { in nvme_exit()
9027 g_free(n->cmb.buf); in nvme_exit()
9030 if (pci_dev->doe_spdm.spdm_socket > 0) { in nvme_exit()
9031 spdm_socket_close(pci_dev->doe_spdm.spdm_socket, in nvme_exit()
9035 if (n->pmr.dev) { in nvme_exit()
9036 host_memory_backend_set_mapped(n->pmr.dev, false); in nvme_exit()
9039 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { in nvme_exit()
9043 if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { in nvme_exit()
9046 msix_uninit(pci_dev, &n->bar0, &n->bar0); in nvme_exit()
9049 memory_region_del_subregion(&n->bar0, &n->iomem); in nvme_exit()
9067 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
9068 DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
9083 DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar,
9097 NvmeCtrl *n = NVME(obj); in nvme_get_smart_warning() local
9098 uint8_t value = n->smart_critical_warning; in nvme_get_smart_warning()
9106 NvmeCtrl *n = NVME(obj); in nvme_set_smart_warning() local
9115 if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) { in nvme_set_smart_warning()
9125 old_value = n->smart_critical_warning; in nvme_set_smart_warning()
9126 n->smart_critical_warning = value; in nvme_set_smart_warning()
9132 nvme_smart_event(n, event); in nvme_set_smart_warning()
9139 NvmeCtrl *n = NVME(pci_dev); in nvme_pci_reset() local
9142 nvme_ctrl_reset(n, NVME_RESET_FUNCTION); in nvme_pci_reset()
9147 NvmeCtrl *n = NVME(dev); in nvme_sriov_post_write_config() local
9152 sctrl = &n->sec_ctrl_list[i]; in nvme_sriov_post_write_config()
9153 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); in nvme_sriov_post_write_config()
9163 pcie_doe_write_config(&dev->doe_spdm, address, val, len); in nvme_pci_write_config()
9173 if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) { in nvme_pci_read_config()
9174 if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) { in nvme_pci_read_config()
9191 pc->realize = nvme_realize; in nvme_class_init()
9192 pc->config_write = nvme_pci_write_config; in nvme_class_init()
9193 pc->config_read = nvme_pci_read_config; in nvme_class_init()
9194 pc->exit = nvme_exit; in nvme_class_init()
9195 pc->class_id = PCI_CLASS_STORAGE_EXPRESS; in nvme_class_init()
9196 pc->revision = 2; in nvme_class_init()
9198 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); in nvme_class_init()
9199 dc->desc = "Non-Volatile Memory Express"; in nvme_class_init()
9201 dc->vmsd = &nvme_vmstate; in nvme_class_init()
9207 NvmeCtrl *n = NVME(obj); in nvme_instance_init() local
9209 device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex, in nvme_instance_init()