Lines Matching +full:multi +full:- +full:attr

14  *  https://nvmexpress.org/developers/nvme-specification/
18 * ---------------------
24 * -----
28 * -drive file=<file>,if=none,id=<drive_id>
29 * -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
30 * -device nvme,serial=<serial>,id=<bus_name>, \
47 * -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
60 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
63 * Enabling PMR emulation can be achieved by pointing to a memory-backend-file.
65 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
66 * size=<size> .... -device nvme,...,pmrdev=<mem_id>
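A minimal sketch of such an invocation (the ids, file path and size below are illustrative, not taken from the source):

    -object memory-backend-file,id=pmr0,share=on,mem-path=pmr.img,size=16M
    -device nvme,serial=deadbeef,pmrdev=pmr0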
71 * nvme-subsys device as above.
75 * - `nqn`
77 * `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
84 * - `subsys`
87 * device. This also enables the multi-controller capability represented in the
88 * Identify Controller data structure in CMIC (Controller Multi-path I/O and
91 * - `aerl`
96 * - `aer_max_queued`
101 * - `mdts`
103 * between host-accessible memory and the controller. The value is specified
107 * - `vsl`
113 * - `zoned.zasl`
119 * - `zoned.auto_transition`
124 * - `sriov_max_vfs`
126 * by the controller. The default value is 0. Specifying a non-zero value
127 * enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
128 * Virtual function controllers will not report SR-IOV capability.
133 * - `sriov_vq_flexible`
136 * controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
138 * - `sriov_vi_flexible`
141 * controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
143 * - `sriov_max_vi_per_vf`
148 * - `sriov_max_vq_per_vf`
155 * - `shared`
158 * nvme-subsys device, the namespace will be attached to all controllers in
163 * - `detached`
166 * controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
174 * The number may be followed by K, M, G as in kilo-, mega- or giga-.
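Putting the options above together, a sketch of a subsystem with one controller and a shared namespace might look like this (ids and values are illustrative, not taken from the source; further controllers added with subsys=subsys0 would see the shared namespace as well):

    -drive file=nvm.img,if=none,id=nvm0
    -device nvme-subsys,id=subsys0,nqn=subsys0
    -device nvme,serial=deadbeef,id=nvme0,subsys=subsys0,mdts=7,max_ioqpairs=8
    -device nvme-ns,drive=nvm0,bus=nvme0,nsid=1,shared=on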
198 #include "qemu/error-report.h"
205 #include "system/block-backend.h"
209 #include "system/spdm-socket.h"
323 return le16_to_cpu(req->sq->sqid); in nvme_sqid()
329 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_make_pid()
335 return (rg << (16 - rgif)) | ph; in nvme_make_pid()
340 return ph < ns->fdp.nphs; in nvme_ph_valid()
345 return rg < endgrp->fdp.nrg; in nvme_rg_valid()
350 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_pid2ph()
356 return pid & ((1 << (15 - rgif)) - 1); in nvme_pid2ph()
361 uint16_t rgif = ns->endgrp->fdp.rgif; in nvme_pid2rg()
367 return pid >> (16 - rgif); in nvme_pid2rg()
376 return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg); in nvme_parse_pid()
385 QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); in nvme_assign_zone_state()
388 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); in nvme_assign_zone_state()
391 QTAILQ_REMOVE(&ns->closed_zones, zone, entry); in nvme_assign_zone_state()
394 QTAILQ_REMOVE(&ns->full_zones, zone, entry); in nvme_assign_zone_state()
404 QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); in nvme_assign_zone_state()
407 QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); in nvme_assign_zone_state()
410 QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); in nvme_assign_zone_state()
413 QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); in nvme_assign_zone_state()
417 zone->d.za = 0; in nvme_assign_zone_state()
424 if (ns->params.max_active_zones != 0 && in nvme_zns_check_resources()
425 ns->nr_active_zones + act > ns->params.max_active_zones) { in nvme_zns_check_resources()
426 trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); in nvme_zns_check_resources()
430 if (ns->params.max_open_zones != 0 && in nvme_zns_check_resources()
431 ns->nr_open_zones + opn > ns->params.max_open_zones) { in nvme_zns_check_resources()
432 trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); in nvme_zns_check_resources()
436 if (zrwa > ns->zns.numzrwa) { in nvme_zns_check_resources()
455 bool is_full = ebuf->next == ebuf->start && ebuf->nelems; in nvme_fdp_alloc_event()
457 ret = &ebuf->events[ebuf->next++]; in nvme_fdp_alloc_event()
458 if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) { in nvme_fdp_alloc_event()
459 ebuf->next = 0; in nvme_fdp_alloc_event()
462 ebuf->start = ebuf->next; in nvme_fdp_alloc_event()
464 ebuf->nelems++; in nvme_fdp_alloc_event()
468 ret->timestamp = nvme_get_timestamp(n); in nvme_fdp_alloc_event()
475 return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1; in log_event()
480 NvmeEnduranceGroup *endgrp = ns->endgrp; in nvme_update_ruh()
490 ruhid = ns->fdp.phs[ph]; in nvme_update_ruh()
492 ruh = &endgrp->fdp.ruhs[ruhid]; in nvme_update_ruh()
493 ru = &ruh->rus[rg]; in nvme_update_ruh()
495 if (ru->ruamw) { in nvme_update_ruh()
497 e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events); in nvme_update_ruh()
498 e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN; in nvme_update_ruh()
499 e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV; in nvme_update_ruh()
500 e->pid = cpu_to_le16(pid); in nvme_update_ruh()
501 e->nsid = cpu_to_le32(ns->params.nsid); in nvme_update_ruh()
502 e->rgid = cpu_to_le16(rg); in nvme_update_ruh()
503 e->ruhid = cpu_to_le16(ruhid); in nvme_update_ruh()
507 nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw)); in nvme_update_ruh()
510 ru->ruamw = ruh->ruamw; in nvme_update_ruh()
519 if (!n->cmb.cmse) { in nvme_addr_is_cmb()
523 lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; in nvme_addr_is_cmb()
524 hi = lo + int128_get64(n->cmb.mem.size); in nvme_addr_is_cmb()
531 hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; in nvme_addr_to_cmb()
532 return &n->cmb.buf[addr - base]; in nvme_addr_to_cmb()
539 if (!n->pmr.cmse) { in nvme_addr_is_pmr()
543 hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); in nvme_addr_is_pmr()
545 return addr >= n->pmr.cba && addr < hi; in nvme_addr_is_pmr()
550 return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); in nvme_addr_to_pmr()
561 * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however, in nvme_addr_is_iomem()
565 lo = n->bar0.addr; in nvme_addr_is_iomem()
566 hi = lo + int128_get64(n->bar0.size); in nvme_addr_is_iomem()
573 hwaddr hi = addr + size - 1; in nvme_addr_read()
578 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { in nvme_addr_read()
593 hwaddr hi = addr + size - 1; in nvme_addr_write()
598 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { in nvme_addr_write()
619 return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; in nvme_check_sqid()
624 return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1; in nvme_check_cqid()
629 cq->tail++; in nvme_inc_cq_tail()
630 if (cq->tail >= cq->size) { in nvme_inc_cq_tail()
631 cq->tail = 0; in nvme_inc_cq_tail()
632 cq->phase = !cq->phase; in nvme_inc_cq_tail()
638 sq->head = (sq->head + 1) % sq->size; in nvme_inc_sq_head()
643 return (cq->tail + 1) % cq->size == cq->head; in nvme_cq_full()
648 return sq->head == sq->tail; in nvme_sq_empty()
654 uint32_t intms = ldl_le_p(&n->bar.intms); in nvme_irq_check()
665 if (~intms & n->irq_status) { in nvme_irq_check()
676 if (cq->irq_enabled) { in nvme_irq_assert()
678 trace_pci_nvme_irq_msix(cq->vector); in nvme_irq_assert()
679 msix_notify(pci, cq->vector); in nvme_irq_assert()
682 assert(cq->vector < 32); in nvme_irq_assert()
683 n->irq_status |= 1 << cq->vector; in nvme_irq_assert()
693 if (cq->irq_enabled) { in nvme_irq_deassert()
697 assert(cq->vector < 32); in nvme_irq_deassert()
698 if (!n->cq_pending) { in nvme_irq_deassert()
699 n->irq_status &= ~(1 << cq->vector); in nvme_irq_deassert()
708 req->ns = NULL; in nvme_req_clear()
709 req->opaque = NULL; in nvme_req_clear()
710 req->aiocb = NULL; in nvme_req_clear()
711 memset(&req->cqe, 0x0, sizeof(req->cqe)); in nvme_req_clear()
712 req->status = NVME_SUCCESS; in nvme_req_clear()
718 pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0); in nvme_sg_init()
719 sg->flags = NVME_SG_DMA; in nvme_sg_init()
721 qemu_iovec_init(&sg->iov, 0); in nvme_sg_init()
724 sg->flags |= NVME_SG_ALLOC; in nvme_sg_init()
729 if (!(sg->flags & NVME_SG_ALLOC)) { in nvme_sg_unmap()
733 if (sg->flags & NVME_SG_DMA) { in nvme_sg_unmap()
734 qemu_sglist_destroy(&sg->qsg); in nvme_sg_unmap()
736 qemu_iovec_destroy(&sg->iov); in nvme_sg_unmap()
751 uint32_t trans_len, count = ns->lbasz; in nvme_sg_split()
753 bool dma = sg->flags & NVME_SG_DMA; in nvme_sg_split()
755 size_t sg_len = dma ? sg->qsg.size : sg->iov.size; in nvme_sg_split()
758 assert(sg->flags & NVME_SG_ALLOC); in nvme_sg_split()
761 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; in nvme_sg_split()
764 trans_len = MIN(trans_len, sge_len - offset); in nvme_sg_split()
768 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset, in nvme_sg_split()
771 qemu_iovec_add(&dst->iov, in nvme_sg_split()
772 sg->iov.iov[sg_idx].iov_base + offset, in nvme_sg_split()
777 sg_len -= trans_len; in nvme_sg_split()
778 count -= trans_len; in nvme_sg_split()
783 count = (dst == data) ? ns->lbasz : ns->lbaf.ms; in nvme_sg_split()
802 if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) { in nvme_map_addr_cmb()
818 if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) { in nvme_map_addr_pmr()
848 if (sg->flags & NVME_SG_DMA) { in nvme_map_addr()
852 if (sg->iov.niov + 1 > IOV_MAX) { in nvme_map_addr()
857 return nvme_map_addr_cmb(n, &sg->iov, addr, len); in nvme_map_addr()
859 return nvme_map_addr_pmr(n, &sg->iov, addr, len); in nvme_map_addr()
863 if (!(sg->flags & NVME_SG_DMA)) { in nvme_map_addr()
867 if (sg->qsg.nsg + 1 > IOV_MAX) { in nvme_map_addr()
871 qemu_sglist_add(&sg->qsg, addr, len); in nvme_map_addr()
889 hwaddr trans_len = n->page_size - (prp1 % n->page_size); in nvme_map_prp()
891 int num_prps = (len >> n->page_bits) + 1; in nvme_map_prp()
904 len -= trans_len; in nvme_map_prp()
906 if (len > n->page_size) { in nvme_map_prp()
907 g_autofree uint64_t *prp_list = g_new(uint64_t, n->max_prp_ents); in nvme_map_prp()
916 nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; in nvme_map_prp()
917 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); in nvme_map_prp()
927 if (i == nents - 1 && len > n->page_size) { in nvme_map_prp()
928 if (unlikely(prp_ent & (n->page_size - 1))) { in nvme_map_prp()
935 nents = (len + n->page_size - 1) >> n->page_bits; in nvme_map_prp()
936 nents = MIN(nents, n->max_prp_ents); in nvme_map_prp()
948 if (unlikely(prp_ent & (n->page_size - 1))) { in nvme_map_prp()
954 trans_len = MIN(len, n->page_size); in nvme_map_prp()
960 len -= trans_len; in nvme_map_prp()
964 if (unlikely(prp2 & (n->page_size - 1))) { in nvme_map_prp()
1020 uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); in nvme_map_sgl_data()
1033 if (UINT64_MAX - addr < dlen) { in nvme_map_sgl_data()
1042 *len -= trans_len; in nvme_map_sgl_data()
1088 switch (NVME_SGL_TYPE(sgld->type)) { in nvme_map_sgl()
1096 seg_len = le32_to_cpu(sgld->len); in nvme_map_sgl()
1103 if (UINT64_MAX - addr < seg_len) { in nvme_map_sgl()
1122 nsgld -= SEG_CHUNK_SIZE; in nvme_map_sgl()
1134 last_sgld = &segment[nsgld - 1]; in nvme_map_sgl()
1139 if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { in nvme_map_sgl()
1152 if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { in nvme_map_sgl()
1158 addr = le64_to_cpu(sgld->addr); in nvme_map_sgl()
1164 status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); in nvme_map_sgl()
1189 switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { in nvme_map_dptr()
1191 prp1 = le64_to_cpu(cmd->dptr.prp1); in nvme_map_dptr()
1192 prp2 = le64_to_cpu(cmd->dptr.prp2); in nvme_map_dptr()
1197 return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); in nvme_map_dptr()
1206 int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); in nvme_map_mptr()
1207 hwaddr mptr = le64_to_cpu(cmd->mptr); in nvme_map_mptr()
1236 NvmeNamespace *ns = req->ns; in nvme_map_data()
1237 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_map_data()
1238 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); in nvme_map_data()
1239 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); in nvme_map_data()
1244 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { in nvme_map_data()
1249 status = nvme_map_dptr(n, &sg, len, &req->cmd); in nvme_map_data()
1254 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); in nvme_map_data()
1255 nvme_sg_split(&sg, ns, &req->sg, NULL); in nvme_map_data()
1261 return nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_map_data()
1266 NvmeNamespace *ns = req->ns; in nvme_map_mdata()
1275 status = nvme_map_dptr(n, &sg, len, &req->cmd); in nvme_map_mdata()
1280 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); in nvme_map_mdata()
1281 nvme_sg_split(&sg, ns, NULL, &req->sg); in nvme_map_mdata()
1287 return nvme_map_mptr(n, &req->sg, len, &req->cmd); in nvme_map_mdata()
1297 bool dma = sg->flags & NVME_SG_DMA; in nvme_tx_interleaved()
1302 assert(sg->flags & NVME_SG_ALLOC); in nvme_tx_interleaved()
1305 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; in nvme_tx_interleaved()
1307 if (sge_len - offset < 0) { in nvme_tx_interleaved()
1308 offset -= sge_len; in nvme_tx_interleaved()
1320 trans_len = MIN(trans_len, sge_len - offset); in nvme_tx_interleaved()
1323 addr = sg->qsg.sg[sg_idx].base + offset; in nvme_tx_interleaved()
1325 addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; in nvme_tx_interleaved()
1339 len -= trans_len; in nvme_tx_interleaved()
1340 count -= trans_len; in nvme_tx_interleaved()
1355 assert(sg->flags & NVME_SG_ALLOC); in nvme_tx()
1357 if (sg->flags & NVME_SG_DMA) { in nvme_tx()
1362 dma_buf_write(ptr, len, &residual, &sg->qsg, attrs); in nvme_tx()
1364 dma_buf_read(ptr, len, &residual, &sg->qsg, attrs); in nvme_tx()
1375 bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); in nvme_tx()
1377 bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); in nvme_tx()
1394 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_c2h()
1399 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); in nvme_c2h()
1407 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_h2c()
1412 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); in nvme_h2c()
1418 NvmeNamespace *ns = req->ns; in nvme_bounce_data()
1419 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_bounce_data()
1420 bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); in nvme_bounce_data()
1421 bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); in nvme_bounce_data()
1424 !(pi && pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) { in nvme_bounce_data()
1425 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, in nvme_bounce_data()
1426 ns->lbaf.ms, 0, dir); in nvme_bounce_data()
1429 return nvme_tx(n, &req->sg, ptr, len, dir); in nvme_bounce_data()
1435 NvmeNamespace *ns = req->ns; in nvme_bounce_mdata()
1439 return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, in nvme_bounce_mdata()
1440 ns->lbasz, ns->lbasz, dir); in nvme_bounce_mdata()
1443 nvme_sg_unmap(&req->sg); in nvme_bounce_mdata()
1445 status = nvme_map_mptr(n, &req->sg, len, &req->cmd); in nvme_bounce_mdata()
1450 return nvme_tx(n, &req->sg, ptr, len, dir); in nvme_bounce_mdata()
1457 assert(req->sg.flags & NVME_SG_ALLOC); in nvme_blk_read()
1459 if (req->sg.flags & NVME_SG_DMA) { in nvme_blk_read()
1460 req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); in nvme_blk_read()
1462 req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); in nvme_blk_read()
1470 assert(req->sg.flags & NVME_SG_ALLOC); in nvme_blk_write()
1472 if (req->sg.flags & NVME_SG_DMA) { in nvme_blk_write()
1473 req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); in nvme_blk_write()
1475 req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); in nvme_blk_write()
1481 trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head); in nvme_update_cq_eventidx()
1483 stl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->ei_addr, cq->head, in nvme_update_cq_eventidx()
1489 ldl_le_pci_dma(PCI_DEVICE(cq->ctrl), cq->db_addr, &cq->head, in nvme_update_cq_head()
1492 trace_pci_nvme_update_cq_head(cq->cqid, cq->head); in nvme_update_cq_head()
1498 NvmeCtrl *n = cq->ctrl; in nvme_post_cqes()
1500 bool pending = cq->head != cq->tail; in nvme_post_cqes()
1503 QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { in nvme_post_cqes()
1507 if (n->dbbuf_enabled) { in nvme_post_cqes()
1516 sq = req->sq; in nvme_post_cqes()
1517 req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); in nvme_post_cqes()
1518 req->cqe.sq_id = cpu_to_le16(sq->sqid); in nvme_post_cqes()
1519 req->cqe.sq_head = cpu_to_le16(sq->head); in nvme_post_cqes()
1520 addr = cq->dma_addr + (cq->tail << NVME_CQES); in nvme_post_cqes()
1521 ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe, in nvme_post_cqes()
1522 sizeof(req->cqe)); in nvme_post_cqes()
1526 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_post_cqes()
1530 QTAILQ_REMOVE(&cq->req_list, req, entry); in nvme_post_cqes()
1533 nvme_sg_unmap(&req->sg); in nvme_post_cqes()
1535 if (QTAILQ_EMPTY(&sq->req_list) && !nvme_sq_empty(sq)) { in nvme_post_cqes()
1536 qemu_bh_schedule(sq->bh); in nvme_post_cqes()
1539 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); in nvme_post_cqes()
1541 if (cq->tail != cq->head) { in nvme_post_cqes()
1542 if (cq->irq_enabled && !pending) { in nvme_post_cqes()
1543 n->cq_pending++; in nvme_post_cqes()
1552 assert(cq->cqid == req->sq->cqid); in nvme_enqueue_req_completion()
1553 trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, in nvme_enqueue_req_completion()
1554 le32_to_cpu(req->cqe.result), in nvme_enqueue_req_completion()
1555 le32_to_cpu(req->cqe.dw1), in nvme_enqueue_req_completion()
1556 req->status); in nvme_enqueue_req_completion()
1558 if (req->status) { in nvme_enqueue_req_completion()
1559 trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), in nvme_enqueue_req_completion()
1560 req->status, req->cmd.opcode); in nvme_enqueue_req_completion()
1563 QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); in nvme_enqueue_req_completion()
1564 QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); in nvme_enqueue_req_completion()
1566 qemu_bh_schedule(cq->bh); in nvme_enqueue_req_completion()
1574 trace_pci_nvme_process_aers(n->aer_queued); in nvme_process_aers()
1576 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { in nvme_process_aers()
1581 if (!n->outstanding_aers) { in nvme_process_aers()
1587 if (n->aer_mask & (1 << event->result.event_type)) { in nvme_process_aers()
1588 trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); in nvme_process_aers()
1592 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_process_aers()
1593 n->aer_queued--; in nvme_process_aers()
1595 n->aer_mask |= 1 << event->result.event_type; in nvme_process_aers()
1596 n->outstanding_aers--; in nvme_process_aers()
1598 req = n->aer_reqs[n->outstanding_aers]; in nvme_process_aers()
1600 result = (NvmeAerResult *) &req->cqe.result; in nvme_process_aers()
1601 result->event_type = event->result.event_type; in nvme_process_aers()
1602 result->event_info = event->result.event_info; in nvme_process_aers()
1603 result->log_page = event->result.log_page; in nvme_process_aers()
1606 trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, in nvme_process_aers()
1607 result->log_page); in nvme_process_aers()
1609 nvme_enqueue_req_completion(&n->admin_cq, req); in nvme_process_aers()
1620 if (n->aer_queued == n->params.aer_max_queued) { in nvme_enqueue_event()
1621 trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); in nvme_enqueue_event()
1626 event->result = (NvmeAerResult) { in nvme_enqueue_event()
1632 QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); in nvme_enqueue_event()
1633 n->aer_queued++; in nvme_enqueue_event()
1643 if (!(NVME_AEC_SMART(n->features.async_config) & event)) { in nvme_smart_event()
1671 n->aer_mask &= ~(1 << event_type); in nvme_clear_events()
1673 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { in nvme_clear_events()
1674 if (event->result.event_type == event_type) { in nvme_clear_events()
1675 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_clear_events()
1676 n->aer_queued--; in nvme_clear_events()
1684 uint8_t mdts = n->params.mdts; in nvme_check_mdts()
1686 if (mdts && len > n->page_size << mdts) { in nvme_check_mdts()
1697 uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); in nvme_check_bounds()
1699 if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { in nvme_check_bounds()
1710 BlockDriverState *bs = blk_bs(ns->blkconf.blk); in nvme_block_status_all()
1723 bytes -= pnum; in nvme_block_status_all()
1753 error_setg_errno(&err, -ret, "unable to get block status"); in nvme_check_dulbe()
1767 return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 : in nvme_zone_idx()
1768 slba / ns->zone_size; in nvme_zone_idx()
1775 if (zone_idx >= ns->num_zones) { in nvme_get_zone_by_slba()
1779 return &ns->zone_array[zone_idx]; in nvme_get_zone_by_slba()
1784 uint64_t zslba = zone->d.zslba; in nvme_check_zone_state_for_write()
1819 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_check_zone_write()
1820 uint64_t ezrwa = zone->w_ptr + 2 * ns->zns.zrwas; in nvme_check_zone_write()
1822 if (slba < zone->w_ptr || slba + nlb > ezrwa) { in nvme_check_zone_write()
1823 trace_pci_nvme_err_zone_invalid_write(slba, zone->w_ptr); in nvme_check_zone_write()
1827 if (unlikely(slba != zone->w_ptr)) { in nvme_check_zone_write()
1828 trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, in nvme_check_zone_write()
1829 zone->w_ptr); in nvme_check_zone_write()
1853 trace_pci_nvme_err_zone_is_offline(zone->d.zslba); in nvme_check_zone_state_for_read()
1879 if (!ns->params.cross_zone_read) { in nvme_check_zone_read()
1883 * Read across zone boundary - check that all subsequent in nvme_check_zone_read()
1912 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_zrm_finish()
1913 zone->d.za &= ~NVME_ZA_ZRWA_VALID; in nvme_zrm_finish()
1914 if (ns->params.numzrwa) { in nvme_zrm_finish()
1915 ns->zns.numzrwa++; in nvme_zrm_finish()
1955 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_zrm_reset()
1956 if (ns->params.numzrwa) { in nvme_zrm_reset()
1957 ns->zns.numzrwa++; in nvme_zrm_reset()
1963 zone->w_ptr = zone->d.zslba; in nvme_zrm_reset()
1964 zone->d.wp = zone->w_ptr; in nvme_zrm_reset()
1979 if (ns->params.max_open_zones && in nvme_zrm_auto_transition_zone()
1980 ns->nr_open_zones == ns->params.max_open_zones) { in nvme_zrm_auto_transition_zone()
1981 zone = QTAILQ_FIRST(&ns->imp_open_zones); in nvme_zrm_auto_transition_zone()
1986 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); in nvme_zrm_auto_transition_zone()
2010 if (n->params.auto_transition_zones) { in nvme_zrm_open_flags()
2043 ns->zns.numzrwa--; in nvme_zrm_open_flags()
2045 zone->d.za |= NVME_ZA_ZRWA_VALID; in nvme_zrm_open_flags()
2064 zone->d.wp += nlb; in nvme_advance_zone_wp()
2066 if (zone->d.wp == nvme_zone_wr_boundary(zone)) { in nvme_advance_zone_wp()
2074 uint16_t nzrwafgs = DIV_ROUND_UP(nlbc, ns->zns.zrwafg); in nvme_zoned_zrwa_implicit_flush()
2076 nlbc = nzrwafgs * ns->zns.zrwafg; in nvme_zoned_zrwa_implicit_flush()
2078 trace_pci_nvme_zoned_zrwa_implicit_flush(zone->d.zslba, nlbc); in nvme_zoned_zrwa_implicit_flush()
2080 zone->w_ptr += nlbc; in nvme_zoned_zrwa_implicit_flush()
2087 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_finalize_zoned_write()
2092 slba = le64_to_cpu(rw->slba); in nvme_finalize_zoned_write()
2093 nlb = le16_to_cpu(rw->nlb) + 1; in nvme_finalize_zoned_write()
2097 if (zone->d.za & NVME_ZA_ZRWA_VALID) { in nvme_finalize_zoned_write()
2098 uint64_t ezrwa = zone->w_ptr + ns->zns.zrwas - 1; in nvme_finalize_zoned_write()
2099 uint64_t elba = slba + nlb - 1; in nvme_finalize_zoned_write()
2102 nvme_zoned_zrwa_implicit_flush(ns, zone, elba - ezrwa); in nvme_finalize_zoned_write()
2113 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_is_write()
2115 return rw->opcode == NVME_CMD_WRITE || in nvme_is_write()
2116 rw->opcode == NVME_CMD_ZONE_APPEND || in nvme_is_write()
2117 rw->opcode == NVME_CMD_WRITE_ZEROES; in nvme_is_write()
2128 if (!req->status) { in nvme_misc_cb()
2129 req->status = NVME_INTERNAL_DEV_ERROR; in nvme_misc_cb()
2132 trace_pci_nvme_err_aio(cid, strerror(-ret), req->status); in nvme_misc_cb()
2141 NvmeNamespace *ns = req->ns; in nvme_rw_complete_cb()
2142 BlockBackend *blk = ns->blkconf.blk; in nvme_rw_complete_cb()
2143 BlockAcctCookie *acct = &req->acct; in nvme_rw_complete_cb()
2153 switch (req->cmd.opcode) { in nvme_rw_complete_cb()
2155 req->status = NVME_UNRECOVERED_READ; in nvme_rw_complete_cb()
2161 req->status = NVME_WRITE_FAULT; in nvme_rw_complete_cb()
2165 req->status = NVME_INTERNAL_DEV_ERROR; in nvme_rw_complete_cb()
2169 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_rw_complete_cb()
2171 error_setg_errno(&err, -ret, "aio failed"); in nvme_rw_complete_cb()
2177 if (ns->params.zoned && nvme_is_write(req)) { in nvme_rw_complete_cb()
2187 NvmeNamespace *ns = req->ns; in nvme_rw_cb()
2189 BlockBackend *blk = ns->blkconf.blk; in nvme_rw_cb()
2197 if (ns->lbaf.ms) { in nvme_rw_cb()
2198 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_rw_cb()
2199 uint64_t slba = le64_to_cpu(rw->slba); in nvme_rw_cb()
2200 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_rw_cb()
2203 if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) { in nvme_rw_cb()
2206 req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, in nvme_rw_cb()
2212 if (nvme_ns_ext(ns) || req->cmd.mptr) { in nvme_rw_cb()
2215 nvme_sg_unmap(&req->sg); in nvme_rw_cb()
2218 ret = -EFAULT; in nvme_rw_cb()
2222 if (req->cmd.opcode == NVME_CMD_READ) { in nvme_rw_cb()
2237 NvmeRequest *req = ctx->req; in nvme_verify_cb()
2238 NvmeNamespace *ns = req->ns; in nvme_verify_cb()
2239 BlockBackend *blk = ns->blkconf.blk; in nvme_verify_cb()
2240 BlockAcctCookie *acct = &req->acct; in nvme_verify_cb()
2242 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify_cb()
2243 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify_cb()
2244 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_verify_cb()
2245 uint16_t apptag = le16_to_cpu(rw->apptag); in nvme_verify_cb()
2246 uint16_t appmask = le16_to_cpu(rw->appmask); in nvme_verify_cb()
2247 uint64_t reftag = le32_to_cpu(rw->reftag); in nvme_verify_cb()
2248 uint64_t cdw3 = le32_to_cpu(rw->cdw3); in nvme_verify_cb()
2257 req->status = NVME_UNRECOVERED_READ; in nvme_verify_cb()
2259 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_verify_cb()
2266 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_verify_cb()
2267 status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, in nvme_verify_cb()
2268 ctx->mdata.iov.size, slba); in nvme_verify_cb()
2270 req->status = status; in nvme_verify_cb()
2274 req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, in nvme_verify_cb()
2275 ctx->mdata.bounce, ctx->mdata.iov.size, in nvme_verify_cb()
2280 qemu_iovec_destroy(&ctx->data.iov); in nvme_verify_cb()
2281 g_free(ctx->data.bounce); in nvme_verify_cb()
2283 qemu_iovec_destroy(&ctx->mdata.iov); in nvme_verify_cb()
2284 g_free(ctx->mdata.bounce); in nvme_verify_cb()
2295 NvmeRequest *req = ctx->req; in nvme_verify_mdata_in_cb()
2296 NvmeNamespace *ns = req->ns; in nvme_verify_mdata_in_cb()
2297 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify_mdata_in_cb()
2298 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify_mdata_in_cb()
2299 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_verify_mdata_in_cb()
2302 BlockBackend *blk = ns->blkconf.blk; in nvme_verify_mdata_in_cb()
2310 ctx->mdata.bounce = g_malloc(mlen); in nvme_verify_mdata_in_cb()
2312 qemu_iovec_reset(&ctx->mdata.iov); in nvme_verify_mdata_in_cb()
2313 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); in nvme_verify_mdata_in_cb()
2315 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, in nvme_verify_mdata_in_cb()
2338 NvmeNamespace *ns = req->ns; in nvme_compare_mdata_cb()
2340 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare_mdata_cb()
2341 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_compare_mdata_cb()
2342 uint16_t apptag = le16_to_cpu(rw->apptag); in nvme_compare_mdata_cb()
2343 uint16_t appmask = le16_to_cpu(rw->appmask); in nvme_compare_mdata_cb()
2344 uint64_t reftag = le32_to_cpu(rw->reftag); in nvme_compare_mdata_cb()
2345 uint64_t cdw3 = le32_to_cpu(rw->cdw3); in nvme_compare_mdata_cb()
2346 struct nvme_compare_ctx *ctx = req->opaque; in nvme_compare_mdata_cb()
2348 BlockBackend *blk = ns->blkconf.blk; in nvme_compare_mdata_cb()
2349 BlockAcctCookie *acct = &req->acct; in nvme_compare_mdata_cb()
2359 req->status = NVME_UNRECOVERED_READ; in nvme_compare_mdata_cb()
2361 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_compare_mdata_cb()
2366 buf = g_malloc(ctx->mdata.iov.size); in nvme_compare_mdata_cb()
2368 status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, in nvme_compare_mdata_cb()
2371 req->status = status; in nvme_compare_mdata_cb()
2375 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_compare_mdata_cb()
2376 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare_mdata_cb()
2378 uint8_t *mbufp = ctx->mdata.bounce; in nvme_compare_mdata_cb()
2379 uint8_t *end = mbufp + ctx->mdata.iov.size; in nvme_compare_mdata_cb()
2382 status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, in nvme_compare_mdata_cb()
2383 ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, in nvme_compare_mdata_cb()
2386 req->status = status; in nvme_compare_mdata_cb()
2394 if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { in nvme_compare_mdata_cb()
2395 pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); in nvme_compare_mdata_cb()
2398 for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { in nvme_compare_mdata_cb()
2399 if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { in nvme_compare_mdata_cb()
2400 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_mdata_cb()
2408 if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { in nvme_compare_mdata_cb()
2409 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_mdata_cb()
2416 qemu_iovec_destroy(&ctx->data.iov); in nvme_compare_mdata_cb()
2417 g_free(ctx->data.bounce); in nvme_compare_mdata_cb()
2419 qemu_iovec_destroy(&ctx->mdata.iov); in nvme_compare_mdata_cb()
2420 g_free(ctx->mdata.bounce); in nvme_compare_mdata_cb()
2431 NvmeNamespace *ns = req->ns; in nvme_compare_data_cb()
2432 BlockBackend *blk = ns->blkconf.blk; in nvme_compare_data_cb()
2433 BlockAcctCookie *acct = &req->acct; in nvme_compare_data_cb()
2436 struct nvme_compare_ctx *ctx = req->opaque; in nvme_compare_data_cb()
2444 req->status = NVME_UNRECOVERED_READ; in nvme_compare_data_cb()
2446 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); in nvme_compare_data_cb()
2451 buf = g_malloc(ctx->data.iov.size); in nvme_compare_data_cb()
2453 status = nvme_bounce_data(n, buf, ctx->data.iov.size, in nvme_compare_data_cb()
2456 req->status = status; in nvme_compare_data_cb()
2460 if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { in nvme_compare_data_cb()
2461 req->status = NVME_CMP_FAILURE | NVME_DNR; in nvme_compare_data_cb()
2465 if (ns->lbaf.ms) { in nvme_compare_data_cb()
2466 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare_data_cb()
2467 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare_data_cb()
2468 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_compare_data_cb()
2472 ctx->mdata.bounce = g_malloc(mlen); in nvme_compare_data_cb()
2474 qemu_iovec_init(&ctx->mdata.iov, 1); in nvme_compare_data_cb()
2475 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); in nvme_compare_data_cb()
2477 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, in nvme_compare_data_cb()
2485 qemu_iovec_destroy(&ctx->data.iov); in nvme_compare_data_cb()
2486 g_free(ctx->data.bounce); in nvme_compare_data_cb()
2508 iocb->idx = iocb->nr; in nvme_dsm_cancel()
2509 iocb->ret = -ECANCELED; in nvme_dsm_cancel()
2511 if (iocb->aiocb) { in nvme_dsm_cancel()
2512 blk_aio_cancel_async(iocb->aiocb); in nvme_dsm_cancel()
2513 iocb->aiocb = NULL; in nvme_dsm_cancel()
2519 assert(iocb->idx == iocb->nr); in nvme_dsm_cancel()
2533 NvmeRequest *req = iocb->req; in nvme_dsm_md_cb()
2534 NvmeNamespace *ns = req->ns; in nvme_dsm_md_cb()
2539 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_dsm_md_cb()
2543 range = &iocb->range[iocb->idx - 1]; in nvme_dsm_md_cb()
2544 slba = le64_to_cpu(range->slba); in nvme_dsm_md_cb()
2545 nlb = le32_to_cpu(range->nlb); in nvme_dsm_md_cb()
2562 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), in nvme_dsm_md_cb()
2574 NvmeRequest *req = iocb->req; in nvme_dsm_cb()
2576 NvmeNamespace *ns = req->ns; in nvme_dsm_cb()
2581 if (iocb->ret < 0) { in nvme_dsm_cb()
2584 iocb->ret = ret; in nvme_dsm_cb()
2589 if (iocb->idx == iocb->nr) { in nvme_dsm_cb()
2593 range = &iocb->range[iocb->idx++]; in nvme_dsm_cb()
2594 slba = le64_to_cpu(range->slba); in nvme_dsm_cb()
2595 nlb = le32_to_cpu(range->nlb); in nvme_dsm_cb()
2599 if (nlb > n->dmrsl) { in nvme_dsm_cb()
2600 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl); in nvme_dsm_cb()
2606 ns->id_ns.nsze); in nvme_dsm_cb()
2610 iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba), in nvme_dsm_cb()
2616 iocb->aiocb = NULL; in nvme_dsm_cb()
2617 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_dsm_cb()
2618 g_free(iocb->range); in nvme_dsm_cb()
2624 NvmeNamespace *ns = req->ns; in nvme_dsm()
2625 NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd; in nvme_dsm()
2626 uint32_t attr = le32_to_cpu(dsm->attributes); in nvme_dsm() local
2627 uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1; in nvme_dsm()
2630 trace_pci_nvme_dsm(nr, attr); in nvme_dsm()
2632 if (attr & NVME_DSMGMT_AD) { in nvme_dsm()
2633 NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk, in nvme_dsm()
2636 iocb->req = req; in nvme_dsm()
2637 iocb->ret = 0; in nvme_dsm()
2638 iocb->range = g_new(NvmeDsmRange, nr); in nvme_dsm()
2639 iocb->nr = nr; in nvme_dsm()
2640 iocb->idx = 0; in nvme_dsm()
2642 status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, in nvme_dsm()
2645 g_free(iocb->range); in nvme_dsm()
2651 req->aiocb = &iocb->common; in nvme_dsm()
2662 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_verify()
2663 NvmeNamespace *ns = req->ns; in nvme_verify()
2664 BlockBackend *blk = ns->blkconf.blk; in nvme_verify()
2665 uint64_t slba = le64_to_cpu(rw->slba); in nvme_verify()
2666 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_verify()
2670 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_verify()
2671 uint32_t reftag = le32_to_cpu(rw->reftag); in nvme_verify()
2677 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_verify()
2688 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_verify()
2692 if (data_len > (n->page_size << n->params.vsl)) { in nvme_verify()
2701 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_verify()
2709 ctx->req = req; in nvme_verify()
2711 ctx->data.bounce = g_malloc(len); in nvme_verify()
2713 qemu_iovec_init(&ctx->data.iov, 1); in nvme_verify()
2714 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); in nvme_verify()
2716 block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, in nvme_verify()
2719 req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, in nvme_verify()
2755 iocb->ret = -ECANCELED; in nvme_copy_cancel()
2757 if (iocb->aiocb) { in nvme_copy_cancel()
2758 blk_aio_cancel_async(iocb->aiocb); in nvme_copy_cancel()
2759 iocb->aiocb = NULL; in nvme_copy_cancel()
2770 NvmeRequest *req = iocb->req; in nvme_copy_done()
2771 NvmeNamespace *ns = req->ns; in nvme_copy_done()
2772 BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); in nvme_copy_done()
2774 if (iocb->idx != iocb->nr) { in nvme_copy_done()
2775 req->cqe.result = cpu_to_le32(iocb->idx); in nvme_copy_done()
2778 qemu_iovec_destroy(&iocb->iov); in nvme_copy_done()
2779 g_free(iocb->bounce); in nvme_copy_done()
2781 if (iocb->ret < 0) { in nvme_copy_done()
2782 block_acct_failed(stats, &iocb->acct.read); in nvme_copy_done()
2783 block_acct_failed(stats, &iocb->acct.write); in nvme_copy_done()
2785 block_acct_done(stats, &iocb->acct.read); in nvme_copy_done()
2786 block_acct_done(stats, &iocb->acct.write); in nvme_copy_done()
2789 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_copy_done()
2902 nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, in nvme_check_copy_mcl()
2906 iocb->tcl = copy_len; in nvme_check_copy_mcl()
2907 if (copy_len > ns->id_ns.mcl) { in nvme_check_copy_mcl()
2917 NvmeRequest *req = iocb->req; in nvme_copy_out_completed_cb()
2918 NvmeNamespace *dns = req->ns; in nvme_copy_out_completed_cb()
2921 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_completed_cb()
2925 iocb->ret = ret; in nvme_copy_out_completed_cb()
2926 req->status = NVME_WRITE_FAULT; in nvme_copy_out_completed_cb()
2928 } else if (iocb->ret < 0) { in nvme_copy_out_completed_cb()
2932 if (dns->params.zoned) { in nvme_copy_out_completed_cb()
2933 nvme_advance_zone_wp(dns, iocb->zone, nlb); in nvme_copy_out_completed_cb()
2936 iocb->idx++; in nvme_copy_out_completed_cb()
2937 iocb->slba += nlb; in nvme_copy_out_completed_cb()
2945 NvmeRequest *req = iocb->req; in nvme_copy_out_cb()
2946 NvmeNamespace *dns = req->ns; in nvme_copy_out_cb()
2951 if (ret < 0 || iocb->ret < 0 || !dns->lbaf.ms) { in nvme_copy_out_cb()
2955 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_cb()
2959 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_out_cb()
2961 qemu_iovec_reset(&iocb->iov); in nvme_copy_out_cb()
2962 qemu_iovec_add(&iocb->iov, mbounce, mlen); in nvme_copy_out_cb()
2964 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_moff(dns, iocb->slba), in nvme_copy_out_cb()
2965 &iocb->iov, 0, nvme_copy_out_completed_cb, in nvme_copy_out_cb()
2977 NvmeRequest *req = iocb->req; in nvme_copy_in_completed_cb()
2978 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_completed_cb()
2979 NvmeNamespace *dns = req->ns; in nvme_copy_in_completed_cb()
2990 iocb->ret = ret; in nvme_copy_in_completed_cb()
2991 req->status = NVME_UNRECOVERED_READ; in nvme_copy_in_completed_cb()
2993 } else if (iocb->ret < 0) { in nvme_copy_in_completed_cb()
2997 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_completed_cb()
3000 trace_pci_nvme_copy_out(iocb->slba, nlb); in nvme_copy_in_completed_cb()
3004 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps)) { in nvme_copy_in_completed_cb()
3005 copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy_in_completed_cb()
3007 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); in nvme_copy_in_completed_cb()
3010 mbounce = iocb->bounce + nvme_l2b(sns, nlb); in nvme_copy_in_completed_cb()
3016 status = nvme_dif_check(sns, iocb->bounce, len, mbounce, mlen, prinfor, in nvme_copy_in_completed_cb()
3023 if (NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_copy_in_completed_cb()
3024 copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy_in_completed_cb()
3025 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); in nvme_copy_in_completed_cb()
3028 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_in_completed_cb()
3030 apptag = le16_to_cpu(copy->apptag); in nvme_copy_in_completed_cb()
3031 appmask = le16_to_cpu(copy->appmask); in nvme_copy_in_completed_cb()
3034 status = nvme_check_prinfo(dns, prinfow, iocb->slba, iocb->reftag); in nvme_copy_in_completed_cb()
3039 nvme_dif_pract_generate_dif(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3040 apptag, &iocb->reftag); in nvme_copy_in_completed_cb()
3042 status = nvme_dif_check(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3043 prinfow, iocb->slba, apptag, appmask, in nvme_copy_in_completed_cb()
3044 &iocb->reftag); in nvme_copy_in_completed_cb()
3051 status = nvme_check_bounds(dns, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3056 if (dns->params.zoned) { in nvme_copy_in_completed_cb()
3057 status = nvme_check_zone_write(dns, iocb->zone, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3062 if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_copy_in_completed_cb()
3063 iocb->zone->w_ptr += nlb; in nvme_copy_in_completed_cb()
3067 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_completed_cb()
3068 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_copy_in_completed_cb()
3070 block_acct_start(blk_get_stats(dns->blkconf.blk), &iocb->acct.write, 0, in nvme_copy_in_completed_cb()
3073 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_l2b(dns, iocb->slba), in nvme_copy_in_completed_cb()
3074 &iocb->iov, 0, nvme_copy_out_cb, iocb); in nvme_copy_in_completed_cb()
3079 req->status = status; in nvme_copy_in_completed_cb()
3080 iocb->ret = -1; in nvme_copy_in_completed_cb()
3088 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_cb()
3092 if (ret < 0 || iocb->ret < 0 || !sns->lbaf.ms) { in nvme_copy_in_cb()
3096 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_cb()
3099 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_cb()
3100 qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(sns, nlb), in nvme_copy_in_cb()
3103 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_moff(sns, slba), in nvme_copy_in_cb()
3104 &iocb->iov, 0, nvme_copy_in_completed_cb, in nvme_copy_in_cb()
3120 return sns->lbaf.ds == dns->lbaf.ds && sns->lbaf.ms == dns->lbaf.ms; in nvme_copy_ns_format_match()
3126 if (!nvme_csi_supports_copy(sns->csi) || in nvme_copy_matching_ns_format()
3127 !nvme_csi_supports_copy(dns->csi)) { in nvme_copy_matching_ns_format()
3136 sns->id_ns.dps != dns->id_ns.dps)) { in nvme_copy_matching_ns_format()
3146 return sns->lbaf.ms == 0 && in nvme_copy_corresp_pi_match()
3147 ((dns->lbaf.ms == 8 && dns->pif == 0) || in nvme_copy_corresp_pi_match()
3148 (dns->lbaf.ms == 16 && dns->pif == 1)); in nvme_copy_corresp_pi_match()
3154 if (!nvme_csi_supports_copy(sns->csi) || in nvme_copy_corresp_pi_format()
3155 !nvme_csi_supports_copy(dns->csi)) { in nvme_copy_corresp_pi_format()
3172 NvmeRequest *req = iocb->req; in nvme_do_copy()
3174 NvmeNamespace *dns = req->ns; in nvme_do_copy()
3175 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; in nvme_do_copy()
3176 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); in nvme_do_copy()
3177 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); in nvme_do_copy()
3182 uint32_t dnsid = le32_to_cpu(req->cmd.nsid); in nvme_do_copy()
3185 if (iocb->ret < 0) { in nvme_do_copy()
3189 if (iocb->idx == iocb->nr) { in nvme_do_copy()
3193 if (iocb->format == 2 || iocb->format == 3) { in nvme_do_copy()
3194 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3198 !nvme_nsid_valid(iocb->n, snsid)) { in nvme_do_copy()
3202 iocb->sns = nvme_ns(iocb->n, snsid); in nvme_do_copy()
3203 if (unlikely(!iocb->sns)) { in nvme_do_copy()
3208 if (((slba + nlb) > iocb->slba) && in nvme_do_copy()
3209 ((slba + nlb) < (iocb->slba + iocb->tcl))) { in nvme_do_copy()
3215 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3219 sns = iocb->sns; in nvme_do_copy()
3220 if ((snsid == dnsid) && NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3225 if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3226 !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3232 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3233 NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3246 if (!NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3247 NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3259 if (NVME_ID_NS_DPS_TYPE(sns->id_ns.dps) && in nvme_do_copy()
3260 !NVME_ID_NS_DPS_TYPE(dns->id_ns.dps)) { in nvme_do_copy()
3276 if (nlb > le16_to_cpu(sns->id_ns.mssrl)) { in nvme_do_copy()
3286 if (NVME_ERR_REC_DULBE(sns->features.err_rec)) { in nvme_do_copy()
3293 if (sns->params.zoned) { in nvme_do_copy()
3300 g_free(iocb->bounce); in nvme_do_copy()
3301 iocb->bounce = g_malloc_n(le16_to_cpu(sns->id_ns.mssrl), in nvme_do_copy()
3302 sns->lbasz + sns->lbaf.ms); in nvme_do_copy()
3304 qemu_iovec_reset(&iocb->iov); in nvme_do_copy()
3305 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_do_copy()
3307 block_acct_start(blk_get_stats(sns->blkconf.blk), &iocb->acct.read, 0, in nvme_do_copy()
3310 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba), in nvme_do_copy()
3311 &iocb->iov, 0, nvme_copy_in_cb, iocb); in nvme_do_copy()
3315 req->status = status; in nvme_do_copy()
3316 iocb->ret = -1; in nvme_do_copy()
3323 NvmeNamespace *ns = req->ns; in nvme_copy()
3324 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; in nvme_copy()
3325 NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, in nvme_copy()
3327 uint16_t nr = copy->nr + 1; in nvme_copy()
3328 uint8_t format = copy->control[0] & 0xf; in nvme_copy()
3335 iocb->ranges = NULL; in nvme_copy()
3336 iocb->zone = NULL; in nvme_copy()
3338 if (!(n->id_ctrl.ocfs & (1 << format)) || in nvme_copy()
3340 !(n->features.hbs.cdfe & (1 << format)))) { in nvme_copy()
3346 if (nr > ns->id_ns.msrc + 1) { in nvme_copy()
3351 if ((ns->pif == 0x0 && (format != 0x0 && format != 0x2)) || in nvme_copy()
3352 (ns->pif != 0x0 && (format != 0x1 && format != 0x3))) { in nvme_copy()
3357 if (ns->pif) { in nvme_copy()
3361 iocb->format = format; in nvme_copy()
3362 iocb->ranges = g_malloc_n(nr, len); in nvme_copy()
3363 status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); in nvme_copy()
3368 iocb->slba = le64_to_cpu(copy->sdlba); in nvme_copy()
3370 if (ns->params.zoned) { in nvme_copy()
3371 iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); in nvme_copy()
3372 if (!iocb->zone) { in nvme_copy()
3377 status = nvme_zrm_auto(n, ns, iocb->zone); in nvme_copy()
3388 iocb->req = req; in nvme_copy()
3389 iocb->ret = 0; in nvme_copy()
3390 iocb->nr = nr; in nvme_copy()
3391 iocb->idx = 0; in nvme_copy()
3392 iocb->reftag = le32_to_cpu(copy->reftag); in nvme_copy()
3393 iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32; in nvme_copy()
3395 qemu_iovec_init(&iocb->iov, 1); in nvme_copy()
3397 req->aiocb = &iocb->common; in nvme_copy()
3398 iocb->sns = req->ns; in nvme_copy()
3399 iocb->n = n; in nvme_copy()
3400 iocb->bounce = NULL; in nvme_copy()
3406 g_free(iocb->ranges); in nvme_copy()
3413 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_compare()
3414 NvmeNamespace *ns = req->ns; in nvme_compare()
3415 BlockBackend *blk = ns->blkconf.blk; in nvme_compare()
3416 uint64_t slba = le64_to_cpu(rw->slba); in nvme_compare()
3417 uint32_t nlb = le16_to_cpu(rw->nlb) + 1; in nvme_compare()
3418 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_compare()
3427 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) { in nvme_compare()
3435 if (NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt)) { in nvme_compare()
3449 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_compare()
3456 status = nvme_map_dptr(n, &req->sg, len, &req->cmd); in nvme_compare()
3462 ctx->data.bounce = g_malloc(data_len); in nvme_compare()
3464 req->opaque = ctx; in nvme_compare()
3466 qemu_iovec_init(&ctx->data.iov, 1); in nvme_compare()
3467 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); in nvme_compare()
3469 block_acct_start(blk_get_stats(blk), &req->acct, data_len, in nvme_compare()
3471 req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, in nvme_compare()
3492 iocb->ret = -ECANCELED; in nvme_flush_cancel()
3494 if (iocb->aiocb) { in nvme_flush_cancel()
3495 blk_aio_cancel_async(iocb->aiocb); in nvme_flush_cancel()
3496 iocb->aiocb = NULL; in nvme_flush_cancel()
3510 NvmeNamespace *ns = iocb->ns; in nvme_flush_ns_cb()
3513 iocb->ret = ret; in nvme_flush_ns_cb()
3514 iocb->req->status = NVME_WRITE_FAULT; in nvme_flush_ns_cb()
3516 } else if (iocb->ret < 0) { in nvme_flush_ns_cb()
3521 trace_pci_nvme_flush_ns(iocb->nsid); in nvme_flush_ns_cb()
3523 iocb->ns = NULL; in nvme_flush_ns_cb()
3524 iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); in nvme_flush_ns_cb()
3534 NvmeRequest *req = iocb->req; in nvme_do_flush()
3538 if (iocb->ret < 0) { in nvme_do_flush()
3542 if (iocb->broadcast) { in nvme_do_flush()
3543 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_flush()
3544 iocb->ns = nvme_ns(n, i); in nvme_do_flush()
3545 if (iocb->ns) { in nvme_do_flush()
3546 iocb->nsid = i; in nvme_do_flush()
3552 if (!iocb->ns) { in nvme_do_flush()
3560 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_flush()
3567 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_flush()
3572 iocb->req = req; in nvme_flush()
3573 iocb->ret = 0; in nvme_flush()
3574 iocb->ns = NULL; in nvme_flush()
3575 iocb->nsid = 0; in nvme_flush()
3576 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_flush()
3578 if (!iocb->broadcast) { in nvme_flush()
3584 iocb->ns = nvme_ns(n, nsid); in nvme_flush()
3585 if (!iocb->ns) { in nvme_flush()
3590 iocb->nsid = nsid; in nvme_flush()
3593 req->aiocb = &iocb->common; in nvme_flush()
3606 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_read()
3607 NvmeNamespace *ns = req->ns; in nvme_read()
3608 uint64_t slba = le64_to_cpu(rw->slba); in nvme_read()
3609 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_read()
3610 uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); in nvme_read()
3614 BlockBackend *blk = ns->blkconf.blk; in nvme_read()
3617 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_read()
3620 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_read()
3623 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { in nvme_read()
3641 if (ns->params.zoned) { in nvme_read()
3649 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { in nvme_read()
3656 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_read()
3667 block_acct_start(blk_get_stats(blk), &req->acct, data_size, in nvme_read()
3680 NvmeNamespace *ns = req->ns; in nvme_do_write_fdp()
3681 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_do_write_fdp()
3683 uint32_t dw12 = le32_to_cpu(req->cmd.cdw12); in nvme_do_write_fdp()
3685 uint16_t pid = le16_to_cpu(rw->dspec); in nvme_do_write_fdp()
3695 ruhid = ns->fdp.phs[ph]; in nvme_do_write_fdp()
3696 ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg]; in nvme_do_write_fdp()
3698 nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size); in nvme_do_write_fdp()
3699 nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size); in nvme_do_write_fdp()
3702 if (nlb < ru->ruamw) { in nvme_do_write_fdp()
3703 ru->ruamw -= nlb; in nvme_do_write_fdp()
3707 nlb -= ru->ruamw; in nvme_do_write_fdp()
3715 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; in nvme_do_write()
3716 NvmeNamespace *ns = req->ns; in nvme_do_write()
3717 uint64_t slba = le64_to_cpu(rw->slba); in nvme_do_write()
3718 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; in nvme_do_write()
3719 uint16_t ctrl = le16_to_cpu(rw->control); in nvme_do_write()
3725 NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; in nvme_do_write()
3726 BlockBackend *blk = ns->blkconf.blk; in nvme_do_write()
3729 if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { in nvme_do_write()
3732 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3735 if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) { in nvme_do_write()
3736 mapped_size -= nvme_m2b(ns, nlb); in nvme_do_write()
3741 trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), in nvme_do_write()
3756 if (ns->params.zoned) { in nvme_do_write()
3763 if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_do_write()
3767 if (unlikely(slba != zone->d.zslba)) { in nvme_do_write()
3768 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); in nvme_do_write()
3773 if (n->params.zasl && in nvme_do_write()
3774 data_size > (uint64_t)n->page_size << n->params.zasl) { in nvme_do_write()
3779 slba = zone->w_ptr; in nvme_do_write()
3780 rw->slba = cpu_to_le64(slba); in nvme_do_write()
3781 res->slba = cpu_to_le64(slba); in nvme_do_write()
3783 switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3793 uint32_t reftag = le32_to_cpu(rw->reftag); in nvme_do_write()
3794 rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); in nvme_do_write()
3818 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_do_write()
3819 zone->w_ptr += nlb; in nvme_do_write()
3821 } else if (ns->endgrp && ns->endgrp->fdp.enabled) { in nvme_do_write()
3827 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { in nvme_do_write()
3837 block_acct_start(blk_get_stats(blk), &req->acct, data_size, in nvme_do_write()
3841 req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, in nvme_do_write()
3871 uint32_t dw10 = le32_to_cpu(c->cdw10); in nvme_get_mgmt_zone_slba_idx()
3872 uint32_t dw11 = le32_to_cpu(c->cdw11); in nvme_get_mgmt_zone_slba_idx()
3874 if (!ns->params.zoned) { in nvme_get_mgmt_zone_slba_idx()
3875 trace_pci_nvme_err_invalid_opc(c->opcode); in nvme_get_mgmt_zone_slba_idx()
3880 if (unlikely(*slba >= ns->id_ns.nsze)) { in nvme_get_mgmt_zone_slba_idx()
3881 trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); in nvme_get_mgmt_zone_slba_idx()
3887 assert(*zone_idx < ns->num_zones); in nvme_get_mgmt_zone_slba_idx()
3906 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; in nvme_open_zone()
3909 if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) { in nvme_open_zone()
3910 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); in nvme_open_zone()
3916 if (zone->w_ptr % ns->zns.zrwafg) { in nvme_open_zone()
3963 zone->d.za |= NVME_ZA_ZD_EXT_VALID; in nvme_set_zd_ext()
4016 QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { in nvme_do_zone_op()
4025 QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { in nvme_do_zone_op()
4033 QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { in nvme_do_zone_op()
4042 QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { in nvme_do_zone_op()
4052 for (i = 0; i < ns->num_zones; i++, zone++) { in nvme_do_zone_op()
4080 NvmeRequest *req = iocb->req; in nvme_zone_reset_cancel()
4081 NvmeNamespace *ns = req->ns; in nvme_zone_reset_cancel()
4083 iocb->idx = ns->num_zones; in nvme_zone_reset_cancel()
4085 iocb->ret = -ECANCELED; in nvme_zone_reset_cancel()
4087 if (iocb->aiocb) { in nvme_zone_reset_cancel()
4088 blk_aio_cancel_async(iocb->aiocb); in nvme_zone_reset_cancel()
4089 iocb->aiocb = NULL; in nvme_zone_reset_cancel()
4103 NvmeRequest *req = iocb->req; in nvme_zone_reset_epilogue_cb()
4104 NvmeNamespace *ns = req->ns; in nvme_zone_reset_epilogue_cb()
4108 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_zone_reset_epilogue_cb()
4112 moff = nvme_moff(ns, iocb->zone->d.zslba); in nvme_zone_reset_epilogue_cb()
4113 count = nvme_m2b(ns, ns->zone_size); in nvme_zone_reset_epilogue_cb()
4115 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, in nvme_zone_reset_epilogue_cb()
4127 NvmeRequest *req = iocb->req; in nvme_zone_reset_cb()
4128 NvmeNamespace *ns = req->ns; in nvme_zone_reset_cb()
4130 if (iocb->ret < 0) { in nvme_zone_reset_cb()
4133 iocb->ret = ret; in nvme_zone_reset_cb()
4137 if (iocb->zone) { in nvme_zone_reset_cb()
4138 nvme_zrm_reset(ns, iocb->zone); in nvme_zone_reset_cb()
4140 if (!iocb->all) { in nvme_zone_reset_cb()
4145 while (iocb->idx < ns->num_zones) { in nvme_zone_reset_cb()
4146 NvmeZone *zone = &ns->zone_array[iocb->idx++]; in nvme_zone_reset_cb()
4150 if (!iocb->all) { in nvme_zone_reset_cb()
4160 iocb->zone = zone; in nvme_zone_reset_cb()
4167 trace_pci_nvme_zns_zone_reset(zone->d.zslba); in nvme_zone_reset_cb()
4169 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, in nvme_zone_reset_cb()
4170 nvme_l2b(ns, zone->d.zslba), in nvme_zone_reset_cb()
4171 nvme_l2b(ns, ns->zone_size), in nvme_zone_reset_cb()
4179 iocb->aiocb = NULL; in nvme_zone_reset_cb()
4181 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_zone_reset_cb()
4188 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_send_zrwa_flush()
4189 uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs); in nvme_zone_mgmt_send_zrwa_flush()
4190 uint64_t wp = zone->d.wp; in nvme_zone_mgmt_send_zrwa_flush()
4191 uint32_t nlb = elba - wp + 1; in nvme_zone_mgmt_send_zrwa_flush()
4199 if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_zone_mgmt_send_zrwa_flush()
4203 if (elba < wp || elba > wp + ns->zns.zrwas) { in nvme_zone_mgmt_send_zrwa_flush()
4207 if (nlb % ns->zns.zrwafg) { in nvme_zone_mgmt_send_zrwa_flush()
4216 zone->w_ptr += nlb; in nvme_zone_mgmt_send_zrwa_flush()
4225 NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd; in nvme_zone_mgmt_send()
4226 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_send()
4233 uint8_t action = cmd->zsa; in nvme_zone_mgmt_send()
4237 all = cmd->zsflags & NVME_ZSFLAG_SELECT_ALL; in nvme_zone_mgmt_send()
4239 req->status = NVME_SUCCESS; in nvme_zone_mgmt_send()
4242 status = nvme_get_mgmt_zone_slba_idx(ns, &req->cmd, &slba, &zone_idx); in nvme_zone_mgmt_send()
4248 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_send()
4249 if (slba != zone->d.zslba && action != NVME_ZONE_ACTION_ZRWA_FLUSH) { in nvme_zone_mgmt_send()
4250 trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); in nvme_zone_mgmt_send()
4283 iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, in nvme_zone_mgmt_send()
4286 iocb->req = req; in nvme_zone_mgmt_send()
4287 iocb->ret = 0; in nvme_zone_mgmt_send()
4288 iocb->all = all; in nvme_zone_mgmt_send()
4289 iocb->idx = zone_idx; in nvme_zone_mgmt_send()
4290 iocb->zone = NULL; in nvme_zone_mgmt_send()
4292 req->aiocb = &iocb->common; in nvme_zone_mgmt_send()
4307 if (all || !ns->params.zd_extension_size) { in nvme_zone_mgmt_send()
4311 status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); in nvme_zone_mgmt_send()
4338 zone->d.za); in nvme_zone_mgmt_send()
4375 NvmeCmd *cmd = &req->cmd; in nvme_zone_mgmt_recv()
4376 NvmeNamespace *ns = req->ns; in nvme_zone_mgmt_recv()
4377 /* cdw12 is zero-based number of dwords to return. Convert to bytes */ in nvme_zone_mgmt_recv()
4378 uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; in nvme_zone_mgmt_recv()
4379 uint32_t dw13 = le32_to_cpu(cmd->cdw13); in nvme_zone_mgmt_recv()
4391 req->status = NVME_SUCCESS; in nvme_zone_mgmt_recv()
4402 if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { in nvme_zone_mgmt_recv()
4424 zone_entry_sz += ns->params.zd_extension_size; in nvme_zone_mgmt_recv()
4427 max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; in nvme_zone_mgmt_recv()
4430 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_recv()
4431 for (i = zone_idx; i < ns->num_zones; i++) { in nvme_zone_mgmt_recv()
4440 header->nr_zones = cpu_to_le64(nr_zones); in nvme_zone_mgmt_recv()
4443 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { in nvme_zone_mgmt_recv()
4444 zone = &ns->zone_array[zone_idx]; in nvme_zone_mgmt_recv()
4449 z->zt = zone->d.zt; in nvme_zone_mgmt_recv()
4450 z->zs = zone->d.zs; in nvme_zone_mgmt_recv()
4451 z->zcap = cpu_to_le64(zone->d.zcap); in nvme_zone_mgmt_recv()
4452 z->zslba = cpu_to_le64(zone->d.zslba); in nvme_zone_mgmt_recv()
4453 z->za = zone->d.za; in nvme_zone_mgmt_recv()
4456 z->wp = cpu_to_le64(zone->d.wp); in nvme_zone_mgmt_recv()
4458 z->wp = cpu_to_le64(~0ULL); in nvme_zone_mgmt_recv()
4462 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { in nvme_zone_mgmt_recv()
4464 ns->params.zd_extension_size); in nvme_zone_mgmt_recv()
4466 buf_p += ns->params.zd_extension_size; in nvme_zone_mgmt_recv()
4469 max_zones--; in nvme_zone_mgmt_recv()
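/*
 * Standalone sketch (not from the listed source): Report Zones returns a
 * fixed header followed by one descriptor per zone, optionally followed by
 * the zone descriptor extension.  The header and descriptor sizes below are
 * taken from the ZNS spec but are only used for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define REPORT_HDR_SIZE   64u
#define ZONE_DESCR_SIZE   64u

/* cdw12 is a 0's-based dword count for the returned data buffer. */
static uint32_t max_zones_in_report(uint32_t cdw12, int extended,
                                    uint32_t zd_ext_size)
{
    uint32_t data_size = (cdw12 + 1) << 2;               /* dwords -> bytes */
    uint32_t entry = ZONE_DESCR_SIZE + (extended ? zd_ext_size : 0);

    if (data_size <= REPORT_HDR_SIZE) {
        return 0;
    }
    return (data_size - REPORT_HDR_SIZE) / entry;
}

int main(void)
{
    /* 4 KiB buffer, extended report with a 64-byte extension per zone */
    printf("%u\n", max_zones_in_report(1023, 1, 64));
    return 0;
}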
4483 NvmeNamespace *ns = req->ns; in nvme_io_mgmt_recv_ruhs()
4492 if (!n->subsys) { in nvme_io_mgmt_recv_ruhs()
4496 if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) { in nvme_io_mgmt_recv_ruhs()
4500 if (!n->subsys->endgrp.fdp.enabled) { in nvme_io_mgmt_recv_ruhs()
4504 endgrp = ns->endgrp; in nvme_io_mgmt_recv_ruhs()
4506 nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; in nvme_io_mgmt_recv_ruhs()
4515 hdr->nruhsd = cpu_to_le16(nruhsd); in nvme_io_mgmt_recv_ruhs()
4517 ruhid = ns->fdp.phs; in nvme_io_mgmt_recv_ruhs()
4519 for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) { in nvme_io_mgmt_recv_ruhs()
4520 NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid]; in nvme_io_mgmt_recv_ruhs()
4522 for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) { in nvme_io_mgmt_recv_ruhs()
4525 ruhsd->pid = cpu_to_le16(pid); in nvme_io_mgmt_recv_ruhs()
4526 ruhsd->ruhid = *ruhid; in nvme_io_mgmt_recv_ruhs()
4527 ruhsd->earutr = 0; in nvme_io_mgmt_recv_ruhs()
4528 ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw); in nvme_io_mgmt_recv_ruhs()
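/*
 * Standalone sketch (not from the listed source): the reclaim unit handle
 * status reply carries one descriptor per (placement handle, reclaim group)
 * pair, so the buffer is sized as header plus nphs * nrg descriptors.  The
 * header and descriptor sizes below are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define RUHS_HDR_SIZE   16u
#define RUHS_DESCR_SIZE 32u

static size_t ruhs_buf_size(uint16_t nphs, uint16_t nrg)
{
    uint32_t nruhsd = (uint32_t)nphs * nrg;   /* one entry per (ph, rg) pair */
    return RUHS_HDR_SIZE + (size_t)nruhsd * RUHS_DESCR_SIZE;
}

int main(void)
{
    printf("%zu\n", ruhs_buf_size(4, 2));     /* 8 (ph, rg) pairs */
    return 0;
}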
4537 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_recv()
4538 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_recv()
4539 uint32_t numd = le32_to_cpu(cmd->cdw11); in nvme_io_mgmt_recv()
4555 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_send_ruh_update()
4556 NvmeNamespace *ns = req->ns; in nvme_io_mgmt_send_ruh_update()
4557 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_send_ruh_update()
4564 if (!ns->endgrp || !ns->endgrp->fdp.enabled) { in nvme_io_mgmt_send_ruh_update()
4568 maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh; in nvme_io_mgmt_send_ruh_update()
4592 NvmeCmd *cmd = &req->cmd; in nvme_io_mgmt_send()
4593 uint32_t cdw10 = le32_to_cpu(cmd->cdw10); in nvme_io_mgmt_send()
4608 switch (req->cmd.opcode) { in __nvme_io_cmd_nvm()
4634 if (!(n->cse.iocs.nvm[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_io_cmd_nvm()
4635 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); in nvme_io_cmd_nvm()
4644 if (!(n->cse.iocs.zoned[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_io_cmd_zoned()
4645 trace_pci_nvme_err_invalid_opc(req->cmd.opcode); in nvme_io_cmd_zoned()
4649 switch (req->cmd.opcode) { in nvme_io_cmd_zoned()
4664 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_io_cmd()
4667 req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); in nvme_io_cmd()
4678 * semantics in some other command set - does an NSID of FFFFFFFFh then in nvme_io_cmd()
4689 if (req->cmd.opcode == NVME_CMD_FLUSH) { in nvme_io_cmd()
4702 if (ns->status) { in nvme_io_cmd()
4703 return ns->status; in nvme_io_cmd()
4706 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { in nvme_io_cmd()
4710 req->ns = ns; in nvme_io_cmd()
4712 switch (ns->csi) { in nvme_io_cmd()
4725 NvmeCtrl *n = cq->ctrl; in nvme_cq_notifier()
4733 if (cq->tail == cq->head) { in nvme_cq_notifier()
4734 if (cq->irq_enabled) { in nvme_cq_notifier()
4735 n->cq_pending--; in nvme_cq_notifier()
4741 qemu_bh_schedule(cq->bh); in nvme_cq_notifier()
4746 NvmeCtrl *n = cq->ctrl; in nvme_init_cq_ioeventfd()
4747 uint16_t offset = (cq->cqid << 3) + (1 << 2); in nvme_init_cq_ioeventfd()
4750 ret = event_notifier_init(&cq->notifier, 0); in nvme_init_cq_ioeventfd()
4755 event_notifier_set_handler(&cq->notifier, nvme_cq_notifier); in nvme_init_cq_ioeventfd()
4756 memory_region_add_eventfd(&n->iomem, in nvme_init_cq_ioeventfd()
4757 0x1000 + offset, 4, false, 0, &cq->notifier); in nvme_init_cq_ioeventfd()
4775 NvmeCtrl *n = sq->ctrl; in nvme_init_sq_ioeventfd()
4776 uint16_t offset = sq->sqid << 3; in nvme_init_sq_ioeventfd()
4779 ret = event_notifier_init(&sq->notifier, 0); in nvme_init_sq_ioeventfd()
4784 event_notifier_set_handler(&sq->notifier, nvme_sq_notifier); in nvme_init_sq_ioeventfd()
4785 memory_region_add_eventfd(&n->iomem, in nvme_init_sq_ioeventfd()
4786 0x1000 + offset, 4, false, 0, &sq->notifier); in nvme_init_sq_ioeventfd()
4793 uint16_t offset = sq->sqid << 3; in nvme_free_sq()
4795 n->sq[sq->sqid] = NULL; in nvme_free_sq()
4796 qemu_bh_delete(sq->bh); in nvme_free_sq()
4797 if (sq->ioeventfd_enabled) { in nvme_free_sq()
4798 memory_region_del_eventfd(&n->iomem, in nvme_free_sq()
4799 0x1000 + offset, 4, false, 0, &sq->notifier); in nvme_free_sq()
4800 event_notifier_set_handler(&sq->notifier, NULL); in nvme_free_sq()
4801 event_notifier_cleanup(&sq->notifier); in nvme_free_sq()
4803 g_free(sq->io_req); in nvme_free_sq()
4804 if (sq->sqid) { in nvme_free_sq()
4811 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; in nvme_del_sq()
4815 uint16_t qid = le16_to_cpu(c->qid); in nvme_del_sq()
4824 sq = n->sq[qid]; in nvme_del_sq()
4825 while (!QTAILQ_EMPTY(&sq->out_req_list)) { in nvme_del_sq()
4826 r = QTAILQ_FIRST(&sq->out_req_list); in nvme_del_sq()
4827 assert(r->aiocb); in nvme_del_sq()
4828 r->status = NVME_CMD_ABORT_SQ_DEL; in nvme_del_sq()
4829 blk_aio_cancel(r->aiocb); in nvme_del_sq()
4832 assert(QTAILQ_EMPTY(&sq->out_req_list)); in nvme_del_sq()
4834 if (!nvme_check_cqid(n, sq->cqid)) { in nvme_del_sq()
4835 cq = n->cq[sq->cqid]; in nvme_del_sq()
4836 QTAILQ_REMOVE(&cq->sq_list, sq, entry); in nvme_del_sq()
4839 QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { in nvme_del_sq()
4840 if (r->sq == sq) { in nvme_del_sq()
4841 QTAILQ_REMOVE(&cq->req_list, r, entry); in nvme_del_sq()
4842 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); in nvme_del_sq()
4857 sq->ctrl = n; in nvme_init_sq()
4858 sq->dma_addr = dma_addr; in nvme_init_sq()
4859 sq->sqid = sqid; in nvme_init_sq()
4860 sq->size = size; in nvme_init_sq()
4861 sq->cqid = cqid; in nvme_init_sq()
4862 sq->head = sq->tail = 0; in nvme_init_sq()
4863 sq->io_req = g_new0(NvmeRequest, sq->size); in nvme_init_sq()
4865 QTAILQ_INIT(&sq->req_list); in nvme_init_sq()
4866 QTAILQ_INIT(&sq->out_req_list); in nvme_init_sq()
4867 for (i = 0; i < sq->size; i++) { in nvme_init_sq()
4868 sq->io_req[i].sq = sq; in nvme_init_sq()
4869 QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); in nvme_init_sq()
4872 sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq, in nvme_init_sq()
4873 &DEVICE(sq->ctrl)->mem_reentrancy_guard); in nvme_init_sq()
4875 if (n->dbbuf_enabled) { in nvme_init_sq()
4876 sq->db_addr = n->dbbuf_dbs + (sqid << 3); in nvme_init_sq()
4877 sq->ei_addr = n->dbbuf_eis + (sqid << 3); in nvme_init_sq()
4879 if (n->params.ioeventfd && sq->sqid != 0) { in nvme_init_sq()
4881 sq->ioeventfd_enabled = true; in nvme_init_sq()
4886 assert(n->cq[cqid]); in nvme_init_sq()
4887 cq = n->cq[cqid]; in nvme_init_sq()
4888 QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); in nvme_init_sq()
4889 n->sq[sqid] = sq; in nvme_init_sq()
4895 NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; in nvme_create_sq()
4897 uint16_t cqid = le16_to_cpu(c->cqid); in nvme_create_sq()
4898 uint16_t sqid = le16_to_cpu(c->sqid); in nvme_create_sq()
4899 uint16_t qsize = le16_to_cpu(c->qsize); in nvme_create_sq()
4900 uint16_t qflags = le16_to_cpu(c->sq_flags); in nvme_create_sq()
4901 uint64_t prp1 = le64_to_cpu(c->prp1); in nvme_create_sq()
4909 if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) { in nvme_create_sq()
4913 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { in nvme_create_sq()
4917 if (unlikely(prp1 & (n->page_size - 1))) { in nvme_create_sq()
4939 BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); in nvme_set_blk_stats()
4941 stats->units_read += s->nr_bytes[BLOCK_ACCT_READ]; in nvme_set_blk_stats()
4942 stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE]; in nvme_set_blk_stats()
4943 stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; in nvme_set_blk_stats()
4944 stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; in nvme_set_blk_stats()
4982 trans_len = MIN(sizeof(smart_l) - off, buf_len); in nvme_ocp_extended_smart_info()
4989 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_smart_info()
5019 trans_len = MIN(sizeof(smart) - off, buf_len); in nvme_smart_info()
5020 smart.critical_warning = n->smart_critical_warning; in nvme_smart_info()
5030 smart.temperature = cpu_to_le16(n->temperature); in nvme_smart_info()
5032 if ((n->temperature >= n->features.temp_thresh_hi) || in nvme_smart_info()
5033 (n->temperature <= n->features.temp_thresh_low)) { in nvme_smart_info()
5039 cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); in nvme_smart_info()
5051 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_endgrp_info()
5057 if (!n->subsys || endgrpid != 0x1) { in nvme_endgrp_info()
5066 NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); in nvme_endgrp_info()
5084 buf_len = MIN(sizeof(info) - off, buf_len); in nvme_endgrp_info()
5103 trans_len = MIN(sizeof(fw_log) - off, buf_len); in nvme_fw_log_info()
5123 trans_len = MIN(sizeof(errlog) - off, buf_len); in nvme_error_info()
5142 trans_len = MIN(sizeof(nslist) - off, buf_len); in nvme_changed_nslist()
5144 while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != in nvme_changed_nslist()
5157 clear_bit(nsid, n->changed_nsids); in nvme_changed_nslist()
5165 bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); in nvme_changed_nslist()
5187 switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { in nvme_cmd_effects()
5189 iocs = n->cse.iocs.nvm; in nvme_cmd_effects()
5195 iocs = n->cse.iocs.nvm; in nvme_cmd_effects()
5198 iocs = n->cse.iocs.zoned; in nvme_cmd_effects()
5205 memcpy(log.acs, n->cse.acs, sizeof(log.acs)); in nvme_cmd_effects()
5211 trans_len = MIN(sizeof(log) - off, buf_len); in nvme_cmd_effects()
5222 if (n->params.ocp) { in nvme_vendor_specific_log()
5252 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_confs()
5256 endgrp = &n->subsys->endgrp; in nvme_fdp_confs()
5258 if (endgrp->fdp.enabled) { in nvme_fdp_confs()
5259 nruh = endgrp->fdp.nruh; in nvme_fdp_confs()
5271 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_confs()
5278 log->num_confs = cpu_to_le16(0); in nvme_fdp_confs()
5279 log->size = cpu_to_le32(log_size); in nvme_fdp_confs()
5281 hdr->descr_size = cpu_to_le16(fdp_descr_size); in nvme_fdp_confs()
5282 if (endgrp->fdp.enabled) { in nvme_fdp_confs()
5283 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1); in nvme_fdp_confs()
5284 hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif); in nvme_fdp_confs()
5285 hdr->nrg = cpu_to_le16(endgrp->fdp.nrg); in nvme_fdp_confs()
5286 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); in nvme_fdp_confs()
5287 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); in nvme_fdp_confs()
5288 hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES); in nvme_fdp_confs()
5289 hdr->runs = cpu_to_le64(endgrp->fdp.runs); in nvme_fdp_confs()
5292 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; in nvme_fdp_confs()
5296 /* 1 bit for RUH in PIF -> 2 RUHs max. */ in nvme_fdp_confs()
5297 hdr->nrg = cpu_to_le16(1); in nvme_fdp_confs()
5298 hdr->nruh = cpu_to_le16(1); in nvme_fdp_confs()
5299 hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); in nvme_fdp_confs()
5300 hdr->nnss = cpu_to_le32(1); in nvme_fdp_confs()
5301 hdr->runs = cpu_to_le64(96 * MiB); in nvme_fdp_confs()
5303 ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; in nvme_fdp_confs()
5322 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_ruh_usage()
5326 endgrp = &n->subsys->endgrp; in nvme_fdp_ruh_usage()
5328 if (!endgrp->fdp.enabled) { in nvme_fdp_ruh_usage()
5332 log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr); in nvme_fdp_ruh_usage()
5338 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_ruh_usage()
5344 ruh = endgrp->fdp.ruhs; in nvme_fdp_ruh_usage()
5345 hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); in nvme_fdp_ruh_usage()
5347 for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) { in nvme_fdp_ruh_usage()
5348 ruhud->ruha = ruh->ruha; in nvme_fdp_ruh_usage()
5365 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_stats()
5369 if (!n->subsys->endgrp.fdp.enabled) { in nvme_fdp_stats()
5373 endgrp = &n->subsys->endgrp; in nvme_fdp_stats()
5375 trans_len = MIN(sizeof(log) - off, buf_len); in nvme_fdp_stats()
5378 log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw); in nvme_fdp_stats()
5379 log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw); in nvme_fdp_stats()
5380 log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe); in nvme_fdp_stats()
5390 NvmeCmd *cmd = &req->cmd; in nvme_fdp_events()
5391 bool host_events = (cmd->cdw10 >> 8) & 0x1; in nvme_fdp_events()
5397 if (endgrpid != 1 || !n->subsys) { in nvme_fdp_events()
5401 endgrp = &n->subsys->endgrp; in nvme_fdp_events()
5403 if (!endgrp->fdp.enabled) { in nvme_fdp_events()
5408 ebuf = &endgrp->fdp.host_events; in nvme_fdp_events()
5410 ebuf = &endgrp->fdp.ctrl_events; in nvme_fdp_events()
5413 log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent); in nvme_fdp_events()
5419 trans_len = MIN(log_size - off, buf_len); in nvme_fdp_events()
5421 elog->num_events = cpu_to_le32(ebuf->nelems); in nvme_fdp_events()
5424 if (ebuf->nelems && ebuf->start == ebuf->next) { in nvme_fdp_events()
5425 unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start); in nvme_fdp_events()
5427 memcpy(event, &ebuf->events[ebuf->start], in nvme_fdp_events()
5429 memcpy(event + nelems, ebuf->events, in nvme_fdp_events()
5430 sizeof(NvmeFdpEvent) * ebuf->next); in nvme_fdp_events()
5431 } else if (ebuf->start < ebuf->next) { in nvme_fdp_events()
5432 memcpy(event, &ebuf->events[ebuf->start], in nvme_fdp_events()
5433 sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start)); in nvme_fdp_events()
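/*
 * Standalone sketch (not from the listed source): the FDP events log sits in
 * a fixed-size circular buffer; when it is full and wrapped (start == next
 * while non-empty) the copy happens in two pieces, tail first and then the
 * wrapped head.  The event type and capacity below are illustrative.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAX_EVENTS 63

struct event { uint64_t ts; uint8_t type; };

struct ring {
    struct event ev[MAX_EVENTS];
    unsigned start, next, nelems;
};

/* Copy the ring's contents, oldest first, into a linear destination. */
static void ring_linearize(const struct ring *r, struct event *dst)
{
    if (r->nelems && r->start == r->next) {
        /* full and wrapped: [start, MAX) then [0, next) */
        unsigned tail = MAX_EVENTS - r->start;
        memcpy(dst, &r->ev[r->start], tail * sizeof(*dst));
        memcpy(dst + tail, r->ev, r->next * sizeof(*dst));
    } else if (r->start < r->next) {
        memcpy(dst, &r->ev[r->start], (r->next - r->start) * sizeof(*dst));
    }
}

int main(void)
{
    struct ring r = { .start = 60, .next = 60, .nelems = MAX_EVENTS };
    struct event out[MAX_EVENTS];

    ring_linearize(&r, out);
    printf("copied %u events\n", r.nelems);
    return 0;
}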
5441 NvmeCmd *cmd = &req->cmd; in nvme_get_log()
5443 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_get_log()
5444 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_get_log()
5445 uint32_t dw12 = le32_to_cpu(cmd->cdw12); in nvme_get_log()
5446 uint32_t dw13 = le32_to_cpu(cmd->cdw13); in nvme_get_log()
5450 uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; in nvme_get_log()
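/*
 * Standalone sketch (not from the listed source): Get Log Page spreads its
 * parameters over CDW10..CDW13 -- NUMDL in the upper half of CDW10, NUMDU in
 * the lower half of CDW11, and a byte offset split across CDW12/CDW13.  The
 * layout follows the NVMe base spec; the struct below is only illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct get_log_params {
    uint8_t  lid;     /* log page identifier */
    uint32_t len;     /* transfer length in bytes */
    uint64_t off;     /* byte offset into the log page */
};

static struct get_log_params decode_get_log(uint32_t dw10, uint32_t dw11,
                                            uint32_t dw12, uint32_t dw13)
{
    struct get_log_params p;
    uint32_t numdl = (dw10 >> 16) & 0xffff;
    uint32_t numdu = dw11 & 0xffff;

    p.lid = dw10 & 0xff;
    p.len = (((numdu << 16) | numdl) + 1) << 2;   /* 0's-based dwords -> bytes */
    p.off = ((uint64_t)dw13 << 32) | dw12;        /* LPOU:LPOL */
    return p;
}

int main(void)
{
    /* SMART log (LID 02h), 512 bytes (128 dwords), no offset */
    struct get_log_params p = decode_get_log(0x007f0002, 0, 0, 0);

    printf("lid=%#x len=%u off=%llu\n", p.lid, p.len,
           (unsigned long long)p.off);
    return 0;
}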
5508 uint16_t offset = (cq->cqid << 3) + (1 << 2); in nvme_free_cq()
5510 n->cq[cq->cqid] = NULL; in nvme_free_cq()
5511 qemu_bh_delete(cq->bh); in nvme_free_cq()
5512 if (cq->ioeventfd_enabled) { in nvme_free_cq()
5513 memory_region_del_eventfd(&n->iomem, in nvme_free_cq()
5514 0x1000 + offset, 4, false, 0, &cq->notifier); in nvme_free_cq()
5515 event_notifier_set_handler(&cq->notifier, NULL); in nvme_free_cq()
5516 event_notifier_cleanup(&cq->notifier); in nvme_free_cq()
5518 if (msix_enabled(pci) && cq->irq_enabled) { in nvme_free_cq()
5519 msix_vector_unuse(pci, cq->vector); in nvme_free_cq()
5521 if (cq->cqid) { in nvme_free_cq()
5528 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; in nvme_del_cq()
5530 uint16_t qid = le16_to_cpu(c->qid); in nvme_del_cq()
5537 cq = n->cq[qid]; in nvme_del_cq()
5538 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { in nvme_del_cq()
5543 if (cq->irq_enabled && cq->tail != cq->head) { in nvme_del_cq()
5544 n->cq_pending--; in nvme_del_cq()
5563 cq->ctrl = n; in nvme_init_cq()
5564 cq->cqid = cqid; in nvme_init_cq()
5565 cq->size = size; in nvme_init_cq()
5566 cq->dma_addr = dma_addr; in nvme_init_cq()
5567 cq->phase = 1; in nvme_init_cq()
5568 cq->irq_enabled = irq_enabled; in nvme_init_cq()
5569 cq->vector = vector; in nvme_init_cq()
5570 cq->head = cq->tail = 0; in nvme_init_cq()
5571 QTAILQ_INIT(&cq->req_list); in nvme_init_cq()
5572 QTAILQ_INIT(&cq->sq_list); in nvme_init_cq()
5573 if (n->dbbuf_enabled) { in nvme_init_cq()
5574 cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2); in nvme_init_cq()
5575 cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2); in nvme_init_cq()
5577 if (n->params.ioeventfd && cqid != 0) { in nvme_init_cq()
5579 cq->ioeventfd_enabled = true; in nvme_init_cq()
5583 n->cq[cqid] = cq; in nvme_init_cq()
5584 cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq, in nvme_init_cq()
5585 &DEVICE(cq->ctrl)->mem_reentrancy_guard); in nvme_init_cq()
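/*
 * Standalone sketch (not from the listed source): with Doorbell Buffer
 * Config enabled, queues get shadow doorbell and EventIdx slots in two guest
 * pages, mirroring the MMIO layout -- 8 bytes per queue id, submission tail
 * first and completion head 4 bytes in.  The base address is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t shadow_sq_tail_addr(uint64_t dbbuf_base, uint16_t sqid)
{
    return dbbuf_base + ((uint64_t)sqid << 3);
}

static uint64_t shadow_cq_head_addr(uint64_t dbbuf_base, uint16_t cqid)
{
    return dbbuf_base + ((uint64_t)cqid << 3) + (1 << 2);
}

int main(void)
{
    uint64_t dbs = 0x100000;   /* hypothetical guest address of the db page */

    printf("SQ1 tail shadow: %#llx\n",
           (unsigned long long)shadow_sq_tail_addr(dbs, 1));
    printf("CQ1 head shadow: %#llx\n",
           (unsigned long long)shadow_cq_head_addr(dbs, 1));
    return 0;
}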
5591 NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; in nvme_create_cq()
5592 uint16_t cqid = le16_to_cpu(c->cqid); in nvme_create_cq()
5593 uint16_t vector = le16_to_cpu(c->irq_vector); in nvme_create_cq()
5594 uint16_t qsize = le16_to_cpu(c->qsize); in nvme_create_cq()
5595 uint16_t qflags = le16_to_cpu(c->cq_flags); in nvme_create_cq()
5596 uint64_t prp1 = le64_to_cpu(c->prp1); in nvme_create_cq()
5597 uint32_t cc = ldq_le_p(&n->bar.cc); in nvme_create_cq()
5609 if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) { in nvme_create_cq()
5613 if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { in nvme_create_cq()
5617 if (unlikely(prp1 & (n->page_size - 1))) { in nvme_create_cq()
5625 if (unlikely(vector >= n->conf_msix_qsize)) { in nvme_create_cq()
5643 n->qs_created = true; in nvme_create_cq()
5658 return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); in nvme_identify_ctrl()
5663 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ctrl_csi()
5667 trace_pci_nvme_identify_ctrl_csi(c->csi); in nvme_identify_ctrl_csi()
5669 switch (c->csi) { in nvme_identify_ctrl_csi()
5671 id_nvm->vsl = n->params.vsl; in nvme_identify_ctrl_csi()
5672 id_nvm->dmrl = NVME_ID_CTRL_NVM_DMRL_MAX; in nvme_identify_ctrl_csi()
5673 id_nvm->dmrsl = cpu_to_le32(n->dmrsl); in nvme_identify_ctrl_csi()
5674 id_nvm->dmsl = NVME_ID_CTRL_NVM_DMRL_MAX * n->dmrsl; in nvme_identify_ctrl_csi()
5678 ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; in nvme_identify_ctrl_csi()
5691 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns()
5692 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns()
5703 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns()
5712 if (active || ns->csi == NVME_CSI_NVM) { in nvme_identify_ns()
5713 return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); in nvme_identify_ns()
5722 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ctrl_list()
5723 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ctrl_list()
5724 uint16_t min_id = le16_to_cpu(c->ctrlid); in nvme_identify_ctrl_list()
5731 trace_pci_nvme_identify_ctrl_list(c->cns, min_id); in nvme_identify_ctrl_list()
5733 if (!n->subsys) { in nvme_identify_ctrl_list()
5742 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ctrl_list()
5748 for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { in nvme_identify_ctrl_list()
5749 ctrl = nvme_subsys_ctrl(n->subsys, cntlid); in nvme_identify_ctrl_list()
5768 trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid)); in nvme_identify_pri_ctrl_cap()
5770 return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap, in nvme_identify_pri_ctrl_cap()
5776 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_sec_ctrl_list()
5777 uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid); in nvme_identify_sec_ctrl_list()
5778 uint16_t min_id = le16_to_cpu(c->ctrlid); in nvme_identify_sec_ctrl_list()
5779 uint8_t num_sec_ctrl = n->nr_sec_ctrls; in nvme_identify_sec_ctrl_list()
5784 if (n->sec_ctrl_list[i].scid >= min_id) { in nvme_identify_sec_ctrl_list()
5785 list.numcntl = MIN(num_sec_ctrl - i, 127); in nvme_identify_sec_ctrl_list()
5786 memcpy(&list.sec, n->sec_ctrl_list + i, in nvme_identify_sec_ctrl_list()
5800 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_ind()
5801 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_ind()
5812 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns_ind()
5821 return nvme_c2h(n, (uint8_t *)&ns->id_ns_ind, sizeof(NvmeIdNsInd), req); in nvme_identify_ns_ind()
5828 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_csi()
5829 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_csi()
5831 trace_pci_nvme_identify_ns_csi(nsid, c->csi); in nvme_identify_ns_csi()
5840 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_identify_ns_csi()
5849 if (c->csi == NVME_CSI_NVM) { in nvme_identify_ns_csi()
5850 return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm), in nvme_identify_ns_csi()
5852 } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { in nvme_identify_ns_csi()
5853 return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), in nvme_identify_ns_csi()
5864 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_nslist()
5865 uint32_t min_nsid = le32_to_cpu(c->nsid); in nvme_identify_nslist()
5879 if (min_nsid >= NVME_NSID_BROADCAST - 1) { in nvme_identify_nslist()
5887 ns = nvme_subsys_ns(n->subsys, i); in nvme_identify_nslist()
5895 if (ns->params.nsid <= min_nsid) { in nvme_identify_nslist()
5898 list_ptr[j++] = cpu_to_le32(ns->params.nsid); in nvme_identify_nslist()
5911 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_nslist_csi()
5912 uint32_t min_nsid = le32_to_cpu(c->nsid); in nvme_identify_nslist_csi()
5918 trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); in nvme_identify_nslist_csi()
5923 if (min_nsid >= NVME_NSID_BROADCAST - 1) { in nvme_identify_nslist_csi()
5927 if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { in nvme_identify_nslist_csi()
5935 ns = nvme_subsys_ns(n->subsys, i); in nvme_identify_nslist_csi()
5943 if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { in nvme_identify_nslist_csi()
5946 list_ptr[j++] = cpu_to_le32(ns->params.nsid); in nvme_identify_nslist_csi()
5960 uint16_t endgid = le32_to_cpu(req->cmd.cdw11) & 0xffff; in nvme_endurance_group_list()
5963 * The current nvme-subsys only supports Endurance Group #1. in nvme_endurance_group_list()
5978 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify_ns_descr_list()
5979 uint32_t nsid = le32_to_cpu(c->nsid); in nvme_identify_ns_descr_list()
6010 if (!qemu_uuid_is_null(&ns->params.uuid)) { in nvme_identify_ns_descr_list()
6013 memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); in nvme_identify_ns_descr_list()
6018 if (!nvme_nguid_is_null(&ns->params.nguid)) { in nvme_identify_ns_descr_list()
6021 memcpy(nguid.v, ns->params.nguid.data, NVME_NIDL_NGUID); in nvme_identify_ns_descr_list()
6026 if (ns->params.eui64) { in nvme_identify_ns_descr_list()
6029 eui64.v = cpu_to_be64(ns->params.eui64); in nvme_identify_ns_descr_list()
6036 csi.v = ns->csi; in nvme_identify_ns_descr_list()
6058 NvmeIdentify *c = (NvmeIdentify *)&req->cmd; in nvme_identify()
6060 trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), in nvme_identify()
6061 c->csi); in nvme_identify()
6063 switch (c->cns) { in nvme_identify()
6103 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); in nvme_identify()
6110 uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; in nvme_abort()
6111 uint16_t cid = (le32_to_cpu(req->cmd.cdw10) >> 16) & 0xffff; in nvme_abort()
6112 NvmeSQueue *sq = n->sq[sqid]; in nvme_abort()
6116 req->cqe.result = 1; in nvme_abort()
6122 for (i = 0; i < n->outstanding_aers; i++) { in nvme_abort()
6123 NvmeRequest *re = n->aer_reqs[i]; in nvme_abort()
6124 if (re->cqe.cid == cid) { in nvme_abort()
6125 memmove(n->aer_reqs + i, n->aer_reqs + i + 1, in nvme_abort()
6126 (n->outstanding_aers - i - 1) * sizeof(NvmeRequest *)); in nvme_abort()
6127 n->outstanding_aers--; in nvme_abort()
6128 re->status = NVME_CMD_ABORT_REQ; in nvme_abort()
6129 req->cqe.result = 0; in nvme_abort()
6130 nvme_enqueue_req_completion(&n->admin_cq, re); in nvme_abort()
6136 QTAILQ_FOREACH_SAFE(r, &sq->out_req_list, entry, next) { in nvme_abort()
6137 if (r->cqe.cid == cid) { in nvme_abort()
6138 if (r->aiocb) { in nvme_abort()
6139 r->status = NVME_CMD_ABORT_REQ; in nvme_abort()
6140 blk_aio_cancel_async(r->aiocb); in nvme_abort()
6153 n->host_timestamp = le64_to_cpu(ts); in nvme_set_timestamp()
6154 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); in nvme_set_timestamp()
6160 uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; in nvme_get_timestamp()
6174 ts.timestamp = n->host_timestamp + elapsed_time; in nvme_get_timestamp()
6176 /* If the host timestamp is non-zero, set the timestamp origin */ in nvme_get_timestamp()
6177 ts.origin = n->host_timestamp ? 0x01 : 0x00; in nvme_get_timestamp()
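/*
 * Standalone sketch (not from the listed source): the Timestamp feature
 * stores the host-supplied millisecond value together with the controller
 * clock at the moment it was set; a later read returns the stored value plus
 * the elapsed time, with the origin flag saying whether a host value was
 * ever set.  Struct and function names below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct ts_state {
    uint64_t host_timestamp;   /* ms, as set by the host (0 if never set) */
    uint64_t set_time_ms;      /* controller clock when it was set */
};

static void set_timestamp(struct ts_state *s, uint64_t host_ms, uint64_t now_ms)
{
    s->host_timestamp = host_ms;
    s->set_time_ms = now_ms;
}

/* Returns the 48-bit timestamp value; origin_host is set if the host set it. */
static uint64_t get_timestamp(const struct ts_state *s, uint64_t now_ms,
                              int *origin_host)
{
    uint64_t elapsed = now_ms - s->set_time_ms;

    *origin_host = s->host_timestamp != 0;
    return (s->host_timestamp + elapsed) & ((1ULL << 48) - 1);
}

int main(void)
{
    struct ts_state s = { 0 };
    int origin;

    set_timestamp(&s, 1700000000000ULL, 5000);
    printf("ts=%llu origin=%d\n",
           (unsigned long long)get_timestamp(&s, 65000, &origin), origin);
    return 0;
}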
6196 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_get_feature_fdp()
6209 NvmeCmd *cmd = &req->cmd; in nvme_get_feature_fdp_events()
6210 uint32_t cdw11 = le32_to_cpu(cmd->cdw11); in nvme_get_feature_fdp_events()
6221 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_get_feature_fdp_events()
6229 ruhid = ns->fdp.phs[ph]; in nvme_get_feature_fdp_events()
6230 ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; in nvme_get_feature_fdp_events()
6251 s_event->evt = event_type; in nvme_get_feature_fdp_events()
6252 s_event->evta = (ruh->event_filter >> shift) & 0x1; in nvme_get_feature_fdp_events()
6271 NvmeCmd *cmd = &req->cmd; in nvme_get_feature()
6272 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_get_feature()
6273 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_get_feature()
6274 uint32_t nsid = le32_to_cpu(cmd->nsid); in nvme_get_feature()
6336 result = n->features.temp_thresh_hi; in nvme_get_feature()
6339 result = n->features.temp_thresh_low; in nvme_get_feature()
6354 result = ns->features.err_rec; in nvme_get_feature()
6364 result = blk_enable_write_cache(ns->blkconf.blk); in nvme_get_feature()
6372 result = n->features.async_config; in nvme_get_feature()
6377 return nvme_c2h(n, (uint8_t *)&n->features.hbs, in nvme_get_feature()
6378 sizeof(n->features.hbs), req); in nvme_get_feature()
6425 result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16); in nvme_get_feature()
6430 if (iv >= n->conf_ioqpairs + 1) { in nvme_get_feature()
6435 if (iv == n->admin_cq.vector) { in nvme_get_feature()
6453 result = n->dn; in nvme_get_feature()
6461 req->cqe.result = cpu_to_le32(result); in nvme_get_feature()
6483 NvmeCmd *cmd = &req->cmd; in nvme_set_feature_fdp_events()
6484 uint32_t cdw11 = le32_to_cpu(cmd->cdw11); in nvme_set_feature_fdp_events()
6488 uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1; in nvme_set_feature_fdp_events()
6496 if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { in nvme_set_feature_fdp_events()
6504 ruhid = ns->fdp.phs[ph]; in nvme_set_feature_fdp_events()
6505 ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; in nvme_set_feature_fdp_events()
6517 ruh->event_filter |= event_mask; in nvme_set_feature_fdp_events()
6519 ruh->event_filter = ruh->event_filter & ~event_mask; in nvme_set_feature_fdp_events()
6529 NvmeCmd *cmd = &req->cmd; in nvme_set_feature()
6530 uint32_t dw10 = le32_to_cpu(cmd->cdw10); in nvme_set_feature()
6531 uint32_t dw11 = le32_to_cpu(cmd->cdw11); in nvme_set_feature()
6532 uint32_t nsid = le32_to_cpu(cmd->nsid); in nvme_set_feature()
6537 NvmeIdCtrl *id = &n->id_ctrl; in nvme_set_feature()
6538 NvmeAtomic *atomic = &n->atomic; in nvme_set_feature()
6581 n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); in nvme_set_feature()
6584 n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); in nvme_set_feature()
6590 if ((n->temperature >= n->features.temp_thresh_hi) || in nvme_set_feature()
6591 (n->temperature <= n->features.temp_thresh_low)) { in nvme_set_feature()
6605 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { in nvme_set_feature()
6606 ns->features.err_rec = dw11; in nvme_set_feature()
6614 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { in nvme_set_feature()
6615 ns->features.err_rec = dw11; in nvme_set_feature()
6625 if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { in nvme_set_feature()
6626 blk_flush(ns->blkconf.blk); in nvme_set_feature()
6629 blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); in nvme_set_feature()
6635 if (n->qs_created) { in nvme_set_feature()
6649 n->conf_ioqpairs, in nvme_set_feature()
6650 n->conf_ioqpairs); in nvme_set_feature()
6651 req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) | in nvme_set_feature()
6652 ((n->conf_ioqpairs - 1) << 16)); in nvme_set_feature()
6655 n->features.async_config = dw11; in nvme_set_feature()
6660 status = nvme_h2c(n, (uint8_t *)&n->features.hbs, in nvme_set_feature()
6661 sizeof(n->features.hbs), req); in nvme_set_feature()
6673 ns->id_ns.nlbaf = ns->nlbaf - 1; in nvme_set_feature()
6674 if (!n->features.hbs.lbafee) { in nvme_set_feature()
6675 ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15); in nvme_set_feature()
6693 n->dn = 0x1 & dw11; in nvme_set_feature()
6695 if (n->dn) { in nvme_set_feature()
6696 atomic->atomic_max_write_size = le16_to_cpu(id->awupf) + 1; in nvme_set_feature()
6698 atomic->atomic_max_write_size = le16_to_cpu(id->awun) + 1; in nvme_set_feature()
6701 if (atomic->atomic_max_write_size == 1) { in nvme_set_feature()
6702 atomic->atomic_writes = 0; in nvme_set_feature()
6704 atomic->atomic_writes = 1; in nvme_set_feature()
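/*
 * Standalone sketch (not from the listed source): setting Write Atomicity
 * Normal chooses which 0's-based identify field bounds an untorn write --
 * AWUPF when "disable normal" is set, AWUN otherwise -- and a resulting
 * bound of one block means overlap tracking can be skipped entirely.
 */
#include <stdint.h>
#include <stdio.h>

struct atomic_cfg {
    uint32_t max_write_blocks;   /* largest write treated as atomic */
    int      tracking_enabled;   /* whether overlap tracking is worthwhile */
};

/* awun/awupf are 0's-based block counts from Identify Controller. */
static struct atomic_cfg atomic_reconfigure(uint16_t awun, uint16_t awupf,
                                            int disable_normal)
{
    struct atomic_cfg c;

    c.max_write_blocks = (disable_normal ? awupf : awun) + 1u;
    c.tracking_enabled = c.max_write_blocks > 1;
    return c;
}

int main(void)
{
    struct atomic_cfg c = atomic_reconfigure(/*awun=*/63, /*awupf=*/7, 1);

    printf("max=%u tracking=%d\n", c.max_write_blocks, c.tracking_enabled);
    return 0;
}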
6717 if (n->outstanding_aers > n->params.aerl) { in nvme_aer()
6722 n->aer_reqs[n->outstanding_aers] = req; in nvme_aer()
6723 n->outstanding_aers++; in nvme_aer()
6725 if (!QTAILQ_EMPTY(&n->aer_queue)) { in nvme_aer()
6735 n->dmrsl = in nvme_update_dsm_limits()
6736 MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); in nvme_update_dsm_limits()
6747 n->dmrsl = in nvme_update_dsm_limits()
6748 MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); in nvme_update_dsm_limits()
6761 cc = ldl_le_p(&n->bar.cc); in nvme_csi_supported()
6771 assert(ns->attached > 0); in nvme_detach_ns()
6773 n->namespaces[ns->params.nsid] = NULL; in nvme_detach_ns()
6774 ns->attached--; in nvme_detach_ns()
6782 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_ns_attachment()
6783 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_ns_attachment()
6796 ns = nvme_subsys_ns(n->subsys, nsid); in nvme_ns_attachment()
6810 *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); in nvme_ns_attachment()
6812 ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); in nvme_ns_attachment()
6823 if (ns->attached && !ns->params.shared) { in nvme_ns_attachment()
6827 if (!nvme_csi_supported(n, ns->csi)) { in nvme_ns_attachment()
6850 if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { in nvme_ns_attachment()
6881 iocb->ret = -ECANCELED; in nvme_format_cancel()
6883 if (iocb->aiocb) { in nvme_format_cancel()
6884 blk_aio_cancel_async(iocb->aiocb); in nvme_format_cancel()
6885 iocb->aiocb = NULL; in nvme_format_cancel()
6900 trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); in nvme_format_set()
6902 ns->id_ns.dps = (pil << 3) | pi; in nvme_format_set()
6903 ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl; in nvme_format_set()
6913 NvmeNamespace *ns = iocb->ns; in nvme_format_ns_cb()
6916 if (iocb->ret < 0) { in nvme_format_ns_cb()
6919 iocb->ret = ret; in nvme_format_ns_cb()
6925 if (iocb->offset < ns->size) { in nvme_format_ns_cb()
6926 bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); in nvme_format_ns_cb()
6928 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, in nvme_format_ns_cb()
6932 iocb->offset += bytes; in nvme_format_ns_cb()
6936 nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); in nvme_format_ns_cb()
6937 ns->status = 0x0; in nvme_format_ns_cb()
6938 iocb->ns = NULL; in nvme_format_ns_cb()
6939 iocb->offset = 0; in nvme_format_ns_cb()
6947 if (ns->params.zoned) { in nvme_format_check()
6951 if (lbaf > ns->id_ns.nlbaf) { in nvme_format_check()
6955 if (pi && (ns->id_ns.lbaf[lbaf].ms < nvme_pi_tuple_size(ns))) { in nvme_format_check()
6968 NvmeRequest *req = iocb->req; in nvme_do_format()
6970 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_do_format()
6976 if (iocb->ret < 0) { in nvme_do_format()
6980 if (iocb->broadcast) { in nvme_do_format()
6981 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_format()
6982 iocb->ns = nvme_ns(n, i); in nvme_do_format()
6983 if (iocb->ns) { in nvme_do_format()
6984 iocb->nsid = i; in nvme_do_format()
6990 if (!iocb->ns) { in nvme_do_format()
6994 status = nvme_format_check(iocb->ns, lbaf, pi); in nvme_do_format()
6996 req->status = status; in nvme_do_format()
7000 iocb->ns->status = NVME_FORMAT_IN_PROGRESS; in nvme_do_format()
7005 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_format()
7012 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_format()
7013 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_format()
7023 iocb->req = req; in nvme_format()
7024 iocb->ret = 0; in nvme_format()
7025 iocb->ns = NULL; in nvme_format()
7026 iocb->nsid = 0; in nvme_format()
7027 iocb->lbaf = lbaf; in nvme_format()
7028 iocb->mset = mset; in nvme_format()
7029 iocb->pi = pi; in nvme_format()
7030 iocb->pil = pil; in nvme_format()
7031 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_format()
7032 iocb->offset = 0; in nvme_format()
7034 if (n->features.hbs.lbafee) { in nvme_format()
7035 iocb->lbaf |= lbafu << 4; in nvme_format()
7038 if (!iocb->broadcast) { in nvme_format()
7044 iocb->ns = nvme_ns(n, nsid); in nvme_format()
7045 if (!iocb->ns) { in nvme_format()
7051 req->aiocb = &iocb->common; in nvme_format()
7066 n->pri_ctrl_cap.vifrt : n->pri_ctrl_cap.vqfrt); in nvme_get_virt_res_num()
7068 n->pri_ctrl_cap.virfap : n->pri_ctrl_cap.vqrfap); in nvme_get_virt_res_num()
7069 *num_sec = le16_to_cpu(rt ? n->pri_ctrl_cap.virfa : n->pri_ctrl_cap.vqrfa); in nvme_get_virt_res_num()
7078 if (cntlid != n->cntlid) { in nvme_assign_virt_res_to_prim()
7088 if (nr > num_total - num_sec) { in nvme_assign_virt_res_to_prim()
7093 n->next_pri_ctrl_cap.virfap = cpu_to_le16(nr); in nvme_assign_virt_res_to_prim()
7095 n->next_pri_ctrl_cap.vqrfap = cpu_to_le16(nr); in nvme_assign_virt_res_to_prim()
7098 req->cqe.result = cpu_to_le32(nr); in nvme_assign_virt_res_to_prim()
7099 return req->status; in nvme_assign_virt_res_to_prim()
7108 prev_nr = le16_to_cpu(sctrl->nvi); in nvme_update_virt_res()
7109 prev_total = le32_to_cpu(n->pri_ctrl_cap.virfa); in nvme_update_virt_res()
7110 sctrl->nvi = cpu_to_le16(nr); in nvme_update_virt_res()
7111 n->pri_ctrl_cap.virfa = cpu_to_le32(prev_total + nr - prev_nr); in nvme_update_virt_res()
7113 prev_nr = le16_to_cpu(sctrl->nvq); in nvme_update_virt_res()
7114 prev_total = le32_to_cpu(n->pri_ctrl_cap.vqrfa); in nvme_update_virt_res()
7115 sctrl->nvq = cpu_to_le16(nr); in nvme_update_virt_res()
7116 n->pri_ctrl_cap.vqrfa = cpu_to_le32(prev_total + nr - prev_nr); in nvme_update_virt_res()
7131 if (sctrl->scs) { in nvme_assign_virt_res_to_sec()
7135 limit = le16_to_cpu(rt ? n->pri_ctrl_cap.vifrsm : n->pri_ctrl_cap.vqfrsm); in nvme_assign_virt_res_to_sec()
7141 num_free = num_total - num_prim - num_sec; in nvme_assign_virt_res_to_sec()
7142 diff = nr - le16_to_cpu(rt ? sctrl->nvi : sctrl->nvq); in nvme_assign_virt_res_to_sec()
7149 req->cqe.result = cpu_to_le32(nr); in nvme_assign_virt_res_to_sec()
7151 return req->status; in nvme_assign_virt_res_to_sec()
7167 vf_index = le16_to_cpu(sctrl->vfn) - 1; in nvme_virt_set_state()
7172 if (!sctrl->nvi || (le16_to_cpu(sctrl->nvq) < 2) || !sn) { in nvme_virt_set_state()
7176 if (!sctrl->scs) { in nvme_virt_set_state()
7177 sctrl->scs = 0x1; in nvme_virt_set_state()
7184 if (sctrl->scs) { in nvme_virt_set_state()
7185 sctrl->scs = 0x0; in nvme_virt_set_state()
7197 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_virt_mngmt()
7198 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_virt_mngmt()
7227 uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); in nvme_dbbuf_config()
7228 uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); in nvme_dbbuf_config()
7232 if (dbs_addr & (n->page_size - 1) || eis_addr & (n->page_size - 1)) { in nvme_dbbuf_config()
7237 n->dbbuf_dbs = dbs_addr; in nvme_dbbuf_config()
7238 n->dbbuf_eis = eis_addr; in nvme_dbbuf_config()
7239 n->dbbuf_enabled = true; in nvme_dbbuf_config()
7241 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_dbbuf_config()
7242 NvmeSQueue *sq = n->sq[i]; in nvme_dbbuf_config()
7243 NvmeCQueue *cq = n->cq[i]; in nvme_dbbuf_config()
7248 * nvme_process_db() uses this hard-coded way to calculate in nvme_dbbuf_config()
7251 sq->db_addr = dbs_addr + (i << 3); in nvme_dbbuf_config()
7252 sq->ei_addr = eis_addr + (i << 3); in nvme_dbbuf_config()
7253 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); in nvme_dbbuf_config()
7255 if (n->params.ioeventfd && sq->sqid != 0) { in nvme_dbbuf_config()
7257 sq->ioeventfd_enabled = true; in nvme_dbbuf_config()
7264 cq->db_addr = dbs_addr + (i << 3) + (1 << 2); in nvme_dbbuf_config()
7265 cq->ei_addr = eis_addr + (i << 3) + (1 << 2); in nvme_dbbuf_config()
7266 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); in nvme_dbbuf_config()
7268 if (n->params.ioeventfd && cq->cqid != 0) { in nvme_dbbuf_config()
7270 cq->ioeventfd_enabled = true; in nvme_dbbuf_config()
7289 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); in nvme_directive_receive()
7290 uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); in nvme_directive_receive()
7291 uint32_t nsid = le32_to_cpu(req->cmd.nsid); in nvme_directive_receive()
7319 if (ns->endgrp && ns->endgrp->fdp.enabled) { in nvme_directive_receive()
7338 trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, in nvme_admin_cmd()
7339 nvme_adm_opc_str(req->cmd.opcode)); in nvme_admin_cmd()
7341 if (!(n->cse.acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { in nvme_admin_cmd()
7342 trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); in nvme_admin_cmd()
7347 if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { in nvme_admin_cmd()
7351 if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { in nvme_admin_cmd()
7355 switch (req->cmd.opcode) { in nvme_admin_cmd()
7397 trace_pci_nvme_update_sq_eventidx(sq->sqid, sq->tail); in nvme_update_sq_eventidx()
7399 stl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->ei_addr, sq->tail, in nvme_update_sq_eventidx()
7405 ldl_le_pci_dma(PCI_DEVICE(sq->ctrl), sq->db_addr, &sq->tail, in nvme_update_sq_tail()
7408 trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail); in nvme_update_sq_tail()
7419 uint64_t slba = le64_to_cpu(rw->slba); in nvme_atomic_write_check()
7420 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb); in nvme_atomic_write_check()
7425 if ((cmd->opcode == NVME_CMD_READ) || ((cmd->opcode == NVME_CMD_WRITE) && in nvme_atomic_write_check()
7426 ((rw->nlb + 1) > atomic->atomic_max_write_size))) { in nvme_atomic_write_check()
7433 for (i = 1; i < n->params.max_ioqpairs + 1; i++) { in nvme_atomic_write_check()
7441 sq = n->sq[i]; in nvme_atomic_write_check()
7449 QTAILQ_FOREACH(req, &sq->out_req_list, entry) { in nvme_atomic_write_check()
7450 req_rw = (NvmeRwCmd *)&req->cmd; in nvme_atomic_write_check()
7452 if (((req_rw->opcode == NVME_CMD_WRITE) || in nvme_atomic_write_check()
7453 (req_rw->opcode == NVME_CMD_READ)) && in nvme_atomic_write_check()
7454 (cmd->nsid == req->ns->params.nsid)) { in nvme_atomic_write_check()
7455 req_slba = le64_to_cpu(req_rw->slba); in nvme_atomic_write_check()
7456 req_nlb = (uint32_t)le16_to_cpu(req_rw->nlb); in nvme_atomic_write_check()
7464 if (req->atomic_write && ((elba >= req_slba) && in nvme_atomic_write_check()
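/*
 * Standalone sketch (not from the listed source): before dispatching a write
 * that must be atomic, outstanding requests on the other I/O queues are
 * scanned and the new command is deferred if its LBA range intersects an
 * in-flight atomic write on the same namespace.  This shows only the
 * interval test, using the command encoding's 0's-based NLB fields.
 */
#include <stdint.h>
#include <stdio.h>

static int ranges_overlap(uint64_t slba_a, uint16_t nlb_a,
                          uint64_t slba_b, uint16_t nlb_b)
{
    uint64_t elba_a = slba_a + nlb_a;   /* last LBA of range A */
    uint64_t elba_b = slba_b + nlb_b;   /* last LBA of range B */

    return !(elba_a < slba_b || elba_b < slba_a);
}

int main(void)
{
    /* new write [100, 107] vs in-flight atomic write [104, 111] */
    printf("%d\n", ranges_overlap(100, 7, 104, 7));
    return 0;
}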
7480 if (n->atomic.atomic_writes) { in nvme_get_atomic()
7481 return &n->atomic; in nvme_get_atomic()
7489 NvmeCtrl *n = sq->ctrl; in nvme_process_sq()
7490 NvmeCQueue *cq = n->cq[sq->cqid]; in nvme_process_sq()
7497 if (n->dbbuf_enabled) { in nvme_process_sq()
7501 while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { in nvme_process_sq()
7505 addr = sq->dma_addr + (sq->head << NVME_SQES); in nvme_process_sq()
7509 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_process_sq()
7516 if (sq->sqid && atomic) { in nvme_process_sq()
7522 qemu_bh_schedule(sq->bh); in nvme_process_sq()
7534 req = QTAILQ_FIRST(&sq->req_list); in nvme_process_sq()
7535 QTAILQ_REMOVE(&sq->req_list, req, entry); in nvme_process_sq()
7536 QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); in nvme_process_sq()
7538 req->cqe.cid = cmd.cid; in nvme_process_sq()
7539 memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); in nvme_process_sq()
7541 if (sq->sqid && atomic) { in nvme_process_sq()
7542 req->atomic_write = cmd_is_atomic; in nvme_process_sq()
7545 status = sq->sqid ? nvme_io_cmd(n, req) : in nvme_process_sq()
7548 req->status = status; in nvme_process_sq()
7552 if (n->dbbuf_enabled) { in nvme_process_sq()
7567 assert(table_size > 0 && table_size <= pci_dev->msix_entries_nr); in nvme_update_msixcap_ts()
7569 config = pci_dev->config + pci_dev->msix_cap; in nvme_update_msixcap_ts()
7571 table_size - 1); in nvme_update_msixcap_ts()
7577 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_activate_virt_res()
7580 /* -1 to account for the admin queue */ in nvme_activate_virt_res()
7583 cap->vqprt = sctrl->nvq; in nvme_activate_virt_res()
7584 cap->viprt = sctrl->nvi; in nvme_activate_virt_res()
7585 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; in nvme_activate_virt_res()
7586 n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; in nvme_activate_virt_res()
7588 cap->vqrfap = n->next_pri_ctrl_cap.vqrfap; in nvme_activate_virt_res()
7589 cap->virfap = n->next_pri_ctrl_cap.virfap; in nvme_activate_virt_res()
7590 n->conf_ioqpairs = le16_to_cpu(cap->vqprt) + in nvme_activate_virt_res()
7591 le16_to_cpu(cap->vqrfap) - 1; in nvme_activate_virt_res()
7592 n->conf_msix_qsize = le16_to_cpu(cap->viprt) + in nvme_activate_virt_res()
7593 le16_to_cpu(cap->virfap); in nvme_activate_virt_res()
7613 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_ctrl_reset()
7614 if (n->sq[i] != NULL) { in nvme_ctrl_reset()
7615 nvme_free_sq(n->sq[i], n); in nvme_ctrl_reset()
7618 for (i = 0; i < n->params.max_ioqpairs + 1; i++) { in nvme_ctrl_reset()
7619 if (n->cq[i] != NULL) { in nvme_ctrl_reset()
7620 nvme_free_cq(n->cq[i], n); in nvme_ctrl_reset()
7624 while (!QTAILQ_EMPTY(&n->aer_queue)) { in nvme_ctrl_reset()
7625 NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); in nvme_ctrl_reset()
7626 QTAILQ_REMOVE(&n->aer_queue, event, entry); in nvme_ctrl_reset()
7630 if (n->params.sriov_max_vfs) { in nvme_ctrl_reset()
7632 for (i = 0; i < n->nr_sec_ctrls; i++) { in nvme_ctrl_reset()
7633 sctrl = &n->sec_ctrl_list[i]; in nvme_ctrl_reset()
7634 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); in nvme_ctrl_reset()
7643 n->aer_queued = 0; in nvme_ctrl_reset()
7644 n->aer_mask = 0; in nvme_ctrl_reset()
7645 n->outstanding_aers = 0; in nvme_ctrl_reset()
7646 n->qs_created = false; in nvme_ctrl_reset()
7648 n->dn = n->params.atomic_dn; /* Set Disable Normal */ in nvme_ctrl_reset()
7650 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); in nvme_ctrl_reset()
7655 stl_le_p(&n->bar.csts, sctrl->scs ? 0 : NVME_CSTS_FAILED); in nvme_ctrl_reset()
7657 stl_le_p(&n->bar.csts, 0); in nvme_ctrl_reset()
7660 stl_le_p(&n->bar.intms, 0); in nvme_ctrl_reset()
7661 stl_le_p(&n->bar.intmc, 0); in nvme_ctrl_reset()
7662 stl_le_p(&n->bar.cc, 0); in nvme_ctrl_reset()
7664 n->dbbuf_dbs = 0; in nvme_ctrl_reset()
7665 n->dbbuf_eis = 0; in nvme_ctrl_reset()
7666 n->dbbuf_enabled = false; in nvme_ctrl_reset()
7674 if (n->pmr.dev) { in nvme_ctrl_shutdown()
7675 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); in nvme_ctrl_shutdown()
7690 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_start_ctrl()
7691 uint32_t cc = ldl_le_p(&n->bar.cc); in nvme_start_ctrl()
7692 uint32_t aqa = ldl_le_p(&n->bar.aqa); in nvme_start_ctrl()
7693 uint64_t asq = ldq_le_p(&n->bar.asq); in nvme_start_ctrl()
7694 uint64_t acq = ldq_le_p(&n->bar.acq); in nvme_start_ctrl()
7699 if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) { in nvme_start_ctrl()
7700 trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), in nvme_start_ctrl()
7701 le16_to_cpu(sctrl->nvq)); in nvme_start_ctrl()
7702 return -1; in nvme_start_ctrl()
7704 if (unlikely(n->cq[0])) { in nvme_start_ctrl()
7706 return -1; in nvme_start_ctrl()
7708 if (unlikely(n->sq[0])) { in nvme_start_ctrl()
7710 return -1; in nvme_start_ctrl()
7712 if (unlikely(asq & (page_size - 1))) { in nvme_start_ctrl()
7714 return -1; in nvme_start_ctrl()
7716 if (unlikely(acq & (page_size - 1))) { in nvme_start_ctrl()
7718 return -1; in nvme_start_ctrl()
7722 return -1; in nvme_start_ctrl()
7728 return -1; in nvme_start_ctrl()
7735 return -1; in nvme_start_ctrl()
7739 return -1; in nvme_start_ctrl()
7743 return -1; in nvme_start_ctrl()
7746 n->page_bits = page_bits; in nvme_start_ctrl()
7747 n->page_size = page_size; in nvme_start_ctrl()
7748 n->max_prp_ents = n->page_size / sizeof(uint64_t); in nvme_start_ctrl()
7749 nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1); in nvme_start_ctrl()
7750 nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1); in nvme_start_ctrl()
7756 NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); in nvme_start_ctrl()
7758 if (!ns || (!ns->params.shared && ns->ctrl != n)) { in nvme_start_ctrl()
7762 if (nvme_csi_supported(n, ns->csi) && !ns->params.detached) { in nvme_start_ctrl()
7763 if (!ns->attached || ns->params.shared) { in nvme_start_ctrl()
7776 uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc); in nvme_cmb_enable_regs()
7777 uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz); in nvme_cmb_enable_regs()
7782 stl_le_p(&n->bar.cmbloc, cmbloc); in nvme_cmb_enable_regs()
7790 NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb); in nvme_cmb_enable_regs()
7791 stl_le_p(&n->bar.cmbsz, cmbsz); in nvme_cmb_enable_regs()
7798 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_write_bar()
7799 uint32_t cc = ldl_le_p(&n->bar.cc); in nvme_write_bar()
7800 uint32_t intms = ldl_le_p(&n->bar.intms); in nvme_write_bar()
7801 uint32_t csts = ldl_le_p(&n->bar.csts); in nvme_write_bar()
7802 uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts); in nvme_write_bar()
7804 if (unlikely(offset & (sizeof(uint32_t) - 1))) { in nvme_write_bar()
7806 "MMIO write not 32-bit aligned," in nvme_write_bar()
7813 "MMIO write smaller than 32-bits," in nvme_write_bar()
7824 " when MSI-X is enabled"); in nvme_write_bar()
7828 stl_le_p(&n->bar.intms, intms); in nvme_write_bar()
7829 n->bar.intmc = n->bar.intms; in nvme_write_bar()
7837 " when MSI-X is enabled"); in nvme_write_bar()
7841 stl_le_p(&n->bar.intms, intms); in nvme_write_bar()
7842 n->bar.intmc = n->bar.intms; in nvme_write_bar()
7847 stl_le_p(&n->bar.cc, data); in nvme_write_bar()
7876 stl_le_p(&n->bar.csts, csts); in nvme_write_bar()
7899 stl_le_p(&n->bar.aqa, data); in nvme_write_bar()
7903 stn_le_p(&n->bar.asq, size, data); in nvme_write_bar()
7907 stl_le_p((uint8_t *)&n->bar.asq + 4, data); in nvme_write_bar()
7908 trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq)); in nvme_write_bar()
7912 stn_le_p(&n->bar.acq, size, data); in nvme_write_bar()
7915 stl_le_p((uint8_t *)&n->bar.acq + 4, data); in nvme_write_bar()
7916 trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq)); in nvme_write_bar()
7932 stn_le_p(&n->bar.cmbmsc, size, data); in nvme_write_bar()
7933 n->cmb.cmse = false; in nvme_write_bar()
7939 uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc); in nvme_write_bar()
7941 if (cba + int128_get64(n->cmb.mem.size) < cba) { in nvme_write_bar()
7942 uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts); in nvme_write_bar()
7944 stl_le_p(&n->bar.cmbsts, cmbsts); in nvme_write_bar()
7948 n->cmb.cba = cba; in nvme_write_bar()
7949 n->cmb.cmse = true; in nvme_write_bar()
7952 n->bar.cmbsz = 0; in nvme_write_bar()
7953 n->bar.cmbloc = 0; in nvme_write_bar()
7958 stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data); in nvme_write_bar()
7970 stl_le_p(&n->bar.pmrctl, data); in nvme_write_bar()
7972 memory_region_set_enabled(&n->pmr.dev->mr, true); in nvme_write_bar()
7975 memory_region_set_enabled(&n->pmr.dev->mr, false); in nvme_write_bar()
7977 n->pmr.cmse = false; in nvme_write_bar()
7979 stl_le_p(&n->bar.pmrsts, pmrsts); in nvme_write_bar()
7998 stl_le_p(&n->bar.pmrmscl, data); in nvme_write_bar()
7999 n->pmr.cmse = false; in nvme_write_bar()
8002 uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu); in nvme_write_bar()
8005 if (cba + int128_get64(n->pmr.dev->mr.size) < cba) { in nvme_write_bar()
8007 stl_le_p(&n->bar.pmrsts, pmrsts); in nvme_write_bar()
8011 n->pmr.cmse = true; in nvme_write_bar()
8012 n->pmr.cba = cba; in nvme_write_bar()
8021 stl_le_p(&n->bar.pmrmscu, data); in nvme_write_bar()
8035 uint8_t *ptr = (uint8_t *)&n->bar; in nvme_mmio_read()
8039 if (unlikely(addr & (sizeof(uint32_t) - 1))) { in nvme_mmio_read()
8041 "MMIO read not 32-bit aligned," in nvme_mmio_read()
8046 "MMIO read smaller than 32-bits," in nvme_mmio_read()
8051 if (addr > sizeof(n->bar) - size) { in nvme_mmio_read()
8059 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && in nvme_mmio_read()
8071 (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) { in nvme_mmio_read()
8072 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); in nvme_mmio_read()
8083 if (unlikely(addr & ((1 << 2) - 1))) { in nvme_process_db()
8085 "doorbell write not 32-bit aligned," in nvme_process_db()
8090 if (((addr - 0x1000) >> 2) & 1) { in nvme_process_db()
8096 qid = (addr - (0x1000 + (1 << 2))) >> 3; in nvme_process_db()
8116 if (n->outstanding_aers) { in nvme_process_db()
8125 cq = n->cq[qid]; in nvme_process_db()
8126 if (unlikely(new_head >= cq->size)) { in nvme_process_db()
8133 if (n->outstanding_aers) { in nvme_process_db()
8142 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); in nvme_process_db()
8146 qemu_bh_schedule(cq->bh); in nvme_process_db()
8149 cq->head = new_head; in nvme_process_db()
8150 if (!qid && n->dbbuf_enabled) { in nvme_process_db()
8151 stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); in nvme_process_db()
8154 if (cq->tail == cq->head) { in nvme_process_db()
8155 if (cq->irq_enabled) { in nvme_process_db()
8156 n->cq_pending--; in nvme_process_db()
8167 qid = (addr - 0x1000) >> 3; in nvme_process_db()
8174 if (n->outstanding_aers) { in nvme_process_db()
8183 sq = n->sq[qid]; in nvme_process_db()
8184 if (unlikely(new_tail >= sq->size)) { in nvme_process_db()
8191 if (n->outstanding_aers) { in nvme_process_db()
8200 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); in nvme_process_db()
8202 sq->tail = new_tail; in nvme_process_db()
8203 if (!qid && n->dbbuf_enabled) { in nvme_process_db()
8217 stl_le_pci_dma(pci, sq->db_addr, sq->tail, MEMTXATTRS_UNSPECIFIED); in nvme_process_db()
8220 qemu_bh_schedule(sq->bh); in nvme_process_db()
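/*
 * Standalone sketch (not from the listed source): doorbell writes land in
 * BAR0 starting at offset 0x1000; with the 4-byte stride used above each
 * queue id occupies 8 bytes, submission tail first and completion head 4
 * bytes later, so bit 2 of the offset selects SQ vs CQ and the queue id
 * falls out of a shift.
 */
#include <stdint.h>
#include <stdio.h>

struct db_write {
    int      is_cq;   /* 1 = completion queue head, 0 = submission queue tail */
    uint16_t qid;
};

/* addr is the offset into BAR0 of a 32-bit doorbell write (addr >= 0x1000). */
static struct db_write decode_doorbell(uint64_t addr)
{
    struct db_write w;

    w.is_cq = ((addr - 0x1000) >> 2) & 1;
    if (w.is_cq) {
        w.qid = (addr - (0x1000 + (1 << 2))) >> 3;
    } else {
        w.qid = (addr - 0x1000) >> 3;
    }
    return w;
}

int main(void)
{
    struct db_write w = decode_doorbell(0x100c);   /* 0x1000 + 8 + 4 */

    printf("is_cq=%d qid=%u\n", w.is_cq, w.qid);   /* CQ 1 head doorbell */
    return 0;
}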
8231 if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs && in nvme_mmio_write()
8237 if (addr < sizeof(n->bar)) { in nvme_mmio_write()
8258 stn_le_p(&n->cmb.buf[addr], size, data); in nvme_cmb_write()
8264 return ldn_le_p(&n->cmb.buf[addr], size); in nvme_cmb_read()
8279 NvmeParams *params = &n->params; in nvme_check_params()
8281 if (params->num_queues) { in nvme_check_params()
8285 params->max_ioqpairs = params->num_queues - 1; in nvme_check_params()
8288 if (n->namespace.blkconf.blk && n->subsys) { in nvme_check_params()
8294 if (params->max_ioqpairs < 1 || in nvme_check_params()
8295 params->max_ioqpairs > NVME_MAX_IOQPAIRS) { in nvme_check_params()
8301 if (params->msix_qsize < 1 || in nvme_check_params()
8302 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { in nvme_check_params()
8308 if (!params->serial) { in nvme_check_params()
8313 if (params->mqes < 1) { in nvme_check_params()
8318 if (n->pmr.dev) { in nvme_check_params()
8319 if (params->msix_exclusive_bar) { in nvme_check_params()
8324 if (host_memory_backend_is_mapped(n->pmr.dev)) { in nvme_check_params()
8326 object_get_canonical_path_component(OBJECT(n->pmr.dev))); in nvme_check_params()
8330 if (!is_power_of_2(n->pmr.dev->size)) { in nvme_check_params()
8335 host_memory_backend_set_mapped(n->pmr.dev, true); in nvme_check_params()
8338 if (n->params.zasl > n->params.mdts) { in nvme_check_params()
8344 if (!n->params.vsl) { in nvme_check_params()
8345 error_setg(errp, "vsl must be non-zero"); in nvme_check_params()
8349 if (params->sriov_max_vfs) { in nvme_check_params()
8350 if (!n->subsys) { in nvme_check_params()
8351 error_setg(errp, "subsystem is required for the use of SR-IOV"); in nvme_check_params()
8355 if (params->cmb_size_mb) { in nvme_check_params()
8356 error_setg(errp, "CMB is not supported with SR-IOV"); in nvme_check_params()
8360 if (n->pmr.dev) { in nvme_check_params()
8361 error_setg(errp, "PMR is not supported with SR-IOV"); in nvme_check_params()
8365 if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) { in nvme_check_params()
8367 " must be set for the use of SR-IOV"); in nvme_check_params()
8371 if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) { in nvme_check_params()
8373 " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2); in nvme_check_params()
8377 if (params->max_ioqpairs < params->sriov_vq_flexible + 2) { in nvme_check_params()
8378 error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be" in nvme_check_params()
8383 if (params->sriov_vi_flexible < params->sriov_max_vfs) { in nvme_check_params()
8385 " to %d (sriov_max_vfs)", params->sriov_max_vfs); in nvme_check_params()
8389 if (params->msix_qsize < params->sriov_vi_flexible + 1) { in nvme_check_params()
8390 error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be" in nvme_check_params()
8395 if (params->sriov_max_vi_per_vf && in nvme_check_params()
8396 (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) { in nvme_check_params()
8398 " (sriov_max_vi_per_vf - 1) %% %d == 0 and" in nvme_check_params()
8403 if (params->sriov_max_vq_per_vf && in nvme_check_params()
8404 (params->sriov_max_vq_per_vf < 2 || in nvme_check_params()
8405 (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) { in nvme_check_params()
8407 " (sriov_max_vq_per_vf - 1) %% %d == 0 and" in nvme_check_params()
8418 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_init_state()
8419 NvmeSecCtrlEntry *list = n->sec_ctrl_list; in nvme_init_state()
8422 NvmeAtomic *atomic = &n->atomic; in nvme_init_state()
8423 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_state()
8430 n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; in nvme_init_state()
8431 n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1; in nvme_init_state()
8433 max_vfs = n->params.sriov_max_vfs; in nvme_init_state()
8434 n->conf_ioqpairs = n->params.max_ioqpairs; in nvme_init_state()
8435 n->conf_msix_qsize = n->params.msix_qsize; in nvme_init_state()
8438 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); in nvme_init_state()
8439 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); in nvme_init_state()
8440 n->temperature = NVME_TEMPERATURE; in nvme_init_state()
8441 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; in nvme_init_state()
8442 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); in nvme_init_state()
8443 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); in nvme_init_state()
8444 QTAILQ_INIT(&n->aer_queue); in nvme_init_state()
8446 n->nr_sec_ctrls = max_vfs; in nvme_init_state()
8449 sctrl->pcid = cpu_to_le16(n->cntlid); in nvme_init_state()
8450 sctrl->vfn = cpu_to_le16(i + 1); in nvme_init_state()
8453 cap->cntlid = cpu_to_le16(n->cntlid); in nvme_init_state()
8454 cap->crt = NVME_CRT_VQ | NVME_CRT_VI; in nvme_init_state()
8457 cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); in nvme_init_state()
8459 cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - in nvme_init_state()
8460 n->params.sriov_vq_flexible); in nvme_init_state()
8461 cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible); in nvme_init_state()
8462 cap->vqrfap = cap->vqfrt; in nvme_init_state()
8463 cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY); in nvme_init_state()
8464 cap->vqfrsm = n->params.sriov_max_vq_per_vf ? in nvme_init_state()
8465 cpu_to_le16(n->params.sriov_max_vq_per_vf) : in nvme_init_state()
8466 cap->vqfrt / MAX(max_vfs, 1); in nvme_init_state()
8470 cap->viprt = cpu_to_le16(n->conf_msix_qsize); in nvme_init_state()
8472 cap->viprt = cpu_to_le16(n->params.msix_qsize - in nvme_init_state()
8473 n->params.sriov_vi_flexible); in nvme_init_state()
8474 cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible); in nvme_init_state()
8475 cap->virfap = cap->vifrt; in nvme_init_state()
8476 cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY); in nvme_init_state()
8477 cap->vifrsm = n->params.sriov_max_vi_per_vf ? in nvme_init_state()
8478 cpu_to_le16(n->params.sriov_max_vi_per_vf) : in nvme_init_state()
8479 cap->vifrt / MAX(max_vfs, 1); in nvme_init_state()
8483 id->awun = cpu_to_le16(n->params.atomic_awun); in nvme_init_state()
8484 id->awupf = cpu_to_le16(n->params.atomic_awupf); in nvme_init_state()
8485 n->dn = n->params.atomic_dn; in nvme_init_state()
8487 if (id->awun || id->awupf) { in nvme_init_state()
8488 if (id->awupf > id->awun) { in nvme_init_state()
8489 id->awupf = 0; in nvme_init_state()
8492 if (n->dn) { in nvme_init_state()
8493 atomic->atomic_max_write_size = id->awupf + 1; in nvme_init_state()
8495 atomic->atomic_max_write_size = id->awun + 1; in nvme_init_state()
8498 if (atomic->atomic_max_write_size == 1) { in nvme_init_state()
8499 atomic->atomic_writes = 0; in nvme_init_state()
8501 atomic->atomic_writes = 1; in nvme_init_state()
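/*
 * For the primary controller the capability structure above follows
 * directly from the parameters. Continuing the illustrative configuration
 * used for nvme_check_params() (max_ioqpairs=66, msix_qsize=67,
 * sriov_max_vfs=4, sriov_vq_flexible=8, sriov_vi_flexible=4, no per-VF
 * maxima set), the resulting values would be:
 *
 *   VQPRT = 1 + 66 - 8 = 59   VQFRT = 8   VQFRSM = 8 / 4 = 2
 *   VIPRT = 67 - 4     = 63   VIFRT = 4   VIFRSM = 4 / 4 = 1
 */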
8508 uint64_t cmb_size = n->params.cmb_size_mb * MiB; in nvme_init_cmb()
8509 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_init_cmb()
8511 n->cmb.buf = g_malloc0(cmb_size); in nvme_init_cmb()
8512 memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, in nvme_init_cmb()
8513 "nvme-cmb", cmb_size); in nvme_init_cmb()
8517 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); in nvme_init_cmb()
8520 stq_le_p(&n->bar.cap, cap); in nvme_init_cmb()
8522 if (n->params.legacy_cmb) { in nvme_init_cmb()
8524 n->cmb.cmse = true; in nvme_init_cmb()
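/*
 * The CMB is backed by a host buffer of cmb_size_mb MiB exposed through a
 * dedicated prefetchable BAR; with the `legacy-cmb` parameter the device
 * falls back to the pre-v1.4 behaviour and marks the controller memory
 * space as enabled (cmse) up front.
 */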
8530 uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap); in nvme_init_pmr()
8538 stl_le_p(&n->bar.pmrcap, pmrcap); in nvme_init_pmr()
8543 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); in nvme_init_pmr()
8545 memory_region_set_enabled(&n->pmr.dev->mr, false); in nvme_init_pmr()
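/*
 * The PMR maps the host memory backend's region behind a prefetchable BAR;
 * the region starts out disabled and is presumably re-enabled when the
 * guest turns the PMR on via PMRCTL (that path is not part of this
 * excerpt).
 */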
8583 uint16_t vf_dev_id = n->params.use_intel_id ? in nvme_init_sriov()
8585 NvmePriCtrlCap *cap = &n->pri_ctrl_cap; in nvme_init_sriov()
8586 uint64_t bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), in nvme_init_sriov()
8587 le16_to_cpu(cap->vifrsm), in nvme_init_sriov()
8591 n->params.sriov_max_vfs, n->params.sriov_max_vfs, in nvme_init_sriov()
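/*
 * The VF BAR is sized from the per-VF resource maxima (VQFRSM/VIFRSM) so
 * that each virtual function has room for its maximum number of doorbells
 * and MSI-X vectors, and the VF device id follows `use-intel-id` just like
 * the physical function.
 */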
8613 pci_set_word(pci_dev->config + offset + PCI_PM_PMC, in nvme_add_pm_capability()
8615 pci_set_word(pci_dev->config + offset + PCI_PM_CTRL, in nvme_add_pm_capability()
8617 pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL, in nvme_add_pm_capability()
8627 void *rsp = doe_cap->read_mbox; in pcie_doe_spdm_rsp()
8630 uint32_t recvd = spdm_socket_rsp(doe_cap->spdm_socket, in pcie_doe_spdm_rsp()
8633 doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4); in pcie_doe_spdm_rsp()
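/*
 * DOE requests are proxied to an external SPDM responder over the socket
 * opened at init time; the reply lands in the DOE read mailbox and its
 * length is accounted in dwords, hence DIV_ROUND_UP(recvd, 4).
 */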
8647 uint8_t *pci_conf = pci_dev->config; in nvme_init_pci()
8656 if (n->params.use_intel_id) { in nvme_init_pci()
8668 if (n->params.sriov_max_vfs) { in nvme_init_pci()
8672 if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { in nvme_init_pci()
8673 bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, 0, NULL, NULL); in nvme_init_pci()
8674 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", in nvme_init_pci()
8677 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem); in nvme_init_pci()
8678 ret = msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp); in nvme_init_pci()
8680 assert(n->params.msix_qsize >= 1); in nvme_init_pci()
8684 nr_vectors = n->params.msix_qsize; in nvme_init_pci()
8685 bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1, in nvme_init_pci()
8690 NvmePriCtrlCap *cap = &pn->pri_ctrl_cap; in nvme_init_pci()
8692 nr_vectors = le16_to_cpu(cap->vifrsm); in nvme_init_pci()
8693 bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), nr_vectors, in nvme_init_pci()
8697 memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); in nvme_init_pci()
8698 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", in nvme_init_pci()
8700 memory_region_add_subregion(&n->bar0, 0, &n->iomem); in nvme_init_pci()
8703 pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); in nvme_init_pci()
8706 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); in nvme_init_pci()
8710 &n->bar0, 0, msix_table_offset, in nvme_init_pci()
8711 &n->bar0, 0, msix_pba_offset, 0, errp); in nvme_init_pci()
8714 if (ret == -ENOTSUP) { in nvme_init_pci()
8723 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs && in nvme_init_pci()
8728 nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); in nvme_init_pci()
8733 if (pci_dev->spdm_port) { in nvme_init_pci()
8734 uint16_t doe_offset = n->params.sriov_max_vfs ? in nvme_init_pci()
8738 pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset, in nvme_init_pci()
8741 pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port, in nvme_init_pci()
8744 if (pci_dev->doe_spdm.spdm_socket < 0) { in nvme_init_pci()
8749 if (n->params.cmb_size_mb) { in nvme_init_pci()
8753 if (n->pmr.dev) { in nvme_init_pci()
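/*
 * BAR layout: with `msix-exclusive-bar` BAR0 carries only the register set
 * and MSI-X lives in its own BAR via msix_init_exclusive_bar(); otherwise
 * BAR0 is a container holding the registers followed by the MSI-X table and
 * PBA. For a VF the size is derived from the parent function's
 * VQFRSM/VIFRSM. When pci_dev->spdm_port is set, a DOE capability is added
 * and a socket connection to an external SPDM server is established before
 * the CMB and PMR are set up.
 */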
8762 NvmeSubsystem *subsys = n->subsys; in nvme_init_subnqn()
8763 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_subnqn()
8766 snprintf((char *)id->subnqn, sizeof(id->subnqn), in nvme_init_subnqn()
8767 "nqn.2019-08.org.qemu:%s", n->params.serial); in nvme_init_subnqn()
8769 pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn); in nvme_init_subnqn()
8775 NvmeIdCtrl *id = &n->id_ctrl; in nvme_init_ctrl()
8776 uint8_t *pci_conf = pci_dev->config; in nvme_init_ctrl()
8777 uint64_t cap = ldq_le_p(&n->bar.cap); in nvme_init_ctrl()
8782 memcpy(n->cse.acs, nvme_cse_acs_default, sizeof(n->cse.acs)); in nvme_init_ctrl()
8783 memcpy(n->cse.iocs.nvm, nvme_cse_iocs_nvm_default, sizeof(n->cse.iocs.nvm)); in nvme_init_ctrl()
8784 memcpy(n->cse.iocs.zoned, nvme_cse_iocs_zoned_default, in nvme_init_ctrl()
8785 sizeof(n->cse.iocs.zoned)); in nvme_init_ctrl()
8787 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); in nvme_init_ctrl()
8788 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); in nvme_init_ctrl()
8789 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); in nvme_init_ctrl()
8790 strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' '); in nvme_init_ctrl()
8791 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); in nvme_init_ctrl()
8793 id->cntlid = cpu_to_le16(n->cntlid); in nvme_init_ctrl()
8795 id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); in nvme_init_ctrl()
8798 if (n->params.ctratt.mem) { in nvme_init_ctrl()
8802 id->rab = 6; in nvme_init_ctrl()
8804 if (n->params.use_intel_id) { in nvme_init_ctrl()
8805 id->ieee[0] = 0xb3; in nvme_init_ctrl()
8806 id->ieee[1] = 0x02; in nvme_init_ctrl()
8807 id->ieee[2] = 0x00; in nvme_init_ctrl()
8809 id->ieee[0] = 0x00; in nvme_init_ctrl()
8810 id->ieee[1] = 0x54; in nvme_init_ctrl()
8811 id->ieee[2] = 0x52; in nvme_init_ctrl()
8814 id->mdts = n->params.mdts; in nvme_init_ctrl()
8815 id->ver = cpu_to_le32(NVME_SPEC_VER); in nvme_init_ctrl()
8819 if (n->params.dbcs) { in nvme_init_ctrl()
8822 n->cse.acs[NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP; in nvme_init_ctrl()
8825 if (n->params.sriov_max_vfs) { in nvme_init_ctrl()
8828 n->cse.acs[NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP; in nvme_init_ctrl()
8831 id->oacs = cpu_to_le16(oacs); in nvme_init_ctrl()
8833 id->cntrltype = 0x1; in nvme_init_ctrl()
8846 id->acl = 3; in nvme_init_ctrl()
8847 id->aerl = n->params.aerl; in nvme_init_ctrl()
8848 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; in nvme_init_ctrl()
8849 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; in nvme_init_ctrl()
8852 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); in nvme_init_ctrl()
8853 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); in nvme_init_ctrl()
8855 id->sqes = (NVME_SQES << 4) | NVME_SQES; in nvme_init_ctrl()
8856 id->cqes = (NVME_CQES << 4) | NVME_CQES; in nvme_init_ctrl()
8857 id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); in nvme_init_ctrl()
8858 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | in nvme_init_ctrl()
8865 * as a Flush-equivalent operation, support for the broadcast NSID in Flush in nvme_init_ctrl()
8870 id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; in nvme_init_ctrl()
8872 id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1 | in nvme_init_ctrl()
8874 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | in nvme_init_ctrl()
8879 id->psd[0].mp = cpu_to_le16(0x9c4); in nvme_init_ctrl()
8880 id->psd[0].enlat = cpu_to_le32(0x10); in nvme_init_ctrl()
8881 id->psd[0].exlat = cpu_to_le32(0x4); in nvme_init_ctrl()
8883 id->cmic |= NVME_CMIC_MULTI_CTRL; in nvme_init_ctrl()
8886 id->endgidmax = cpu_to_le16(0x1); in nvme_init_ctrl()
8888 if (n->subsys->endgrp.fdp.enabled) { in nvme_init_ctrl()
8892 id->ctratt = cpu_to_le32(ctratt); in nvme_init_ctrl()
8894 NVME_CAP_SET_MQES(cap, n->params.mqes); in nvme_init_ctrl()
8900 NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); in nvme_init_ctrl()
8901 NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); in nvme_init_ctrl()
8902 stq_le_p(&n->bar.cap, cap); in nvme_init_ctrl()
8904 stl_le_p(&n->bar.vs, NVME_SPEC_VER); in nvme_init_ctrl()
8905 n->bar.intmc = n->bar.intms = 0; in nvme_init_ctrl()
8907 if (pci_is_vf(pci_dev) && !sctrl->scs) { in nvme_init_ctrl()
8908 stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); in nvme_init_ctrl()
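/*
 * The Identify Controller data filled in above advertises model
 * "QEMU NVMe Ctrl", the configured serial number and QEMU_VERSION as the
 * firmware revision. As an illustrative (not normative) check from a Linux
 * guest:
 *
 *   # nvme id-ctrl /dev/nvme0 | grep -E '^(mn|sn|fr) '
 */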
8916 if (!n->subsys) { in nvme_init_subsys()
8919 qdev_prop_set_string(dev, "nqn", n->params.serial); in nvme_init_subsys()
8922 return -1; in nvme_init_subsys()
8925 n->subsys = NVME_SUBSYS(dev); in nvme_init_subsys()
8930 return -1; in nvme_init_subsys()
8933 n->cntlid = cntlid; in nvme_init_subsys()
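/*
 * When no nvme-subsys device was specified, a default subsystem is created
 * on the fly with its NQN derived from the controller serial, and the
 * controller then registers with it to obtain its cntlid.
 */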
8940 uint32_t nsid = ns->params.nsid; in nvme_attach_ns()
8943 n->namespaces[nsid] = ns; in nvme_attach_ns()
8944 ns->attached++; in nvme_attach_ns()
8959 memcpy(&n->params, &pn->params, sizeof(NvmeParams)); in nvme_realize()
8965 n->params.serial = g_strdup(pn->params.serial); in nvme_realize()
8966 n->subsys = pn->subsys; in nvme_realize()
8973 object_ref(OBJECT(pn->subsys)); in nvme_realize()
8980 qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); in nvme_realize()
8992 if (n->namespace.blkconf.blk) { in nvme_realize()
8993 ns = &n->namespace; in nvme_realize()
8994 ns->params.nsid = 1; in nvme_realize()
8995 ns->ctrl = n; in nvme_realize()
9001 n->subsys->namespaces[ns->params.nsid] = ns; in nvme_realize()
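/*
 * A virtual function inherits its configuration from the primary controller
 * (the parameter struct is copied, the serial string duplicated and the
 * subsystem reference shared), and a drive attached directly to the nvme
 * device becomes an implicit namespace with nsid 1 registered with the
 * subsystem.
 */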
9016 ns->attached--; in nvme_exit()
9020 nvme_subsys_unregister_ctrl(n->subsys, n); in nvme_exit()
9022 g_free(n->cq); in nvme_exit()
9023 g_free(n->sq); in nvme_exit()
9024 g_free(n->aer_reqs); in nvme_exit()
9026 if (n->params.cmb_size_mb) { in nvme_exit()
9027 g_free(n->cmb.buf); in nvme_exit()
9030 if (pci_dev->doe_spdm.spdm_socket > 0) { in nvme_exit()
9031 spdm_socket_close(pci_dev->doe_spdm.spdm_socket, in nvme_exit()
9035 if (n->pmr.dev) { in nvme_exit()
9036 host_memory_backend_set_mapped(n->pmr.dev, false); in nvme_exit()
9039 if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { in nvme_exit()
9043 if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { in nvme_exit()
9046 msix_uninit(pci_dev, &n->bar0, &n->bar0); in nvme_exit()
9049 memory_region_del_subregion(&n->bar0, &n->iomem); in nvme_exit()
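/*
 * Teardown mirrors initialization: namespaces are detached, the controller
 * is unregistered from the subsystem, queue and AER bookkeeping is freed,
 * the CMB buffer is released, the SPDM socket is closed, the PMR backend is
 * unmapped and MSI-X is torn down according to whether the exclusive BAR
 * was in use.
 */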
9067 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
9068 DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
9083 DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar,
9098 uint8_t value = n->smart_critical_warning; in nvme_get_smart_warning()
9115 if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) { in nvme_set_smart_warning()
9125 old_value = n->smart_critical_warning; in nvme_set_smart_warning()
9126 n->smart_critical_warning = value; in nvme_set_smart_warning()
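/*
 * These two helpers back a writable QOM property; its registration is not
 * part of this excerpt, so the name smart_critical_warning used below is an
 * assumption. If it matches, the value can be changed at runtime from the
 * monitor, for example:
 *
 *   (qemu) qom-set /machine/peripheral/nvme0 smart_critical_warning 16
 */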
9152 sctrl = &n->sec_ctrl_list[i]; in nvme_sriov_post_write_config()
9153 nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false); in nvme_sriov_post_write_config()
9163 pcie_doe_write_config(&dev->doe_spdm, address, val, len); in nvme_pci_write_config()
9173 if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) { in nvme_pci_read_config()
9174 if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) { in nvme_pci_read_config()
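/*
 * PCI config space accesses are filtered for DOE: reads are intercepted
 * only when an SPDM port is configured and the DOE extended capability is
 * present, in which case the mailbox value is returned instead of the
 * regular config value; writes are forwarded to pcie_doe_write_config()
 * (the surrounding guard is not visible in this excerpt).
 */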
9191 pc->realize = nvme_realize; in nvme_class_init()
9192 pc->config_write = nvme_pci_write_config; in nvme_class_init()
9193 pc->config_read = nvme_pci_read_config; in nvme_class_init()
9194 pc->exit = nvme_exit; in nvme_class_init()
9195 pc->class_id = PCI_CLASS_STORAGE_EXPRESS; in nvme_class_init()
9196 pc->revision = 2; in nvme_class_init()
9198 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); in nvme_class_init()
9199 dc->desc = "Non-Volatile Memory Express"; in nvme_class_init()
9201 dc->vmsd = &nvme_vmstate; in nvme_class_init()
9209 device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex, in nvme_instance_init()