/*
 * QEMU NVM Express
 *
 * Copyright (c) 2012 Intel Corporation
 * Copyright (c) 2021 Minwoo Im
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Keith Busch <kbusch@kernel.org>
 *   Klaus Jensen <k.jensen@samsung.com>
 *   Gollu Appalanaidu <anaidu.gollu@samsung.com>
 *   Dmitry Fomichev <dmitry.fomichev@wdc.com>
 *   Minwoo Im <minwoo.im.dev@gmail.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#ifndef HW_NVME_NVME_H
#define HW_NVME_NVME_H

#include "qemu/uuid.h"
#include "hw/pci/pci_device.h"
#include "hw/block/block.h"

#include "block/nvme.h"

#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES 256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
#define NVME_FDP_MAX_EVENTS 63
#define NVME_FDP_MAXPIDS 128

/*
 * The controller only supports Submission and Completion Queue Entry Sizes of
 * 64 and 16 bytes respectively; the values below are the log2 encodings
 * (1 << 6 == 64, 1 << 4 == 16) used by CC.IOSQES/IOCQES.
 */
#define NVME_SQES 6
#define NVME_CQES 4

QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);

typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;

#define TYPE_NVME_BUS "nvme-bus"
OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)

typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;

#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
    OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
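/*
 * Sentinel stored in NvmeSubsystem.ctrls[] to reserve a controller slot
 * that has no registered controller; nvme_subsys_ctrl() treats it as absent.
 */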
#define SUBSYS_SLOT_RSVD (void *)0xFFFF

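/*
 * Flexible Data Placement (FDP) state: each reclaim unit tracks its
 * remaining Reclaim Unit Available Media Writes (ruamw).
 */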
typedef struct NvmeReclaimUnit {
    uint64_t ruamw;
} NvmeReclaimUnit;

typedef struct NvmeRuHandle {
    uint8_t ruht;
    uint8_t ruha;
    uint64_t event_filter;
    uint8_t lbafi;
    uint64_t ruamw;

    /* reclaim units indexed by reclaim group */
    NvmeReclaimUnit *rus;
} NvmeRuHandle;

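/*
 * Ring buffer of pending FDP events: 'start' indexes the oldest event,
 * 'next' the insertion point, and 'nelems' counts buffered entries.
 */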
typedef struct NvmeFdpEventBuffer {
    NvmeFdpEvent events[NVME_FDP_MAX_EVENTS];
    unsigned int nelems;
    unsigned int start;
    unsigned int next;
} NvmeFdpEventBuffer;

typedef struct NvmeEnduranceGroup {
    uint8_t event_conf;

    struct {
        NvmeFdpEventBuffer host_events, ctrl_events;

        uint16_t nruh;
        uint16_t nrg;
        uint8_t rgif;
        uint64_t runs;

        uint64_t hbmw;
        uint64_t mbmw;
        uint64_t mbe;

        bool enabled;

        NvmeRuHandle *ruhs;
    } fdp;
} NvmeEnduranceGroup;

typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus bus;
    uint8_t subnqn[256];
    char *serial;

    NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeEnduranceGroup endgrp;

    struct {
        char *nqn;

        struct {
            bool enabled;
            uint64_t runs;
            uint16_t nruh;
            uint32_t nrg;
        } fdp;
    } params;
} NvmeSubsystem;

int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);

static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
                                         uint32_t cntlid)
{
    if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
        return NULL;
    }

    if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
        return NULL;
    }

    return subsys->ctrls[cntlid];
}

static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
                                            uint32_t nsid)
{
    if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return subsys->namespaces[nsid];
}

#define TYPE_NVME_NS "nvme-ns"
#define NVME_NS(obj) \
    OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)

typedef struct NvmeZone {
    NvmeZoneDescr d;
    uint64_t w_ptr;
    QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;

#define FDP_EVT_MAX 0xff
#define NVME_FDP_MAX_NS_RUHS 32u
#define FDPVSS 0

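/*
 * Bit position of each FDP event type within the event filter bitmask;
 * host events occupy the low dword, controller events start at bit 32.
 */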
static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
    /* Host events */
    [FDP_EVT_RU_NOT_FULLY_WRITTEN] = 0,
    [FDP_EVT_RU_ATL_EXCEEDED] = 1,
    [FDP_EVT_CTRL_RESET_RUH] = 2,
    [FDP_EVT_INVALID_PID] = 3,
    /* CTRL events */
    [FDP_EVT_MEDIA_REALLOC] = 32,
    [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33,
};

#define NGUID_LEN 16

typedef struct {
    uint8_t data[NGUID_LEN];
} NvmeNGUID;

bool nvme_nguid_is_null(const NvmeNGUID *nguid);

extern const PropertyInfo qdev_prop_nguid;

#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \
    DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID)

typedef struct NvmeNamespaceParams {
    bool detached;
    bool shared;
    uint32_t nsid;
    QemuUUID uuid;
    NvmeNGUID nguid;
    uint64_t eui64;
    bool eui64_default;

    uint16_t ms;
    uint8_t mset;
    uint8_t pi;
    uint8_t pil;
    uint8_t pif;

    uint16_t mssrl;
    uint32_t mcl;
    uint8_t msrc;

    bool zoned;
    bool cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;

    struct {
        char *ruhs;
    } fdp;
} NvmeNamespaceParams;

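/*
 * Atomic write state derived from the atomic_* device parameters; writes
 * no larger than atomic_max_write_size are executed atomically.
 */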
typedef struct NvmeAtomic {
    uint32_t atomic_max_write_size;
    bool atomic_writes;
} NvmeAtomic;

typedef struct NvmeNamespace {
    DeviceState parent_obj;
    BlockConf blkconf;
    int32_t bootindex;
    int64_t size;
    int64_t moff;
    NvmeIdNs id_ns;
    NvmeIdNsNvm id_ns_nvm;
    NvmeIdNsInd id_ns_ind;
    NvmeLBAF lbaf;
    unsigned int nlbaf;
    size_t lbasz;
    uint8_t csi;
    uint16_t status;
    int attached;
    uint8_t pif;

    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    NvmeIdNsZoned *id_ns_zoned;
    NvmeZone *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t num_zones;
    uint64_t zone_size;
    uint64_t zone_capacity;
    uint32_t zone_size_log2;
    uint8_t *zd_extensions;
    int32_t nr_open_zones;
    int32_t nr_active_zones;

    NvmeNamespaceParams params;
    NvmeSubsystem *subsys;
    NvmeEnduranceGroup *endgrp;

    /* NULL for shared namespaces; set to specific controller if private */
    NvmeCtrl *ctrl;

    struct {
        uint32_t err_rec;
    } features;

    struct {
        uint16_t nphs;
        /* reclaim unit handle identifiers indexed by placement handle */
        uint16_t *phs;
    } fdp;
} NvmeNamespace;

static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    if (ns) {
        return ns->params.nsid;
    }

    return 0;
}

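/*
 * Convert a logical block count or LBA to bytes: lbaf.ds is the log2 of
 * the LBA data size and lbaf.ms the per-block metadata size.
 */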
static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba << ns->lbaf.ds;
}

static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return ns->lbaf.ms * lba;
}

static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    return ns->moff + nvme_m2b(ns, lba);
}

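/* An extended LBA format interleaves metadata with the logical block data. */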
static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas);
}

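/*
 * The zone state lives in the upper nibble of the zone descriptor's ZS field.
 */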
static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
{
    return zone->d.zs >> 4;
}

static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}

static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    return zone->d.zslba + ns->zone_size;
}

static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zslba + zone->d.zcap;
}

static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    uint8_t st = nvme_get_zone_state(zone);

    return st != NVME_ZONE_STATE_FULL &&
           st != NVME_ZONE_STATE_READ_ONLY &&
           st != NVME_ZONE_STATE_OFFLINE;
}

static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size];
}

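/*
 * Active/open zone resource accounting; the limits are only enforced when
 * the corresponding max_*_zones parameter is non-zero (zero means no limit).
 */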
static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);
    if (ns->params.max_open_zones) {
        ns->nr_open_zones++;
        assert(ns->nr_open_zones <= ns->params.max_open_zones);
    }
}

static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones) {
        assert(ns->nr_open_zones > 0);
        ns->nr_open_zones--;
    }
    assert(ns->nr_open_zones >= 0);
}

static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);
    if (ns->params.max_active_zones) {
        ns->nr_active_zones++;
        assert(ns->nr_active_zones <= ns->params.max_active_zones);
    }
}

static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones) {
        assert(ns->nr_active_zones > 0);
        ns->nr_active_zones--;
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }
    assert(ns->nr_active_zones >= 0);
}

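/* Saturating 64-bit add for FDP statistics; clamps to UINT64_MAX on overflow. */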
static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
    uint64_t ret = *a + b;
    *a = ret < *a ? UINT64_MAX : ret;
}

void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);

typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

enum {
    NVME_SG_ALLOC = 1 << 0,
    NVME_SG_DMA   = 1 << 1,
};

typedef struct NvmeSg {
    int flags;

    union {
        QEMUSGList qsg;
        QEMUIOVector iov;
    };
} NvmeSg;

typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;

typedef struct NvmeRequest {
    struct NvmeSQueue *sq;
    struct NvmeNamespace *ns;
    BlockAIOCB *aiocb;
    uint16_t status;
    void *opaque;
    NvmeCqe cqe;
    NvmeCmd cmd;
    BlockAcctCookie acct;
    NvmeSg sg;
    bool atomic_write;
    QTAILQ_ENTRY(NvmeRequest) entry;
} NvmeRequest;

typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;

static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:        return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:        return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:     return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:        return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:        return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:         return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:            return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:     return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:     return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:     return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT:    return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_DIRECTIVE_SEND:   return "NVME_ADM_CMD_DIRECTIVE_SEND";
    case NVME_ADM_CMD_VIRT_MNGMT:       return "NVME_ADM_CMD_VIRT_MNGMT";
    case NVME_ADM_CMD_DIRECTIVE_RECV:   return "NVME_ADM_CMD_DIRECTIVE_RECV";
    case NVME_ADM_CMD_DBBUF_CONFIG:     return "NVME_ADM_CMD_DBBUF_CONFIG";
    case NVME_ADM_CMD_FORMAT_NVM:       return "NVME_ADM_CMD_FORMAT_NVM";
    default:                            return "NVME_ADM_CMD_UNKNOWN";
    }
}

static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:            return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:            return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:             return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:          return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:     return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:              return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:           return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:             return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND:   return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV:   return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:      return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                        return "NVME_NVM_CMD_UNKNOWN";
    }
}

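/*
 * Submission/completion queue state. db_addr and ei_addr hold the guest
 * addresses of the shadow doorbell and event index buffers configured by
 * the Doorbell Buffer Config command, when enabled.
 */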
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t sqid;
    uint16_t cqid;
    uint32_t head;
    uint32_t tail;
    uint32_t size;
    uint64_t dma_addr;
    uint64_t db_addr;
    uint64_t ei_addr;
    QEMUBH *bh;
    EventNotifier notifier;
    bool ioeventfd_enabled;
    NvmeRequest *io_req;
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t phase;
    uint16_t cqid;
    uint16_t irq_enabled;
    uint32_t head;
    uint32_t tail;
    uint32_t vector;
    uint32_t size;
    uint64_t dma_addr;
    uint64_t db_addr;
    uint64_t ei_addr;
    QEMUBH *bh;
    EventNotifier notifier;
    bool ioeventfd_enabled;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;

#define TYPE_NVME "nvme"
#define NVME(obj) \
    OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

typedef struct NvmeParams {
    char *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint16_t mqes;
    uint32_t cmb_size_mb;
    uint8_t aerl;
    uint32_t aer_max_queued;
    uint8_t mdts;
    uint8_t vsl;
    bool use_intel_id;
    uint8_t zasl;
    bool auto_transition_zones;
    bool legacy_cmb;
    bool ioeventfd;
    bool dbcs;
    uint16_t sriov_max_vfs;
    uint16_t sriov_vq_flexible;
    uint16_t sriov_vi_flexible;
    uint32_t sriov_max_vq_per_vf;
    uint32_t sriov_max_vi_per_vf;
    bool msix_exclusive_bar;
    bool ocp;

    struct {
        bool mem;
    } ctratt;

    uint16_t atomic_awun;
    uint16_t atomic_awupf;
    bool atomic_dn;
} NvmeParams;

typedef struct NvmeCtrl {
    PCIDevice parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar bar;
    NvmeParams params;
    NvmeBus bus;

    uint16_t cntlid;
    bool qs_created;
    uint32_t page_size;
    uint16_t page_bits;
    uint16_t max_prp_ents;
    uint32_t max_q_ents;
    uint8_t outstanding_aers;
    uint32_t irq_status;
    int cq_pending;
    uint64_t host_timestamp; /* Timestamp sent by the host */
    uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
    uint64_t starttime_ms;
    uint16_t temperature;
    uint8_t smart_critical_warning;
    uint32_t conf_msix_qsize;
    uint32_t conf_ioqpairs;
    uint64_t dbbuf_dbs;
    uint64_t dbbuf_eis;
    bool dbbuf_enabled;

    struct {
        uint32_t acs[256];
        struct {
            uint32_t nvm[256];
            uint32_t zoned[256];
        } iocs;
    } cse;

    struct {
        MemoryRegion mem;
        uint8_t *buf;
        bool cmse;
        hwaddr cba;
    } cmb;

    struct {
        HostMemoryBackend *dev;
        bool cmse;
        hwaddr cba;
    } pmr;

    uint8_t aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int aer_queued;

    uint32_t dmrsl;

    /* Namespace IDs start at 1, so the bitmap needs one extra (1-based) slot */
#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem *subsys;

    NvmeNamespace namespace;
    NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeSQueue **sq;
    NvmeCQueue **cq;
    NvmeSQueue admin_sq;
    NvmeCQueue admin_cq;
    NvmeIdCtrl id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };

        uint32_t async_config;
        NvmeHostBehaviorSupport hbs;
    } features;

    NvmePriCtrlCap pri_ctrl_cap;
    uint32_t nr_sec_ctrls;
    NvmeSecCtrlEntry *sec_ctrl_list;
    struct {
        uint16_t vqrfap;
        uint16_t virfap;
    } next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */
    uint32_t dn; /* Disable Normal */
    NvmeAtomic atomic;
} NvmeCtrl;

typedef enum NvmeResetType {
    NVME_RESET_FUNCTION   = 0,
    NVME_RESET_CONTROLLER = 1,
} NvmeResetType;

static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
{
    if (!nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return n->namespaces[nsid];
}

static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;

    return n->cq[sq->cqid];
}

static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    return sq->ctrl;
}

static inline uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

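/*
 * For an SR-IOV virtual function, return its secondary controller entry in
 * the physical function's list; returns NULL when called on the PF itself.
 */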
static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
{
    PCIDevice *pci_dev = &n->parent_obj;
    NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));

    if (pci_is_vf(pci_dev)) {
        return &pf->sec_ctrl_list[pcie_sriov_vf_number(pci_dev)];
    }

    return NULL;
}

static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
                                                      uint16_t cntlid)
{
    NvmeSecCtrlEntry *list = n->sec_ctrl_list;
    uint8_t i;

    for (i = 0; i < n->nr_sec_ctrls; i++) {
        if (le16_to_cpu(list[i].scid) == cntlid) {
            return &list[i];
        }
    }

    return NULL;
}

void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
                          NvmeTxDirection dir, NvmeRequest *req);
uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
                           NvmeTxDirection dir, NvmeRequest *req);
void nvme_rw_complete_cb(void *opaque, int ret);
uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
                       NvmeCmd *cmd);

#endif /* HW_NVME_NVME_H */