1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Definitions for the NVM Express interface
4 * Copyright (c) 2011-2014, Intel Corporation.
5 */
6
7 #ifndef _LINUX_NVME_H
8 #define _LINUX_NVME_H
9
10 #include <linux/bits.h>
11 #include <linux/types.h>
12 #include <linux/uuid.h>
13
14 /* NQN names in commands fields specified one size */
15 #define NVMF_NQN_FIELD_LEN 256
16
17 /* However the max length of a qualified name is another size */
18 #define NVMF_NQN_SIZE 223
19
20 #define NVMF_TRSVCID_SIZE 32
21 #define NVMF_TRADDR_SIZE 256
22 #define NVMF_TSAS_SIZE 256
23
24 #define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
25
26 #define NVME_NSID_ALL 0xffffffff
27
28 /* Special NSSR value, 'NVMe' */
29 #define NVME_SUBSYS_RESET 0x4E564D65
30
31 enum nvme_subsys_type {
32 /* Referral to another discovery type target subsystem */
33 NVME_NQN_DISC = 1,
34
35 /* NVME type target subsystem */
36 NVME_NQN_NVME = 2,
37
38 /* Current discovery type target subsystem */
39 NVME_NQN_CURR = 3,
40 };
41
42 enum nvme_ctrl_type {
43 NVME_CTRL_IO = 1, /* I/O controller */
44 NVME_CTRL_DISC = 2, /* Discovery controller */
45 NVME_CTRL_ADMIN = 3, /* Administrative controller */
46 };
47
48 enum nvme_dctype {
49 NVME_DCTYPE_NOT_REPORTED = 0,
50 NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
51 NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
52 };
53
54 /* Address Family codes for Discovery Log Page entry ADRFAM field */
55 enum {
56 NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
57 NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */
58 NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
59 NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
60 NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
61 NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */
62 NVMF_ADDR_FAMILY_MAX,
63 };
64
65 /* Transport Type codes for Discovery Log Page entry TRTYPE field */
66 enum {
67 NVMF_TRTYPE_PCI = 0, /* PCI */
68 NVMF_TRTYPE_RDMA = 1, /* RDMA */
69 NVMF_TRTYPE_FC = 2, /* Fibre Channel */
70 NVMF_TRTYPE_TCP = 3, /* TCP/IP */
71 NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
72 NVMF_TRTYPE_MAX,
73 };
74
75 /* Transport Requirements codes for Discovery Log Page entry TREQ field */
76 enum {
77 NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
78 NVMF_TREQ_REQUIRED = 1, /* Required */
79 NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
80 #define NVME_TREQ_SECURE_CHANNEL_MASK \
81 (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
82
83 NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
84 };
85
86 /* RDMA QP Service Type codes for Discovery Log Page entry TSAS
87 * RDMA_QPTYPE field
88 */
89 enum {
90 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
91 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
92 NVMF_RDMA_QPTYPE_INVALID = 0xff,
93 };
94
95 /* RDMA Provider Type codes for Discovery Log Page entry TSAS
96 * RDMA_PRTYPE field
97 */
98 enum {
99 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
100 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
101 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
102 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
103 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
104 };
105
106 /* RDMA Connection Management Service Type codes for Discovery Log Page
107 * entry TSAS RDMA_CMS field
108 */
109 enum {
110 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
111 };
112
113 /* TSAS SECTYPE for TCP transport */
114 enum {
115 NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
116 NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
117 NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
118 NVMF_TCP_SECTYPE_INVALID = 0xff,
119 };
120
121 #define NVME_AQ_DEPTH 32
122 #define NVME_NR_AEN_COMMANDS 1
123 #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
124
125 /*
126 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
127 * NVM-Express 1.2 specification, section 4.1.2.
128 */
129 #define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
130
131 enum {
132 NVME_REG_CAP = 0x0000, /* Controller Capabilities */
133 NVME_REG_VS = 0x0008, /* Version */
134 NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
135 NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
136 NVME_REG_CC = 0x0014, /* Controller Configuration */
137 NVME_REG_CSTS = 0x001c, /* Controller Status */
138 NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
139 NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
140 NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
141 NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
142 NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
143 NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
144 NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
145 NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
146 NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
147 * Location
148 */
149 NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
150 * Space Control
151 */
152 NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
153 NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
154 NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
155 NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
156 NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
157 * Buffer Size
158 */
159 NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
160 * Write Throughput
161 */
162 NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
163 };
164
/*
 * Field extraction helpers for the 64-bit Controller Capabilities (CAP)
 * register (NVME_REG_CAP). Bit positions follow from the shifts/masks.
 */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* bits 15:00, Maximum Queue Entries Supported */
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)	/* bits 31:24, worst-case ready timeout */
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)	/* bits 35:32, doorbell stride */
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)	/* bit 36, NVM Subsystem Reset supported */
#define NVME_CAP_CSS(cap)	(((cap) >> 37) & 0xff)	/* bits 44:37, Command Sets Supported (see NVME_CAP_CSS_*) */
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)	/* bits 51:48, minimum memory page size */
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)	/* bits 55:52, maximum memory page size */
#define NVME_CAP_CMBS(cap)	(((cap) >> 57) & 0x1)	/* bit 57, Controller Memory Buffer supported */

/* Controller Memory Buffer Location (NVME_REG_CMBLOC) register fields */
#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)	/* bits 02:00, BAR indicator */
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)	/* bits 31:12, offset within the BAR */

/* Controller Ready Timeouts (NVME_REG_CRTO) register fields */
#define NVME_CRTO_CRIMT(crto)	((crto) >> 16)		/* bits 31:16, ready independent of media timeout */
#define NVME_CRTO_CRWMT(crto)	((crto) & 0xffff)	/* bits 15:00, ready with media timeout */
179
180 enum {
181 NVME_CMBSZ_SQS = 1 << 0,
182 NVME_CMBSZ_CQS = 1 << 1,
183 NVME_CMBSZ_LISTS = 1 << 2,
184 NVME_CMBSZ_RDS = 1 << 3,
185 NVME_CMBSZ_WDS = 1 << 4,
186
187 NVME_CMBSZ_SZ_SHIFT = 12,
188 NVME_CMBSZ_SZ_MASK = 0xfffff,
189
190 NVME_CMBSZ_SZU_SHIFT = 8,
191 NVME_CMBSZ_SZU_MASK = 0xf,
192 };
193
194 /*
195 * Submission and Completion Queue Entry Sizes for the NVM command set.
196 * (In bytes and specified as a power of two (2^n)).
197 */
198 #define NVME_ADM_SQES 6
199 #define NVME_NVM_IOSQES 6
200 #define NVME_NVM_IOCQES 4
201
202 /*
203 * Controller Configuration (CC) register (Offset 14h)
204 */
205 enum {
206 /* Enable (EN): bit 0 */
207 NVME_CC_ENABLE = 1 << 0,
208 NVME_CC_EN_SHIFT = 0,
209
210 /* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */
211
212 /* I/O Command Set Selected (CSS): bits 06:04 */
213 NVME_CC_CSS_SHIFT = 4,
214 NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
215 NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
216 NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
217
218 /* Memory Page Size (MPS): bits 10:07 */
219 NVME_CC_MPS_SHIFT = 7,
220 NVME_CC_MPS_MASK = 0xf << NVME_CC_MPS_SHIFT,
221
222 /* Arbitration Mechanism Selected (AMS): bits 13:11 */
223 NVME_CC_AMS_SHIFT = 11,
224 NVME_CC_AMS_MASK = 7 << NVME_CC_AMS_SHIFT,
225 NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
226 NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
227 NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
228
229 /* Shutdown Notification (SHN): bits 15:14 */
230 NVME_CC_SHN_SHIFT = 14,
231 NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
232 NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
233 NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
234 NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
235
236 /* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */
237 NVME_CC_IOSQES_SHIFT = 16,
238 NVME_CC_IOSQES_MASK = 0xf << NVME_CC_IOSQES_SHIFT,
239 NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
240
241 /* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */
242 NVME_CC_IOCQES_SHIFT = 20,
243 NVME_CC_IOCQES_MASK = 0xf << NVME_CC_IOCQES_SHIFT,
244 NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
245
246 /* Controller Ready Independent of Media Enable (CRIME): bit 24 */
247 NVME_CC_CRIME = 1 << 24,
248
249 /* Bits 25:31 are reserved (NVMe Base Specification rev 2.1) */
250 };
251
/* Controller Status (CSTS) register bits (NVME_REG_CSTS) */
enum {
	NVME_CSTS_RDY		= 1 << 0,	/* controller ready */
	NVME_CSTS_CFS		= 1 << 1,	/* controller fatal status */
	NVME_CSTS_NSSRO		= 1 << 4,	/* NVM subsystem reset occurred */
	NVME_CSTS_PP		= 1 << 5,	/* processing paused */
	/* Shutdown Status (SHST) is a 2-bit field at bits 03:02 */
	NVME_CSTS_SHST_NORMAL	= 0 << 2,	/* no shutdown in progress */
	NVME_CSTS_SHST_OCCUR	= 1 << 2,	/* shutdown processing occurring */
	NVME_CSTS_SHST_CMPLT	= 2 << 2,	/* shutdown processing complete */
	NVME_CSTS_SHST_MASK	= 3 << 2,
};
262
263 enum {
264 NVME_CMBMSC_CRE = 1 << 0,
265 NVME_CMBMSC_CMSE = 1 << 1,
266 };
267
268 enum {
269 NVME_CAP_CSS_NVM = 1 << 0,
270 NVME_CAP_CSS_CSI = 1 << 6,
271 };
272
273 enum {
274 NVME_CAP_CRMS_CRWMS = 1ULL << 59,
275 NVME_CAP_CRMS_CRIMS = 1ULL << 60,
276 };
277
/*
 * Power State Descriptor (32 bytes). The Identify Controller data
 * structure ends with an array of 32 of these (see nvme_id_ctrl.psd).
 */
struct nvme_id_power_state {
	__le16			max_power;	/* centiwatts */
	__u8			rsvd2;
	__u8			flags;		/* NVME_PS_FLAGS_* */
	__le32			entry_lat;	/* microseconds */
	__le32			exit_lat;	/* microseconds */
	__u8			read_tput;	/* relative read throughput rank */
	__u8			read_lat;	/* relative read latency rank */
	__u8			write_tput;	/* relative write throughput rank */
	__u8			write_lat;	/* relative write latency rank */
	__le16			idle_power;
	__u8			idle_scale;	/* scale/units applied to idle_power */
	__u8			rsvd19;
	__le16			active_power;
	__u8			active_work_scale;	/* scale applied to active_power */
	__u8			rsvd23[9];
};
295
296 enum {
297 NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
298 NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
299 };
300
301 enum nvme_ctrl_attr {
302 NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
303 NVME_CTRL_ATTR_TBKAS = (1 << 6),
304 NVME_CTRL_ATTR_ELBAS = (1 << 15),
305 NVME_CTRL_ATTR_RHII = (1 << 18),
306 NVME_CTRL_ATTR_FDPS = (1 << 19),
307 };
308
309 struct nvme_id_ctrl {
310 __le16 vid;
311 __le16 ssvid;
312 char sn[20];
313 char mn[40];
314 char fr[8];
315 __u8 rab;
316 __u8 ieee[3];
317 __u8 cmic;
318 __u8 mdts;
319 __le16 cntlid;
320 __le32 ver;
321 __le32 rtd3r;
322 __le32 rtd3e;
323 __le32 oaes;
324 __le32 ctratt;
325 __u8 rsvd100[11];
326 __u8 cntrltype;
327 __u8 fguid[16];
328 __le16 crdt1;
329 __le16 crdt2;
330 __le16 crdt3;
331 __u8 rsvd134[122];
332 __le16 oacs;
333 __u8 acl;
334 __u8 aerl;
335 __u8 frmw;
336 __u8 lpa;
337 __u8 elpe;
338 __u8 npss;
339 __u8 avscc;
340 __u8 apsta;
341 __le16 wctemp;
342 __le16 cctemp;
343 __le16 mtfa;
344 __le32 hmpre;
345 __le32 hmmin;
346 __u8 tnvmcap[16];
347 __u8 unvmcap[16];
348 __le32 rpmbs;
349 __le16 edstt;
350 __u8 dsto;
351 __u8 fwug;
352 __le16 kas;
353 __le16 hctma;
354 __le16 mntmt;
355 __le16 mxtmt;
356 __le32 sanicap;
357 __le32 hmminds;
358 __le16 hmmaxd;
359 __le16 nvmsetidmax;
360 __le16 endgidmax;
361 __u8 anatt;
362 __u8 anacap;
363 __le32 anagrpmax;
364 __le32 nanagrpid;
365 __u8 rsvd352[160];
366 __u8 sqes;
367 __u8 cqes;
368 __le16 maxcmd;
369 __le32 nn;
370 __le16 oncs;
371 __le16 fuses;
372 __u8 fna;
373 __u8 vwc;
374 __le16 awun;
375 __le16 awupf;
376 __u8 nvscc;
377 __u8 nwpc;
378 __le16 acwu;
379 __u8 rsvd534[2];
380 __le32 sgls;
381 __le32 mnan;
382 __u8 rsvd544[224];
383 char subnqn[256];
384 __u8 rsvd1024[768];
385 __le32 ioccsz;
386 __le32 iorcsz;
387 __le16 icdoff;
388 __u8 ctrattr;
389 __u8 msdbd;
390 __u8 rsvd1804[2];
391 __u8 dctype;
392 __u8 rsvd1807[241];
393 struct nvme_id_power_state psd[32];
394 __u8 vs[1024];
395 };
396
397 enum {
398 NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
399 NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
400 NVME_CTRL_CMIC_ANA = 1 << 3,
401 NVME_CTRL_ONCS_COMPARE = 1 << 0,
402 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
403 NVME_CTRL_ONCS_DSM = 1 << 2,
404 NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
405 NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
406 NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
407 NVME_CTRL_VWC_PRESENT = 1 << 0,
408 NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
409 NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
410 NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
411 NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
412 NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
413 NVME_CTRL_CTRATT_128_ID = 1 << 0,
414 NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
415 NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
416 NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
417 NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
418 NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
419 NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
420 NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
421 NVME_CTRL_SGLS_BYTE_ALIGNED = 1,
422 NVME_CTRL_SGLS_DWORD_ALIGNED = 2,
423 NVME_CTRL_SGLS_KSDBDS = 1 << 2,
424 NVME_CTRL_SGLS_MSDS = 1 << 19,
425 NVME_CTRL_SGLS_SAOS = 1 << 20,
426 };
427
/* LBA Format descriptor from Identify Namespace (see nvme_id_ns.lbaf[]) */
struct nvme_lbaf {
	__le16			ms;	/* metadata size per LBA */
	__u8			ds;	/* LBA data size; spec encodes it as log2(bytes) — confirm against NVMe NVM spec */
	__u8			rp;	/* relative performance, see NVME_LBAF_RP_* */
};
433
434 struct nvme_id_ns {
435 __le64 nsze;
436 __le64 ncap;
437 __le64 nuse;
438 __u8 nsfeat;
439 __u8 nlbaf;
440 __u8 flbas;
441 __u8 mc;
442 __u8 dpc;
443 __u8 dps;
444 __u8 nmic;
445 __u8 rescap;
446 __u8 fpi;
447 __u8 dlfeat;
448 __le16 nawun;
449 __le16 nawupf;
450 __le16 nacwu;
451 __le16 nabsn;
452 __le16 nabo;
453 __le16 nabspf;
454 __le16 noiob;
455 __u8 nvmcap[16];
456 __le16 npwg;
457 __le16 npwa;
458 __le16 npdg;
459 __le16 npda;
460 __le16 nows;
461 __u8 rsvd74[18];
462 __le32 anagrpid;
463 __u8 rsvd96[3];
464 __u8 nsattr;
465 __le16 nvmsetid;
466 __le16 endgid;
467 __u8 nguid[16];
468 __u8 eui64[8];
469 struct nvme_lbaf lbaf[64];
470 __u8 vs[3712];
471 };
472
473 /* I/O Command Set Independent Identify Namespace Data Structure */
474 struct nvme_id_ns_cs_indep {
475 __u8 nsfeat;
476 __u8 nmic;
477 __u8 rescap;
478 __u8 fpi;
479 __le32 anagrpid;
480 __u8 nsattr;
481 __u8 rsvd9;
482 __le16 nvmsetid;
483 __le16 endgid;
484 __u8 nstat;
485 __u8 rsvd15[4081];
486 };
487
488 struct nvme_zns_lbafe {
489 __le64 zsze;
490 __u8 zdes;
491 __u8 rsvd9[7];
492 };
493
494 struct nvme_id_ns_zns {
495 __le16 zoc;
496 __le16 ozcs;
497 __le32 mar;
498 __le32 mor;
499 __le32 rrl;
500 __le32 frl;
501 __u8 rsvd20[2796];
502 struct nvme_zns_lbafe lbafe[64];
503 __u8 vs[256];
504 };
505
506 struct nvme_id_ctrl_zns {
507 __u8 zasl;
508 __u8 rsvd1[4095];
509 };
510
511 struct nvme_id_ns_nvm {
512 __le64 lbstm;
513 __u8 pic;
514 __u8 rsvd9[3];
515 __le32 elbaf[64];
516 __le32 npdgl;
517 __le32 nprg;
518 __le32 npra;
519 __le32 nors;
520 __le32 npdal;
521 __u8 rsvd288[3808];
522 };
523
524 static_assert(sizeof(struct nvme_id_ns_nvm) == 4096);
525
/*
 * Extended LBA Format (elbaf[]) entry field layout; decoded by the
 * nvme_elbaf_*() helpers.
 */
enum {
	NVME_ID_NS_NVM_STS_MASK		= 0x7f,		/* STS: bits 06:00 */
	NVME_ID_NS_NVM_GUARD_SHIFT	= 7,		/* guard type: bits 08:07 */
	NVME_ID_NS_NVM_GUARD_MASK	= 0x3,
	NVME_ID_NS_NVM_QPIF_SHIFT	= 9,		/* QPIF: bits 12:09 */
	NVME_ID_NS_NVM_QPIF_MASK	= 0xf,
	NVME_ID_NS_NVM_QPIFS		= 1 << 3,	/* qualified PI format supported flag (pic field) — confirm */
};
534
nvme_elbaf_sts(__u32 elbaf)535 static inline __u8 nvme_elbaf_sts(__u32 elbaf)
536 {
537 return elbaf & NVME_ID_NS_NVM_STS_MASK;
538 }
539
nvme_elbaf_guard_type(__u32 elbaf)540 static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
541 {
542 return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
543 }
544
nvme_elbaf_qualified_guard_type(__u32 elbaf)545 static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
546 {
547 return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
548 }
549
550 struct nvme_id_ctrl_nvm {
551 __u8 vsl;
552 __u8 wzsl;
553 __u8 wusl;
554 __u8 dmrl;
555 __le32 dmrsl;
556 __le64 dmsl;
557 __u8 rsvd16[4080];
558 };
559
560 enum {
561 NVME_ID_CNS_NS = 0x00,
562 NVME_ID_CNS_CTRL = 0x01,
563 NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
564 NVME_ID_CNS_NS_DESC_LIST = 0x03,
565 NVME_ID_CNS_CS_NS = 0x05,
566 NVME_ID_CNS_CS_CTRL = 0x06,
567 NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
568 NVME_ID_CNS_NS_CS_INDEP = 0x08,
569 NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
570 NVME_ID_CNS_NS_PRESENT = 0x11,
571 NVME_ID_CNS_CTRL_NS_LIST = 0x12,
572 NVME_ID_CNS_CTRL_LIST = 0x13,
573 NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
574 NVME_ID_CNS_NS_GRANULARITY = 0x16,
575 NVME_ID_CNS_UUID_LIST = 0x17,
576 NVME_ID_CNS_ENDGRP_LIST = 0x19,
577 };
578
579 enum {
580 NVME_CSI_NVM = 0,
581 NVME_CSI_ZNS = 2,
582 };
583
584 enum {
585 NVME_DIR_IDENTIFY = 0x00,
586 NVME_DIR_STREAMS = 0x01,
587 NVME_DIR_SND_ID_OP_ENABLE = 0x01,
588 NVME_DIR_SND_ST_OP_REL_ID = 0x01,
589 NVME_DIR_SND_ST_OP_REL_RSC = 0x02,
590 NVME_DIR_RCV_ID_OP_PARAM = 0x01,
591 NVME_DIR_RCV_ST_OP_PARAM = 0x01,
592 NVME_DIR_RCV_ST_OP_STATUS = 0x02,
593 NVME_DIR_RCV_ST_OP_RESOURCE = 0x03,
594 NVME_DIR_ENDIR = 0x01,
595 };
596
597 enum {
598 NVME_NS_FEAT_THIN = 1 << 0,
599 NVME_NS_FEAT_ATOMICS = 1 << 1,
600 NVME_NS_FEAT_OPTPERF_SHIFT = 4,
601 /* In NVMe version 2.0 and below, OPTPERF is only bit 4 of NSFEAT */
602 NVME_NS_FEAT_OPTPERF_MASK = 0x1,
603 /* Since version 2.1, OPTPERF is bits 4 and 5 of NSFEAT */
604 NVME_NS_FEAT_OPTPERF_MASK_2_1 = 0x3,
605 NVME_NS_ATTR_RO = 1 << 0,
606 NVME_NS_FLBAS_LBA_MASK = 0xf,
607 NVME_NS_FLBAS_LBA_UMASK = 0x60,
608 NVME_NS_FLBAS_LBA_SHIFT = 1,
609 NVME_NS_FLBAS_META_EXT = 0x10,
610 NVME_NS_NMIC_SHARED = 1 << 0,
611 NVME_NS_ROTATIONAL = 1 << 4,
612 NVME_NS_VWC_NOT_PRESENT = 1 << 5,
613 NVME_LBAF_RP_BEST = 0,
614 NVME_LBAF_RP_BETTER = 1,
615 NVME_LBAF_RP_GOOD = 2,
616 NVME_LBAF_RP_DEGRADED = 3,
617 NVME_NS_DPC_PI_LAST = 1 << 4,
618 NVME_NS_DPC_PI_FIRST = 1 << 3,
619 NVME_NS_DPC_PI_TYPE3 = 1 << 2,
620 NVME_NS_DPC_PI_TYPE2 = 1 << 1,
621 NVME_NS_DPC_PI_TYPE1 = 1 << 0,
622 NVME_NS_DPS_PI_FIRST = 1 << 3,
623 NVME_NS_DPS_PI_MASK = 0x7,
624 NVME_NS_DPS_PI_TYPE1 = 1,
625 NVME_NS_DPS_PI_TYPE2 = 2,
626 NVME_NS_DPS_PI_TYPE3 = 3,
627 };
628
629 enum {
630 NVME_NSTAT_NRDY = 1 << 0,
631 };
632
633 enum {
634 NVME_NVM_NS_16B_GUARD = 0,
635 NVME_NVM_NS_32B_GUARD = 1,
636 NVME_NVM_NS_64B_GUARD = 2,
637 NVME_NVM_NS_QTYPE_GUARD = 3,
638 };
639
/*
 * Combine the low (bits 03:00) and upper (bits 06:05) pieces of the FLBAS
 * field into a single LBA format index into nvme_id_ns.lbaf[].
 */
static inline __u8 nvme_lbaf_index(__u8 flbas)
{
	__u8 lower = flbas & NVME_NS_FLBAS_LBA_MASK;
	__u8 upper = (flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT;

	return lower | upper;
}
645
646 /* Identify Namespace Metadata Capabilities (MC): */
647 enum {
648 NVME_MC_EXTENDED_LBA = (1 << 0),
649 NVME_MC_METADATA_PTR = (1 << 1),
650 };
651
/*
 * Namespace Identification Descriptor header (Identify CNS 03h,
 * NVME_ID_CNS_NS_DESC_LIST). A payload of nidl bytes follows each header;
 * see NVME_NIDT_* for types and NVME_NIDT_*_LEN for payload sizes.
 */
struct nvme_ns_id_desc {
	__u8 nidt;	/* namespace identifier type */
	__u8 nidl;	/* namespace identifier length, in bytes */
	__le16 reserved;
};
657
658 #define NVME_NIDT_EUI64_LEN 8
659 #define NVME_NIDT_NGUID_LEN 16
660 #define NVME_NIDT_UUID_LEN 16
661 #define NVME_NIDT_CSI_LEN 1
662
663 enum {
664 NVME_NIDT_EUI64 = 0x01,
665 NVME_NIDT_NGUID = 0x02,
666 NVME_NIDT_UUID = 0x03,
667 NVME_NIDT_CSI = 0x04,
668 };
669
670 struct nvme_endurance_group_log {
671 __u8 egcw;
672 __u8 egfeat;
673 __u8 rsvd2;
674 __u8 avsp;
675 __u8 avspt;
676 __u8 pused;
677 __le16 did;
678 __u8 rsvd8[24];
679 __u8 ee[16];
680 __u8 dur[16];
681 __u8 duw[16];
682 __u8 muw[16];
683 __u8 hrc[16];
684 __u8 hwc[16];
685 __u8 mdie[16];
686 __u8 neile[16];
687 __u8 tegcap[16];
688 __u8 uegcap[16];
689 __u8 rsvd192[320];
690 };
691
692 struct nvme_rotational_media_log {
693 __le16 endgid;
694 __le16 numa;
695 __le16 nrs;
696 __u8 rsvd6[2];
697 __le32 spinc;
698 __le32 fspinc;
699 __le32 ldc;
700 __le32 fldc;
701 __u8 rsvd24[488];
702 };
703
704 struct nvme_fdp_config {
705 __u8 flags;
706 #define FDPCFG_FDPE (1U << 0)
707 __u8 fdpcidx;
708 __le16 reserved;
709 };
710
711 struct nvme_fdp_ruh_desc {
712 __u8 ruht;
713 __u8 reserved[3];
714 };
715
716 struct nvme_fdp_config_desc {
717 __le16 dsze;
718 __u8 fdpa;
719 __u8 vss;
720 __le32 nrg;
721 __le16 nruh;
722 __le16 maxpids;
723 __le32 nns;
724 __le64 runs;
725 __le32 erutl;
726 __u8 rsvd28[36];
727 struct nvme_fdp_ruh_desc ruhs[];
728 };
729
730 struct nvme_fdp_config_log {
731 __le16 numfdpc;
732 __u8 ver;
733 __u8 rsvd3;
734 __le32 sze;
735 __u8 rsvd8[8];
736 /*
737 * This is followed by variable number of nvme_fdp_config_desc
738 * structures, but sparse doesn't like nested variable sized arrays.
739 */
740 };
741
742 struct nvme_smart_log {
743 __u8 critical_warning;
744 __u8 temperature[2];
745 __u8 avail_spare;
746 __u8 spare_thresh;
747 __u8 percent_used;
748 __u8 endu_grp_crit_warn_sumry;
749 __u8 rsvd7[25];
750 __u8 data_units_read[16];
751 __u8 data_units_written[16];
752 __u8 host_reads[16];
753 __u8 host_writes[16];
754 __u8 ctrl_busy_time[16];
755 __u8 power_cycles[16];
756 __u8 power_on_hours[16];
757 __u8 unsafe_shutdowns[16];
758 __u8 media_errors[16];
759 __u8 num_err_log_entries[16];
760 __le32 warning_temp_time;
761 __le32 critical_comp_time;
762 __le16 temp_sensor[8];
763 __le32 thm_temp1_trans_count;
764 __le32 thm_temp2_trans_count;
765 __le32 thm_temp1_total_time;
766 __le32 thm_temp2_total_time;
767 __u8 rsvd232[280];
768 };
769
770 struct nvme_fw_slot_info_log {
771 __u8 afi;
772 __u8 rsvd1[7];
773 __le64 frs[7];
774 __u8 rsvd64[448];
775 };
776
777 enum {
778 NVME_CMD_EFFECTS_CSUPP = 1 << 0,
779 NVME_CMD_EFFECTS_LBCC = 1 << 1,
780 NVME_CMD_EFFECTS_NCC = 1 << 2,
781 NVME_CMD_EFFECTS_NIC = 1 << 3,
782 NVME_CMD_EFFECTS_CCC = 1 << 4,
783 NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
784 NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
785 NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
786 NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
787 };
788
789 struct nvme_effects_log {
790 __le32 acs[256];
791 __le32 iocs[256];
792 __u8 resv[2048];
793 };
794
795 enum nvme_ana_state {
796 NVME_ANA_OPTIMIZED = 0x01,
797 NVME_ANA_NONOPTIMIZED = 0x02,
798 NVME_ANA_INACCESSIBLE = 0x03,
799 NVME_ANA_PERSISTENT_LOSS = 0x04,
800 NVME_ANA_CHANGE = 0x0f,
801 };
802
803 struct nvme_ana_group_desc {
804 __le32 grpid;
805 __le32 nnsids;
806 __le64 chgcnt;
807 __u8 state;
808 __u8 rsvd17[15];
809 __le32 nsids[];
810 };
811
812 /* flag for the log specific field of the ANA log */
813 #define NVME_ANA_LOG_RGO (1 << 0)
814
815 struct nvme_ana_rsp_hdr {
816 __le64 chgcnt;
817 __le16 ngrps;
818 __le16 rsvd10[3];
819 };
820
821 struct nvme_zone_descriptor {
822 __u8 zt;
823 __u8 zs;
824 __u8 za;
825 __u8 rsvd3[5];
826 __le64 zcap;
827 __le64 zslba;
828 __le64 wp;
829 __u8 rsvd32[32];
830 };
831
832 enum {
833 NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
834 };
835
836 struct nvme_zone_report {
837 __le64 nr_zones;
838 __u8 resv8[56];
839 struct nvme_zone_descriptor entries[];
840 };
841
842 enum {
843 NVME_SMART_CRIT_SPARE = 1 << 0,
844 NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
845 NVME_SMART_CRIT_RELIABILITY = 1 << 2,
846 NVME_SMART_CRIT_MEDIA = 1 << 3,
847 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
848 };
849
850 enum {
851 NVME_AER_ERROR = 0,
852 NVME_AER_SMART = 1,
853 NVME_AER_NOTICE = 2,
854 NVME_AER_CSS = 6,
855 NVME_AER_VS = 7,
856 };
857
858 enum {
859 NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
860 };
861
862 enum {
863 NVME_AER_NOTICE_NS_CHANGED = 0x00,
864 NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
865 NVME_AER_NOTICE_ANA = 0x03,
866 NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
867 };
868
869 enum {
870 NVME_AEN_BIT_NS_ATTR = 8,
871 NVME_AEN_BIT_FW_ACT = 9,
872 NVME_AEN_BIT_ANA_CHANGE = 11,
873 NVME_AEN_BIT_DISC_CHANGE = 31,
874 };
875
876 enum {
877 NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
878 NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
879 NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
880 NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
881 };
882
883 struct nvme_lba_range_type {
884 __u8 type;
885 __u8 attributes;
886 __u8 rsvd2[14];
887 __le64 slba;
888 __le64 nlb;
889 __u8 guid[16];
890 __u8 rsvd48[16];
891 };
892
893 enum {
894 NVME_LBART_TYPE_FS = 0x01,
895 NVME_LBART_TYPE_RAID = 0x02,
896 NVME_LBART_TYPE_CACHE = 0x03,
897 NVME_LBART_TYPE_SWAP = 0x04,
898
899 NVME_LBART_ATTRIB_TEMP = 1 << 0,
900 NVME_LBART_ATTRIB_HIDE = 1 << 1,
901 };
902
903 enum nvme_pr_type {
904 NVME_PR_WRITE_EXCLUSIVE = 1,
905 NVME_PR_EXCLUSIVE_ACCESS = 2,
906 NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
907 NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
908 NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
909 NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
910 };
911
912 enum nvme_eds {
913 NVME_EXTENDED_DATA_STRUCT = 0x1,
914 };
915
916 struct nvme_registered_ctrl {
917 __le16 cntlid;
918 __u8 rcsts;
919 __u8 rsvd3[5];
920 __le64 hostid;
921 __le64 rkey;
922 };
923
924 struct nvme_reservation_status {
925 __le32 gen;
926 __u8 rtype;
927 __u8 regctl[2];
928 __u8 resv5[2];
929 __u8 ptpls;
930 __u8 resv10[14];
931 struct nvme_registered_ctrl regctl_ds[];
932 };
933
934 struct nvme_registered_ctrl_ext {
935 __le16 cntlid;
936 __u8 rcsts;
937 __u8 rsvd3[5];
938 __le64 rkey;
939 __u8 hostid[16];
940 __u8 rsvd32[32];
941 };
942
943 struct nvme_reservation_status_ext {
944 __le32 gen;
945 __u8 rtype;
946 __u8 regctl[2];
947 __u8 resv5[2];
948 __u8 ptpls;
949 __u8 resv10[14];
950 __u8 rsvd24[40];
951 struct nvme_registered_ctrl_ext regctl_eds[];
952 };
953
954 /* I/O commands */
955
956 enum nvme_opcode {
957 nvme_cmd_flush = 0x00,
958 nvme_cmd_write = 0x01,
959 nvme_cmd_read = 0x02,
960 nvme_cmd_write_uncor = 0x04,
961 nvme_cmd_compare = 0x05,
962 nvme_cmd_write_zeroes = 0x08,
963 nvme_cmd_dsm = 0x09,
964 nvme_cmd_verify = 0x0c,
965 nvme_cmd_resv_register = 0x0d,
966 nvme_cmd_resv_report = 0x0e,
967 nvme_cmd_resv_acquire = 0x11,
968 nvme_cmd_io_mgmt_recv = 0x12,
969 nvme_cmd_resv_release = 0x15,
970 nvme_cmd_zone_mgmt_send = 0x79,
971 nvme_cmd_zone_mgmt_recv = 0x7a,
972 nvme_cmd_zone_append = 0x7d,
973 nvme_cmd_vendor_start = 0x80,
974 };
975
976 #define nvme_opcode_name(opcode) { opcode, #opcode }
977 #define show_nvm_opcode_name(val) \
978 __print_symbolic(val, \
979 nvme_opcode_name(nvme_cmd_flush), \
980 nvme_opcode_name(nvme_cmd_write), \
981 nvme_opcode_name(nvme_cmd_read), \
982 nvme_opcode_name(nvme_cmd_write_uncor), \
983 nvme_opcode_name(nvme_cmd_compare), \
984 nvme_opcode_name(nvme_cmd_write_zeroes), \
985 nvme_opcode_name(nvme_cmd_dsm), \
986 nvme_opcode_name(nvme_cmd_verify), \
987 nvme_opcode_name(nvme_cmd_resv_register), \
988 nvme_opcode_name(nvme_cmd_resv_report), \
989 nvme_opcode_name(nvme_cmd_resv_acquire), \
990 nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
991 nvme_opcode_name(nvme_cmd_resv_release), \
992 nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
993 nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
994 nvme_opcode_name(nvme_cmd_zone_append))
995
996
997
998 /*
999 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
1000 *
1001 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
1002 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
1003 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
1004 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
1005 * request subtype
1006 */
1007 enum {
1008 NVME_SGL_FMT_ADDRESS = 0x00,
1009 NVME_SGL_FMT_OFFSET = 0x01,
1010 NVME_SGL_FMT_TRANSPORT_A = 0x0A,
1011 NVME_SGL_FMT_INVALIDATE = 0x0f,
1012 };
1013
1014 /*
1015 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
1016 *
1017 * For struct nvme_sgl_desc:
1018 * @NVME_SGL_FMT_DATA_DESC: data block descriptor
1019 * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
1020 * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
1021 *
1022 * For struct nvme_keyed_sgl_desc:
1023 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
1024 *
1025 * Transport-specific SGL types:
1026 * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor
1027 */
1028 enum {
1029 NVME_SGL_FMT_DATA_DESC = 0x00,
1030 NVME_SGL_FMT_SEG_DESC = 0x02,
1031 NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
1032 NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
1033 NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
1034 };
1035
/*
 * Generic 16-byte SGL descriptor. The 'type' identifier carries the
 * descriptor type in the upper nibble (NVME_SGL_FMT_*_DESC) and the
 * subtype in the lower nibble (NVME_SGL_FMT_ADDRESS etc.).
 */
struct nvme_sgl_desc {
	__le64	addr;
	__le32	length;
	__u8	rsvd[3];
	__u8	type;
};
1042
/*
 * Keyed 16-byte SGL descriptor (NVME_KEY_SGL_FMT_DATA_DESC): a 24-bit
 * length plus a 4-byte key, used by RDMA-style transports for remote
 * memory access — confirm transport usage against the NVMe-oF spec.
 */
struct nvme_keyed_sgl_desc {
	__le64	addr;
	__u8	length[3];	/* 24-bit byte count */
	__u8	key[4];
	__u8	type;		/* type/subtype nibbles, as in nvme_sgl_desc */
};
1049
/*
 * Data pointer (DPTR) area of a submission queue entry: the same 16 bytes
 * hold either a PRP pair or a single (optionally keyed) SGL descriptor,
 * selected by the PSDT bits in the command flags (see NVME_CMD_SGL_*).
 */
union nvme_data_ptr {
	struct {
		__le64	prp1;
		__le64	prp2;
	};
	struct nvme_sgl_desc	sgl;
	struct nvme_keyed_sgl_desc ksgl;
};
1058
1059 /*
1060 * Lowest two bits of our flags field (FUSE field in the spec):
1061 *
1062 * @NVME_CMD_FUSE_FIRST: Fused Operation, first command
1063 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
1064 *
1065 * Highest two bits in our flags field (PSDT field in the spec):
1066 *
1067 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
1068 * If used, MPTR contains addr of single physical buffer (byte aligned).
1069 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
1070 * If used, MPTR contains an address of an SGL segment containing
1071 * exactly 1 SGL descriptor (qword aligned).
1072 */
1073 enum {
1074 NVME_CMD_FUSE_FIRST = (1 << 0),
1075 NVME_CMD_FUSE_SECOND = (1 << 1),
1076
1077 NVME_CMD_SGL_METABUF = (1 << 6),
1078 NVME_CMD_SGL_METASEG = (1 << 7),
1079 NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
1080 };
1081
/*
 * Generic submission queue entry layout. Commands without a dedicated
 * struct are built through this view; cdw10-cdw15 are wrapped in a
 * struct_group so they can be accessed/copied as one unit.
 */
struct nvme_common_command {
	__u8			opcode;		/* command opcode */
	__u8			flags;		/* FUSE and PSDT bits, see NVME_CMD_* */
	__u16			command_id;	/* tag matched against the completion entry */
	__le32			nsid;		/* namespace identifier */
	__le32			cdw2[2];
	__le64			metadata;	/* metadata pointer (MPTR) */
	union nvme_data_ptr	dptr;		/* PRP pair or SGL descriptor */
	struct_group(cdws,
		__le32		cdw10;
		__le32		cdw11;
		__le32		cdw12;
		__le32		cdw13;
		__le32		cdw14;
		__le32		cdw15;
	);
};
1099
/*
 * Submission queue entry layout for read/write-style I/O commands
 * (nvme_cmd_read, nvme_cmd_write and friends).
 */
struct nvme_rw_command {
	__u8			opcode;
	__u8			flags;		/* FUSE/PSDT bits, see NVME_CMD_* */
	__u16			command_id;
	__le32			nsid;		/* namespace identifier */
	__le32			cdw2;
	__le32			cdw3;
	__le64			metadata;	/* metadata pointer */
	union nvme_data_ptr	dptr;		/* PRP pair or SGL descriptor */
	__le64			slba;		/* starting LBA */
	__le16			length;		/* number of logical blocks; 0's based per spec — confirm */
	__le16			control;	/* NVME_RW_LR/FUA/PRINFO_* control bits */
	__le32			dsmgmt;		/* dataset management hints, NVME_RW_DSM_* */
	__le32			reftag;		/* protection information reference tag */
	__le16			lbat;		/* PI application tag (cf. NVME_RW_PRINFO_PRCHK_APP) */
	__le16			lbatm;		/* PI application tag mask */
};
1117
1118 enum {
1119 NVME_RW_LR = 1 << 15,
1120 NVME_RW_FUA = 1 << 14,
1121 NVME_RW_APPEND_PIREMAP = 1 << 9,
1122 NVME_RW_DSM_FREQ_UNSPEC = 0,
1123 NVME_RW_DSM_FREQ_TYPICAL = 1,
1124 NVME_RW_DSM_FREQ_RARE = 2,
1125 NVME_RW_DSM_FREQ_READS = 3,
1126 NVME_RW_DSM_FREQ_WRITES = 4,
1127 NVME_RW_DSM_FREQ_RW = 5,
1128 NVME_RW_DSM_FREQ_ONCE = 6,
1129 NVME_RW_DSM_FREQ_PREFETCH = 7,
1130 NVME_RW_DSM_FREQ_TEMP = 8,
1131 NVME_RW_DSM_LATENCY_NONE = 0 << 4,
1132 NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
1133 NVME_RW_DSM_LATENCY_NORM = 2 << 4,
1134 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
1135 NVME_RW_DSM_SEQ_REQ = 1 << 6,
1136 NVME_RW_DSM_COMPRESSED = 1 << 7,
1137 NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
1138 NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
1139 NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
1140 NVME_RW_PRINFO_PRACT = 1 << 13,
1141 NVME_RW_DTYPE_STREAMS = 1 << 4,
1142 NVME_RW_DTYPE_DPLCMT = 2 << 4,
1143 NVME_WZ_DEAC = 1 << 9,
1144 };
1145
1146 struct nvme_dsm_cmd {
1147 __u8 opcode;
1148 __u8 flags;
1149 __u16 command_id;
1150 __le32 nsid;
1151 __u64 rsvd2[2];
1152 union nvme_data_ptr dptr;
1153 __le32 nr;
1154 __le32 attributes;
1155 __u32 rsvd12[4];
1156 };
1157
1158 enum {
1159 NVME_DSMGMT_IDR = 1 << 0,
1160 NVME_DSMGMT_IDW = 1 << 1,
1161 NVME_DSMGMT_AD = 1 << 2,
1162 };
1163
1164 #define NVME_DSM_MAX_RANGES 256
1165
/*
 * One LBA range entry in the Dataset Management command payload; a single
 * command may carry up to NVME_DSM_MAX_RANGES of these.
 */
struct nvme_dsm_range {
	__le32			cattr;	/* context attributes for the range */
	__le32			nlb;	/* number of logical blocks */
	__le64			slba;	/* starting LBA */
};
1171
1172 struct nvme_write_zeroes_cmd {
1173 __u8 opcode;
1174 __u8 flags;
1175 __u16 command_id;
1176 __le32 nsid;
1177 __u64 rsvd2;
1178 __le64 metadata;
1179 union nvme_data_ptr dptr;
1180 __le64 slba;
1181 __le16 length;
1182 __le16 control;
1183 __le32 dsmgmt;
1184 __le32 reftag;
1185 __le16 lbat;
1186 __le16 lbatm;
1187 };
1188
1189 enum nvme_zone_mgmt_action {
1190 NVME_ZONE_CLOSE = 0x1,
1191 NVME_ZONE_FINISH = 0x2,
1192 NVME_ZONE_OPEN = 0x3,
1193 NVME_ZONE_RESET = 0x4,
1194 NVME_ZONE_OFFLINE = 0x5,
1195 NVME_ZONE_SET_DESC_EXT = 0x10,
1196 };
1197
1198 struct nvme_zone_mgmt_send_cmd {
1199 __u8 opcode;
1200 __u8 flags;
1201 __u16 command_id;
1202 __le32 nsid;
1203 __le32 cdw2[2];
1204 __le64 metadata;
1205 union nvme_data_ptr dptr;
1206 __le64 slba;
1207 __le32 cdw12;
1208 __u8 zsa;
1209 __u8 select_all;
1210 __u8 rsvd13[2];
1211 __le32 cdw14[2];
1212 };
1213
1214 struct nvme_zone_mgmt_recv_cmd {
1215 __u8 opcode;
1216 __u8 flags;
1217 __u16 command_id;
1218 __le32 nsid;
1219 __le64 rsvd2[2];
1220 union nvme_data_ptr dptr;
1221 __le64 slba;
1222 __le32 numd;
1223 __u8 zra;
1224 __u8 zrasf;
1225 __u8 pr;
1226 __u8 rsvd13;
1227 __le32 cdw14[2];
1228 };
1229
/* I/O Management Receive command */
struct nvme_io_mgmt_recv_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			mo;		/* management operation */
	__u8			rsvd11;
	__u16			mos;		/* management operation specific */
	__le32			numd;		/* 0's based number of dwords */
	__le32			cdw12[4];
};

/* I/O Management Receive operations */
enum {
	NVME_IO_MGMT_RECV_MO_RUHS	= 1,	/* Reclaim Unit Handle Status */
};

/* One Reclaim Unit Handle status descriptor (FDP) */
struct nvme_fdp_ruh_status_desc {
	__le16 pid;		/* placement identifier */
	__le16 ruhid;		/* reclaim unit handle identifier */
	__le32 earutr;		/* estimated active reclaim unit time remaining */
	__le64 ruamw;		/* reclaim unit available media writes */
	__u8   reserved[16];
};

/* Reclaim Unit Handle status header, followed by 'nruhsd' descriptors */
struct nvme_fdp_ruh_status {
	__u8  rsvd0[14];
	__le16 nruhsd;		/* number of RUH status descriptors */
	struct nvme_fdp_ruh_status_desc ruhsd[];
};
1261
/* Zone Management Receive action (ZRA) and action-specific (ZRASF) values */
enum {
	NVME_ZRA_ZONE_REPORT		= 0,
	NVME_ZRASF_ZONE_REPORT_ALL	= 0,
	NVME_ZRASF_ZONE_STATE_EMPTY	= 0x01,
	NVME_ZRASF_ZONE_STATE_IMP_OPEN	= 0x02,
	NVME_ZRASF_ZONE_STATE_EXP_OPEN	= 0x03,
	NVME_ZRASF_ZONE_STATE_CLOSED	= 0x04,
	NVME_ZRASF_ZONE_STATE_READONLY	= 0x05,
	NVME_ZRASF_ZONE_STATE_FULL	= 0x06,
	NVME_ZRASF_ZONE_STATE_OFFLINE	= 0x07,
	NVME_REPORT_ZONE_PARTIAL	= 1,	/* partial report bit */
};
1274
/* Features */

/* Temperature Threshold feature (CDW11) fields */
enum {
	NVME_TEMP_THRESH_MASK		= 0xffff,	/* threshold in Kelvin */
	NVME_TEMP_THRESH_SELECT_SHIFT	= 16,		/* sensor select */
	NVME_TEMP_THRESH_TYPE_UNDER	= 0x100000,	/* under temperature threshold */
};

/* Autonomous Power State Transition feature data: one entry per state */
struct nvme_feat_auto_pst {
	__le64 entries[32];
};

/* Host Memory Buffer feature (CDW11) flags */
enum {
	NVME_HOST_MEM_ENABLE	= (1 << 0),
	NVME_HOST_MEM_RETURN	= (1 << 1),	/* return previously allocated buffer */
};

/* Host Behavior Support feature data structure */
struct nvme_feat_host_behavior {
	__u8 acre;		/* advanced command retry enable */
	__u8 etdas;
	__u8 lbafee;		/* LBA format extension enable */
	__u8 resv1[509];
};

enum {
	NVME_ENABLE_ACRE	= 1,
	NVME_ENABLE_LBAFEE	= 1,
};
1303
/* Admin commands */

/* Admin command set opcodes */
enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_dev_self_test	= 0x14,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_directive_send	= 0x19,
	nvme_admin_directive_recv	= 0x1a,
	nvme_admin_virtual_mgmt		= 0x1c,
	nvme_admin_nvme_mi_send		= 0x1d,
	nvme_admin_nvme_mi_recv		= 0x1e,
	nvme_admin_dbbuf		= 0x7C,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
	nvme_admin_sanitize_nvm		= 0x84,
	nvme_admin_get_lba_status	= 0x86,
	nvme_admin_vendor_start		= 0xC0,	/* start of vendor specific range */
};

/* Tracing helpers: map an admin opcode value to its symbolic name */
#define nvme_admin_opcode_name(opcode)	{ opcode, #opcode }
#define show_admin_opcode_name(val)					\
	__print_symbolic(val,						\
		nvme_admin_opcode_name(nvme_admin_delete_sq),		\
		nvme_admin_opcode_name(nvme_admin_create_sq),		\
		nvme_admin_opcode_name(nvme_admin_get_log_page),	\
		nvme_admin_opcode_name(nvme_admin_delete_cq),		\
		nvme_admin_opcode_name(nvme_admin_create_cq),		\
		nvme_admin_opcode_name(nvme_admin_identify),		\
		nvme_admin_opcode_name(nvme_admin_abort_cmd),		\
		nvme_admin_opcode_name(nvme_admin_set_features),	\
		nvme_admin_opcode_name(nvme_admin_get_features),	\
		nvme_admin_opcode_name(nvme_admin_async_event),		\
		nvme_admin_opcode_name(nvme_admin_ns_mgmt),		\
		nvme_admin_opcode_name(nvme_admin_activate_fw),		\
		nvme_admin_opcode_name(nvme_admin_download_fw),		\
		nvme_admin_opcode_name(nvme_admin_dev_self_test),	\
		nvme_admin_opcode_name(nvme_admin_ns_attach),		\
		nvme_admin_opcode_name(nvme_admin_keep_alive),		\
		nvme_admin_opcode_name(nvme_admin_directive_send),	\
		nvme_admin_opcode_name(nvme_admin_directive_recv),	\
		nvme_admin_opcode_name(nvme_admin_virtual_mgmt),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_send),	\
		nvme_admin_opcode_name(nvme_admin_nvme_mi_recv),	\
		nvme_admin_opcode_name(nvme_admin_dbbuf),		\
		nvme_admin_opcode_name(nvme_admin_format_nvm),		\
		nvme_admin_opcode_name(nvme_admin_security_send),	\
		nvme_admin_opcode_name(nvme_admin_security_recv),	\
		nvme_admin_opcode_name(nvme_admin_sanitize_nvm),	\
		nvme_admin_opcode_name(nvme_admin_get_lba_status))
1367
enum {
	/* Create Queue flags and SQ priorities */
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	/* Feature identifiers (Set/Get Features) */
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_TIMESTAMP	= 0x0e,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_HCTM		= 0x10,
	NVME_FEAT_NOPSC		= 0x11,
	NVME_FEAT_RRL		= 0x12,
	NVME_FEAT_PLM_CONFIG	= 0x13,
	NVME_FEAT_PLM_WINDOW	= 0x14,
	NVME_FEAT_HOST_BEHAVIOR	= 0x16,
	NVME_FEAT_SANITIZE	= 0x17,
	NVME_FEAT_FDP		= 0x1d,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_FEAT_WRITE_PROTECT	= 0x84,
	NVME_FEAT_VENDOR_START	= 0xC0,
	NVME_FEAT_VENDOR_END	= 0xFF,
	/* Log page identifiers (Get Log Page) */
	NVME_LOG_SUPPORTED	= 0x00,
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_CHANGED_NS	= 0x04,
	NVME_LOG_CMD_EFFECTS	= 0x05,
	NVME_LOG_DEVICE_SELF_TEST = 0x06,
	NVME_LOG_TELEMETRY_HOST = 0x07,
	NVME_LOG_TELEMETRY_CTRL = 0x08,
	NVME_LOG_ENDURANCE_GROUP = 0x09,
	NVME_LOG_ANA		= 0x0c,
	NVME_LOG_FEATURES	= 0x12,
	NVME_LOG_RMI		= 0x16,
	NVME_LOG_FDP_CONFIGS	= 0x20,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	/* Firmware Commit (activate) actions, CDW10 bits 5:3 */
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};
1425
/* Supported Log Pages log: one dword of flags per log identifier */
struct nvme_supported_log {
	__le32 lids[256];
};

enum {
	NVME_LIDS_LSUPP	= 1 << 0,	/* log page is supported */
};

/* Supported Features log: one dword of flags per feature identifier */
struct nvme_supported_features_log {
	__le32	fis[256];
};

enum {
	NVME_FIS_FSUPP	= 1 << 0,	/* feature is supported */
	NVME_FIS_NSCPE	= 1 << 20,	/* namespace scope */
	NVME_FIS_CSCPE	= 1 << 21,	/* controller scope */
};

/* NVMe Namespace Write Protect State */
enum {
	NVME_NS_NO_WRITE_PROTECT = 0,
	NVME_NS_WRITE_PROTECT,
	NVME_NS_WRITE_PROTECT_POWER_CYCLE,
	NVME_NS_WRITE_PROTECT_PERMANENT,
};
1451
/* Maximum entries in the Changed Namespace List log page */
#define NVME_MAX_CHANGED_NAMESPACES	1024

/* Identify command: data returned depends on the CNS value */
struct nvme_identify {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			cns;		/* controller or namespace structure */
	__u8			rsvd3;
	__le16			ctrlid;
	__le16			cnssid;
	__u8			rsvd11;
	__u8			csi;		/* command set identifier */
	__u32			rsvd12[4];
};

#define NVME_IDENTIFY_DATA_SIZE 4096

/* Set Features / Get Features command */
struct nvme_features {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			fid;		/* feature identifier */
	__le32			dword11;	/* feature specific */
	__le32			dword12;
	__le32			dword13;
	__le32			dword14;
	__le32			dword15;
};

/* Host Memory Buffer descriptor list entry */
struct nvme_host_mem_buf_desc {
	__le64			addr;
	__le32			size;		/* in units of the controller page size */
	__u32			rsvd;
};
1492
/* Create I/O Completion Queue command */
struct nvme_create_cq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;		/* queue memory */
	__u64			rsvd8;
	__le16			cqid;
	__le16			qsize;		/* 0's based queue size */
	__le16			cq_flags;
	__le16			irq_vector;
	__u32			rsvd12[4];
};

/* Create I/O Submission Queue command */
struct nvme_create_sq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;		/* queue memory */
	__u64			rsvd8;
	__le16			sqid;
	__le16			qsize;		/* 0's based queue size */
	__le16			sq_flags;
	__le16			cqid;		/* completion queue to post to */
	__u32			rsvd12[4];
};

/* Delete I/O Submission or Completion Queue command */
struct nvme_delete_queue {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			qid;
	__u16			rsvd10;
	__u32			rsvd11[5];
};

/* Abort command: cancel command 'cid' on submission queue 'sqid' */
struct nvme_abort_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			sqid;
	__u16			cid;
	__u32			rsvd11[5];
};

/* Firmware Image Download command */
struct nvme_download_firmware {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	union nvme_data_ptr	dptr;
	__le32			numd;		/* 0's based number of dwords */
	__le32			offset;		/* dword offset into the image */
	__u32			rsvd12[4];
};

/* Format NVM command */
struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;		/* LBA format, PI, SES fields */
	__u32			rsvd11[5];
};
1561
/* Get Log Page command */
struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;		/* log page identifier */
	__u8			lsp;		/* upper 4 bits reserved */
	__le16			numdl;		/* number of dwords, lower */
	__le16			numdu;		/* number of dwords, upper */
	__le16			lsi;		/* log specific identifier */
	union {
		struct {
			__le32 lpol;	/* log page offset, lower */
			__le32 lpou;	/* log page offset, upper */
		};
		__le64 lpo;		/* combined 64-bit log page offset */
	};
	__u8			rsvd14[3];
	__u8			csi;		/* command set identifier */
	__u32			rsvd15;
};

/* Directive Send / Directive Receive command */
struct nvme_directive_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			numd;		/* 0's based number of dwords */
	__u8			doper;		/* directive operation */
	__u8			dtype;		/* directive type */
	__le16			dspec;		/* directive specific */
	__u8			endir;		/* enable directive */
	__u8			tdtype;		/* target directive type */
	__u16			rsvd15;

	__u32			rsvd16[3];
};
1603
/*
 * Fabrics subcommands.
 */

/* All fabrics commands share a single opcode; fctype selects the operation */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};

/* Fabrics command types (fctype) */
enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
	nvme_fabrics_type_auth_send	= 0x05,
	nvme_fabrics_type_auth_receive	= 0x06,
};

/* Tracing helpers: map a fabrics command type to its symbolic name */
#define nvme_fabrics_type_name(type)   { type, #type }
#define show_fabrics_type_name(type)					\
	__print_symbolic(type,						\
		nvme_fabrics_type_name(nvme_fabrics_type_property_set),	\
		nvme_fabrics_type_name(nvme_fabrics_type_connect),	\
		nvme_fabrics_type_name(nvme_fabrics_type_property_get),	\
		nvme_fabrics_type_name(nvme_fabrics_type_auth_send),	\
		nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))

/*
 * If not fabrics command, fctype will be ignored.
 */
#define show_opcode_name(qid, opcode, fctype)			\
	((opcode) == nvme_fabrics_command ?			\
	 show_fabrics_type_name(fctype) :			\
	((qid) ?						\
	 show_nvm_opcode_name(opcode) :				\
	 show_admin_opcode_name(opcode)))

/* Common layout shared by all fabrics command capsules */
struct nvmf_common_command {
	__u8	opcode;
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;		/* fabrics command type */
	__u8	resv2[35];
	__u8	ts[24];		/* transport specific */
};
1646
/*
 * The legal cntlid range a NVMe Target will provide.
 * Note that cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff

#define MAX_DISC_LOGS	255

/* Discovery log page entry flags (EFLAGS): */
enum {
	NVME_DISC_EFLAGS_EPCSD		= (1 << 1),	/* explicit persistent connection supported */
	NVME_DISC_EFLAGS_DUPRETINFO	= (1 << 0),	/* duplicate returned information */
};

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;		/* transport type */
	__u8		adrfam;		/* address family */
	__u8		subtype;	/* subsystem type */
	__u8		treq;		/* transport requirements */
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;		/* admin max SQ size */
	__le16		eflags;
	__u8		resv10[20];
	char		trsvcid[NVMF_TRSVCID_SIZE];	/* transport service id */
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];	/* transport address */
	union tsas {					/* transport specific address subtype */
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;
			__u8	prtype;
			__u8	cms;
			__u8	resv3[5];
			__u16	pkey;
			__u8	resv10[246];
		} rdma;
		struct tcp {
			__u8	sectype;
		} tcp;
	} tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
	__le64		genctr;		/* generation counter */
	__le64		numrec;		/* number of records */
	__le16		recfmt;		/* record format */
	__u8		resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[];
};
1704
enum {
	NVME_CONNECT_DISABLE_SQFLOW	= (1 << 2),	/* disable SQ flow control */
};

/* Fabrics Connect command: establish an admin or I/O queue */
struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__le16		recfmt;		/* connect data record format */
	__le16		qid;		/* 0 for the admin queue */
	__le16		sqsize;		/* 0's based submission queue size */
	__u8		cattr;		/* connect attributes */
	__u8		resv3;
	__le32		kato;		/* keep alive timeout in ms */
	__u8		resv4[12];
};

/* Connect response AUTHREQ bits: controller requires authentication */
enum {
	NVME_CONNECT_AUTHREQ_ASCR	= (1U << 18),
	NVME_CONNECT_AUTHREQ_ATR	= (1U << 17),
};

/* Data transferred with the Connect command */
struct nvmf_connect_data {
	uuid_t		hostid;
	__le16		cntlid;		/* requested controller id */
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};

/* Fabrics Property Set command: write a controller property */
struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;		/* property size attribute */
	__u8		resv3[3];
	__le32		offset;		/* property offset */
	__le64		value;
	__u8		resv4[8];
};

/* Fabrics Property Get command: read a controller property */
struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;		/* property size attribute */
	__u8		resv3[3];
	__le32		offset;		/* property offset */
	__u8		resv4[16];
};
1763
/* Layout shared by the fabrics Authentication Send/Receive commands */
struct nvmf_auth_common_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__u8		resv3;
	__u8		spsp0;		/* security protocol specific */
	__u8		spsp1;
	__u8		secp;		/* security protocol */
	__le32		al_tl;		/* allocation or transfer length */
	__u8		resv4[16];
};

/* Fabrics Authentication Send command */
struct nvmf_auth_send_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__u8		resv3;
	__u8		spsp0;
	__u8		spsp1;
	__u8		secp;
	__le32		tl;		/* transfer length */
	__u8		resv4[16];
};

/* Fabrics Authentication Receive command */
struct nvmf_auth_receive_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__u8		resv3;
	__u8		spsp0;
	__u8		spsp1;
	__u8		secp;
	__le32		al;		/* allocation length */
	__u8		resv4[16];
};

/* Value for secp */
enum {
	NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER	= 0xe9,
};

/* Defined value for auth_type */
enum {
	NVME_AUTH_COMMON_MESSAGES	= 0x00,
	NVME_AUTH_DHCHAP_MESSAGES	= 0x01,
};

/* Defined messages for auth_id */
enum {
	NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE	= 0x00,
	NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE	= 0x01,
	NVME_AUTH_DHCHAP_MESSAGE_REPLY		= 0x02,
	NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1	= 0x03,
	NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2	= 0x04,
	NVME_AUTH_DHCHAP_MESSAGE_FAILURE2	= 0xf0,
	NVME_AUTH_DHCHAP_MESSAGE_FAILURE1	= 0xf1,
};
1830
/* DH-HMAC-CHAP protocol descriptor: advertised hash and DH group lists */
struct nvmf_auth_dhchap_protocol_descriptor {
	__u8		authid;
	__u8		rsvd;
	__u8		halen;		/* number of hash identifiers */
	__u8		dhlen;		/* number of DH group identifiers */
	__u8		idlist[60];	/* hash ids followed by DH group ids */
};

enum {
	NVME_AUTH_DHCHAP_AUTH_ID	= 0x01,
};

/* Defined hash functions for DH-HMAC-CHAP authentication */
enum {
	NVME_AUTH_HASH_SHA256	= 0x01,
	NVME_AUTH_HASH_SHA384	= 0x02,
	NVME_AUTH_HASH_SHA512	= 0x03,
	NVME_AUTH_HASH_INVALID	= 0xff,
};

/* Maximum digest size for any NVME_AUTH_HASH_* value */
enum {
	NVME_AUTH_MAX_DIGEST_SIZE	= 64,
};

/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
enum {
	NVME_AUTH_DHGROUP_NULL		= 0x00,
	NVME_AUTH_DHGROUP_2048		= 0x01,
	NVME_AUTH_DHGROUP_3072		= 0x02,
	NVME_AUTH_DHGROUP_4096		= 0x03,
	NVME_AUTH_DHGROUP_6144		= 0x04,
	NVME_AUTH_DHGROUP_8192		= 0x05,
	NVME_AUTH_DHGROUP_INVALID	= 0xff,
};

/* Security protocol (secp) connection values */
enum {
	NVME_AUTH_SECP_NOSC		= 0x00,
	NVME_AUTH_SECP_SC		= 0x01,
	NVME_AUTH_SECP_NEWTLSPSK	= 0x02,
	NVME_AUTH_SECP_REPLACETLSPSK	= 0x03,
};

/* Union of all supported authentication protocol descriptors */
union nvmf_auth_protocol {
	struct nvmf_auth_dhchap_protocol_descriptor dhchap;
};
1877
/* AUTH_Negotiate message, followed by 'napd' protocol descriptors */
struct nvmf_auth_dhchap_negotiate_data {
	__u8		auth_type;
	__u8		auth_id;
	__le16		rsvd;
	__le16		t_id;		/* transaction identifier */
	__u8		sc_c;		/* secure channel concatenation */
	__u8		napd;		/* number of auth protocol descriptors */
	union nvmf_auth_protocol auth_protocol[];
};

/* DH-HMAC-CHAP_Challenge message */
struct nvmf_auth_dhchap_challenge_data {
	__u8		auth_type;
	__u8		auth_id;
	__u16		rsvd1;
	__le16		t_id;		/* transaction identifier */
	__u8		hl;		/* hash length */
	__u8		rsvd2;
	__u8		hashid;		/* selected hash function */
	__u8		dhgid;		/* selected DH group */
	__le16		dhvlen;		/* DH value length */
	__le32		seqnum;
	/* 'hl' bytes of challenge value */
	__u8		cval[];
	/* followed by 'dhvlen' bytes of DH value */
};

/* DH-HMAC-CHAP_Reply message */
struct nvmf_auth_dhchap_reply_data {
	__u8		auth_type;
	__u8		auth_id;
	__le16		rsvd1;
	__le16		t_id;		/* transaction identifier */
	__u8		hl;		/* hash length */
	__u8		rsvd2;
	__u8		cvalid;		/* challenge valid flag */
	__u8		rsvd3;
	__le16		dhvlen;		/* DH value length */
	__le32		seqnum;
	/* 'hl' bytes of response data */
	__u8		rval[];
	/* followed by 'hl' bytes of Challenge value */
	/* followed by 'dhvlen' bytes of DH value */
};

enum {
	NVME_AUTH_DHCHAP_RESPONSE_VALID	= (1 << 0),
};
1924
/* DH-HMAC-CHAP_Success1 message, optionally carrying a response value */
struct nvmf_auth_dhchap_success1_data {
	__u8		auth_type;
	__u8		auth_id;
	__le16		rsvd1;
	__le16		t_id;		/* transaction identifier */
	__u8		hl;		/* hash length */
	__u8		rsvd2;
	__u8		rvalid;		/* response valid flag */
	__u8		rsvd3[7];
	/* 'hl' bytes of response value */
	__u8		rval[];
};

/* DH-HMAC-CHAP_Success2 message */
struct nvmf_auth_dhchap_success2_data {
	__u8		auth_type;
	__u8		auth_id;
	__le16		rsvd1;
	__le16		t_id;		/* transaction identifier */
	__u8		rsvd2[10];
};

/* DH-HMAC-CHAP_Failure1 / _Failure2 message */
struct nvmf_auth_dhchap_failure_data {
	__u8		auth_type;
	__u8		auth_id;
	__le16		rsvd1;
	__le16		t_id;		/* transaction identifier */
	__u8		rescode;	/* reason code */
	__u8		rescode_exp;	/* reason code explanation */
};

enum {
	NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED	= 0x01,
};

/* Reason code explanations for authentication failure */
enum {
	NVME_AUTH_DHCHAP_FAILURE_FAILED			= 0x01,
	NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE		= 0x02,
	NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH	= 0x03,
	NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE		= 0x04,
	NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE	= 0x05,
	NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD	= 0x06,
	NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE	= 0x07,
};
1968
1969
/* Doorbell Buffer Config command: register shadow doorbell buffers */
struct nvme_dbbuf {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;		/* shadow doorbell buffer */
	__le64			prp2;		/* eventidx buffer */
	__u32			rsvd12[6];
};

/* Streams directive parameters returned by Directive Receive */
struct streams_directive_params {
	__le16	msl;		/* max streams limit */
	__le16	nssa;		/* NVM subsystem streams available */
	__le16	nsso;		/* NVM subsystem streams open */
	__u8	rsvd[10];
	__le32	sws;		/* stream write size */
	__le16	sgs;		/* stream granularity size */
	__le16	nsa;		/* namespace streams allocated */
	__le16	nso;		/* namespace streams open */
	__u8	rsvd2[6];
};
1991
/*
 * A submission queue entry: every command shares one 64-byte layout,
 * viewed through the member matching its opcode.
 */
struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_zone_mgmt_send_cmd zms;
		struct nvme_zone_mgmt_recv_cmd zmr;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvmf_auth_common_command auth_common;
		struct nvmf_auth_send_command auth_send;
		struct nvmf_auth_receive_command auth_receive;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
		struct nvme_io_mgmt_recv_cmd imr;
	};
};
2021
nvme_is_fabrics(const struct nvme_command * cmd)2022 static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
2023 {
2024 return cmd->common.opcode == nvme_fabrics_command;
2025 }
2026
#ifdef CONFIG_NVME_VERBOSE_ERRORS
/* Verbose variants translate the value to a descriptive string */
const char *nvme_get_error_status_str(u16 status);
const char *nvme_get_opcode_str(u8 opcode);
const char *nvme_get_admin_opcode_str(u8 opcode);
const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
/* Without verbose errors only generic category strings are available */
static inline const char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}

static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
2051
/*
 * Map an opcode to a printable name; the queue id decides whether it
 * is looked up as an admin (qid 0) or an I/O opcode.
 */
static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
	if (qid)
		return nvme_get_opcode_str(opcode);
	return nvme_get_admin_opcode_str(opcode);
}
2057
nvme_fabrics_opcode_str(int qid,const struct nvme_command * cmd)2058 static inline const char *nvme_fabrics_opcode_str(
2059 int qid, const struct nvme_command *cmd)
2060 {
2061 if (nvme_is_fabrics(cmd))
2062 return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
2063
2064 return nvme_opcode_str(qid, cmd->common.opcode);
2065 }
2066
/* One entry of the Error Information log page */
struct nvme_error_slot {
	__le64		error_count;
	__le16		sqid;		/* submission queue id */
	__le16		cmdid;		/* command id */
	__le16		status_field;
	__le16		param_error_location;
	__le64		lba;
	__le32		nsid;
	__u8		vs;		/* vendor specific */
	__u8		resv[3];
	__le64		cs;		/* command specific information */
	__u8		resv2[24];
};
2080
/*
 * True when the command transfers data from host to controller.
 * Bit 0 of the opcode (or fctype, for fabrics commands) encodes the
 * data transfer direction.
 */
static inline bool nvme_is_write(const struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics out command?
	 */
	if (unlikely(nvme_is_fabrics(cmd)))
		return cmd->fabrics.fctype & 1;
	return cmd->common.opcode & 1;
}
2092
/*
 * Completion queue entry status field values: Status Code Type (SCT)
 * in bits 10:8 combined with the Status Code (SC) in bits 7:0, plus
 * the CRD/MORE/DNR modifier bits.
 */
enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SCT_GENERIC		= 0x0,
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,
	NVME_SC_CMB_INVALID_USE		= 0x12,
	NVME_SC_PRP_INVALID_OFFSET	= 0x13,
	NVME_SC_ATOMIC_WU_EXCEEDED	= 0x14,
	NVME_SC_OP_DENIED		= 0x15,
	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_RESERVED		= 0x17,
	NVME_SC_HOST_ID_INCONSIST	= 0x18,
	NVME_SC_KA_TIMEOUT_EXPIRED	= 0x19,
	NVME_SC_KA_TIMEOUT_INVALID	= 0x1A,
	NVME_SC_ABORTED_PREEMPT_ABORT	= 0x1B,
	NVME_SC_SANITIZE_FAILED		= 0x1C,
	NVME_SC_SANITIZE_IN_PROGRESS	= 0x1D,
	NVME_SC_SGL_INVALID_GRANULARITY	= 0x1E,
	NVME_SC_CMD_NOT_SUP_CMB_QUEUE	= 0x1F,
	NVME_SC_NS_WRITE_PROTECTED	= 0x20,
	NVME_SC_CMD_INTERRUPTED		= 0x21,
	NVME_SC_TRANSIENT_TR_ERR	= 0x22,
	NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
	NVME_SC_INVALID_IO_CMD_SET	= 0x2C,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,
	NVME_SC_FORMAT_IN_PROGRESS	= 0x84,

	/*
	 * Command Specific Status:
	 */
	NVME_SCT_COMMAND_SPECIFIC	= 0x100,
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACTIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICIENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,
	NVME_SC_SELF_TEST_IN_PROGRESS	= 0x11d,
	NVME_SC_BP_WRITE_PROHIBITED	= 0x11e,
	NVME_SC_CTRL_ID_INVALID		= 0x11f,
	NVME_SC_SEC_CTRL_STATE_INVALID	= 0x120,
	NVME_SC_CTRL_RES_NUM_INVALID	= 0x121,
	NVME_SC_RES_ID_INVALID		= 0x122,
	NVME_SC_PMR_SAN_PROHIBITED	= 0x123,
	NVME_SC_ANA_GROUP_ID_INVALID	= 0x124,
	NVME_SC_ANA_ATTACH_FAILED	= 0x125,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_CMD_SIZE_LIM_EXCEEDED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * I/O Command Set Specific - Zoned commands:
	 */
	NVME_SC_ZONE_BOUNDARY_ERROR	= 0x1b8,
	NVME_SC_ZONE_FULL		= 0x1b9,
	NVME_SC_ZONE_READ_ONLY		= 0x1ba,
	NVME_SC_ZONE_OFFLINE		= 0x1bb,
	NVME_SC_ZONE_INVALID_WRITE	= 0x1bc,
	NVME_SC_ZONE_TOO_MANY_ACTIVE	= 0x1bd,
	NVME_SC_ZONE_TOO_MANY_OPEN	= 0x1be,
	NVME_SC_ZONE_INVALID_TRANSITION	= 0x1bf,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SCT_MEDIA_ERROR		= 0x200,
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	/*
	 * Path-related Errors:
	 */
	NVME_SCT_PATH			= 0x300,
	NVME_SC_INTERNAL_PATH_ERROR	= 0x300,
	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
	NVME_SC_ANA_INACCESSIBLE	= 0x302,
	NVME_SC_ANA_TRANSITION		= 0x303,
	NVME_SC_CTRL_PATH_ERROR		= 0x360,
	NVME_SC_HOST_PATH_ERROR		= 0x370,
	NVME_SC_HOST_ABORTED_CMD	= 0x371,

	NVME_SC_MASK			= 0x00ff, /* Status Code */
	NVME_SCT_MASK			= 0x0700, /* Status Code Type */
	NVME_SCT_SC_MASK		= NVME_SCT_MASK | NVME_SC_MASK,

	NVME_STATUS_CRD			= 0x1800, /* Command Retry Delayed */
	NVME_STATUS_MORE		= 0x2000,
	NVME_STATUS_DNR			= 0x4000, /* Do Not Retry */
};
2249
/* Extract the Status Code Type from a status field value */
#define NVME_SCT(status) ((status) >> 8 & 7)

/* A 16-byte completion queue entry */
struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};

/* Compose and decompose the VS register / Identify version field */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)
2273
enum {
	NVME_AEN_RESV_LOG_PAGE_AVALIABLE	= 0x00,	/* reservation log page available */
};

/* Reservation Notification log page types */
enum {
	NVME_PR_LOG_EMPTY_LOG_PAGE		= 0x00,
	NVME_PR_LOG_REGISTRATION_PREEMPTED	= 0x01,
	NVME_PR_LOG_RESERVATION_RELEASED	= 0x02,
	NVME_PR_LOG_RESERVATOIN_PREEMPTED	= 0x03,
};

/* Reservation Notification Mask feature bit positions */
enum {
	NVME_PR_NOTIFY_BIT_REG_PREEMPTED	= 1,
	NVME_PR_NOTIFY_BIT_RESV_RELEASED	= 2,
	NVME_PR_NOTIFY_BIT_RESV_PREEMPTED	= 3,
};

/* Reservation Notification log page */
struct nvme_pr_log {
	__le64			count;		/* log page count */
	__u8			type;		/* notification type, see above */
	__u8			nr_pages;	/* number of available log pages */
	__u8			rsvd1[2];
	__le32			nsid;
	__u8			rsvd2[48];
};

/* Reservation Register command data */
struct nvmet_pr_register_data {
	__le64	crkey;		/* current reservation key */
	__le64	nrkey;		/* new reservation key */
};

/* Reservation Acquire command data */
struct nvmet_pr_acquire_data {
	__le64	crkey;		/* current reservation key */
	__le64	prkey;		/* preempt reservation key */
};

/* Reservation Release command data */
struct nvmet_pr_release_data {
	__le64	crkey;		/* current reservation key */
};

/* Reservation capabilities (Identify Namespace RESCAP field) */
enum nvme_pr_capabilities {
	NVME_PR_SUPPORT_PTPL				= 1,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE			= 1 << 1,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS		= 1 << 2,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY	= 1 << 3,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY	= 1 << 4,
	NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS	= 1 << 5,
	NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS	= 1 << 6,
	NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF		= 1 << 7,
};

/* Reservation Register actions (RREGA) */
enum nvme_pr_register_action {
	NVME_PR_REGISTER_ACT_REG	= 0,
	NVME_PR_REGISTER_ACT_UNREG	= 1,
	NVME_PR_REGISTER_ACT_REPLACE	= 1 << 1,
};

/* Reservation Acquire actions (RACQA) */
enum nvme_pr_acquire_action {
	NVME_PR_ACQUIRE_ACT_ACQUIRE		= 0,
	NVME_PR_ACQUIRE_ACT_PREEMPT		= 1,
	NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT	= 1 << 1,
};

/* Reservation Release actions (RRELA) */
enum nvme_pr_release_action {
	NVME_PR_RELEASE_ACT_RELEASE	= 0,
	NVME_PR_RELEASE_ACT_CLEAR	= 1,
};

/* Change Persist Through Power Loss state (CPTPL), CDW10 bits 31:30 */
enum nvme_pr_change_ptpl {
	NVME_PR_CPTPL_NO_CHANGE	= 0,
	NVME_PR_CPTPL_RESV	= 1 << 30,
	NVME_PR_CPTPL_CLEARED	= 2 << 30,
	NVME_PR_CPTPL_PERSIST	= 3 << 30,
};

/* Ignore Existing Key bit in reservation command CDW10 */
#define NVME_PR_IGNORE_KEY	(1 << 3)

/* Section 8.3.4.5.2 of the NVMe 2.1 */
#define NVME_AUTH_DHCHAP_MAX_HASH_IDS	30
#define NVME_AUTH_DHCHAP_MAX_DH_IDS	30
2354
2355 #endif /* _LINUX_NVME_H */
2356