/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>
#include <linux/ratelimit_types.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
extern struct mutex nvme_subsystems_lock;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that were deallocated.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before the host starts checking
	 * device readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Problems seen with concurrent commands.
	 */
	NVME_QUIRK_QDEPTH_ONE			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation.
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore the device-provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues.
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues.
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature.
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),

	/*
	 * Disables the simple suspend/resume path.
	 */
	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),

	/*
	 * MSI (but not MSI-X) interrupts are broken and never fire.
	 */
	NVME_QUIRK_BROKEN_MSI			= (1 << 21),

	/*
	 * Align dma pool segment size to 512 bytes.
	 */
	NVME_QUIRK_DMAPOOL_ALIGN_512		= (1 << 22),
};

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command *cmd;
	union nvme_result result;
	u8 genctr;
	u8 retries;
	u8 flags;
	u16 status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long start_time;
#endif
	struct nvme_ctrl *ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED	= (1 << 0),
	NVME_REQ_USERCMD	= (1 << 1),
	NVME_MPATH_IO_STATS	= (1 << 2),
	NVME_MPATH_CNT_ACTIVE	= (1 << 3),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
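
/*
 * Example (illustrative only): admin requests have no queuedata and report
 * qid 0, while an I/O request dispatched on hardware context 2 reports
 * nvme_req_qid(req) == req->mq_hctx->queue_num + 1 == 3, matching the
 * controller's 1-based I/O queue numbering.
 */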

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
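
/*
 * Illustrative lifecycles (a sketch derived from the documentation above,
 * not an exhaustive transition table): a controller is typically brought up
 * NEW -> CONNECTING -> LIVE, traverses LIVE -> RESETTING -> CONNECTING ->
 * LIVE across a reset, and is torn down via DELETING (optionally
 * DELETING_NOIO), with DEAD reserved for controllers that stop responding.
 */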

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
	NVME_CTRL_DIRTY_CAPABILITY	= 5,
	NVME_CTRL_FROZEN		= 6,
};

struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	bool passthru_err_log_enabled;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct mutex namespaces_lock;
	struct srcu_struct srcu;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u8 dmrl;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
	atomic_t nr_active;
#endif

#ifdef CONFIG_NVME_HOST_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif
	key_serial_t tls_pskid;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
	u16 awupf;	/* 0's based value. */
};

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
	NVME_IOPOLICY_QD,
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	enum nvme_subsys_type subtype;
	u16 vendor_id;
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
	u32 atomic_bs;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};

/*
 * Anchor structure for namespaces. There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	struct nvme_ns_ids ids;
	u8 lba_shift;
	u16 ms;
	u16 pi_size;
	u8 pi_type;
	u8 guard_type;
	struct list_head entry;
	struct kref ref;
	bool shared;
	bool rotational;
	bool passthru_err_log_enabled;
	struct nvme_effects_log *effects;
	u64 nuse;
	unsigned ns_id;
	int instance;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;

	struct ratelimit_state rs_nuse;

	struct cdev cdev;
	struct device cdev_device;

	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct work_struct partition_scan_work;
	struct mutex lock;
	unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu *current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0,		/* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1,	/* support getting generated md */
	NVME_NS_DEAC = 1 << 2,			/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_SYSFS_ATTR_LINK	5

	struct cdev cdev;
	struct device cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
	return head->pi_type && head->ms == head->pi_size;
}
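
/*
 * Example (illustrative): a format with 8 bytes of metadata per block used
 * entirely as protection information has ms == pi_size == 8, so
 * nvme_ns_has_pi() is true. A format carrying 16 bytes of metadata around
 * an 8-byte PI tuple fails the ms == pi_size test, and the driver does not
 * use controller generate/strip for it.
 */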

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	int (*subsystem_reset)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)
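
/*
 * Worked example (illustrative): with genctr 0x5 and blk-mq tag 0x02a,
 * nvme_cid_install_genctr(0x5) | 0x02a yields cid 0x502a; decoding gives
 * nvme_genctr_from_cid(0x502a) == 0x5 and nvme_tag_from_cid(0x502a) ==
 * 0x02a. Only 4 bits of the generation counter are encoded, so it wraps
 * after 16 reuses of a tag.
 */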

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
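
/*
 * Example (illustrative): Identify strings are space padded, so for the
 * 8-byte firmware revision "1.0     " nvme_strlen(s, 8) returns 3. Callers
 * must pass a string with at least one non-space byte, as the loop has no
 * lower bound on len.
 */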

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
		return -ENOTTY;
	return ctrl->ops->subsystem_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
	return sector >> (head->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
	return lba << (head->lba_shift - SECTOR_SHIFT);
}
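
/*
 * Worked example (illustrative): on a namespace formatted with 4096-byte
 * logical blocks (lba_shift == 12), nvme_sect_to_lba(head, 16) is
 * 16 >> (12 - 9) == 2, and nvme_lba_to_sect(head, 2) == 16 converts back.
 * With 512-byte blocks (lba_shift == 9) both are identity conversions.
 */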

/*
 * Convert byte length to NVMe's 0-based number of dwords.
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
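
/*
 * Example (illustrative): a 4096-byte buffer is 1024 dwords, which NVMe
 * encodes 0-based as nvme_bytes_to_numd(4096) == (4096 >> 2) - 1 == 1023.
 */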

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so. If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
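
/*
 * Usage sketch (illustrative, not tied to any one transport): a CQE
 * handler tries the remote completion first and only completes inline on
 * a false return, e.g.
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		my_transport_complete_rq(req);
 *
 * where my_transport_complete_rq() is a stand-in for the transport's own
 * completion callback.
 */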

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
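
/*
 * Example (illustrative): only the admin queue (qid 0) carries AENs. With
 * the default admin queue depth of 32 and one slot reserved for async
 * event commands, NVME_AQ_BLK_MQ_DEPTH is 31, so a qid-0 completion whose
 * tag is 31 or higher is an AEN rather than a blk-mq request.
 */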

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
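
/*
 * Usage sketch (illustrative): a transport batches completions by passing
 * its per-request teardown as @fn, e.g.
 *
 *	static void my_unmap_rq(struct request *req)
 *	{
 *		... undo per-request DMA mappings (hypothetical) ...
 *	}
 *
 *	nvme_complete_batch(iob, my_unmap_rq);
 *
 * so every request in the batch is torn down and completed, then ended in
 * a single blk_mq_end_request_batch() call instead of one at a time.
 */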

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (likely(state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live, state);
}

/*
 * NSID shall be unique for all shared namespaces, or if at least one of the
 * following conditions is met:
 * 1. Namespace Management is supported by the controller
 * 2. ANA is supported by the controller
 * 3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

/*
 * Flags for __nvme_submit_sync_cmd()
 */
typedef __u32 __bitwise nvme_submit_flags_t;

enum {
	/* Insert request at the head of the queue */
	NVME_SUBMIT_AT_HEAD	= (__force nvme_submit_flags_t)(1 << 0),
	/* Set BLK_MQ_REQ_NOWAIT when allocating request */
	NVME_SUBMIT_NOWAIT	= (__force nvme_submit_flags_t)(1 << 1),
	/* Set BLK_MQ_REQ_RESERVED when allocating request */
	NVME_SUBMIT_RESERVED	= (__force nvme_submit_flags_t)(1 << 2),
	/* Retry command when NVME_STATUS_DNR is not set in the result */
	NVME_SUBMIT_RETRY	= (__force nvme_submit_flags_t)(1 << 3),
};

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_attr_groups[];
extern const struct attribute_group nvme_ns_mpath_attr_group;
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns);
void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute dev_attr_queue_depth;
extern struct device_attribute dev_attr_numa_nodes;
extern struct device_attribute subsys_attr_iopolicy;

static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return disk->fops == &nvme_ns_head_ops;
}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
	return false;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
		enum blk_unique_id type);

struct nvme_zone_info {
	u64 zone_size;
	unsigned int max_open_zones;
	unsigned int max_active_zones;
};

int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
		struct nvme_zone_info *zi);
void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
		struct nvme_zone_info *zi);
#ifdef CONFIG_BLK_DEV_ZONED
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	WARN_ON(nvme_disk_is_ns_head(disk));
	return disk->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED |
			     NVME_CTRL_SGLS_DWORD_ALIGNED);
}

static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS)
		return true;
	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
}

#ifdef CONFIG_NVME_HOST_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
bool nvme_get_ns(struct nvme_ns *ns);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */