/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of connect().
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
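
/*
 * A hypothetical usage sketch (not part of this header): a connect
 * handler that rejects a bad hostnqn in the connect data could report
 * the offending offset like this, assuming "req" is the in-flight
 * nvmet_req:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */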

struct nvmet_ns {
	struct percpu_ref	ref;
	struct bdev_handle	*bdev_handle;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u16			dhchap_status;
	int			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *			information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *			for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio	inline_bio;
			struct request	*rq;
			struct work_struct work;
			bool		use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio	inline_bio;
			struct work_struct zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side:
 * a host Write moves data from the host into our buffers
 * (DMA_FROM_DEVICE), and a host Read moves data out of them
 * (DMA_TO_DEVICE).
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

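/*
 * Bit 15 of Get Log Page cdw10 is the RAE (Retain Asynchronous Event)
 * flag: if the host set it, the AEN stays masked; only a log page read
 * with RAE clear re-arms the event below.
 */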
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
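
/*
 * A minimal sketch of the intended caller pattern (hypothetical, not
 * from this header): suppress duplicate ANA-change AENs until the host
 * reads the ANA log page with RAE clear:
 *
 *	if (!nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
 *		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *				      NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
 */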

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE		1024
#define NVMET_NR_QUEUES			128
#define NVMET_MAX_CMD			NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES		1024
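
/*
 * Sanity-check sketch (assumes 4K pages, which this header does not
 * assert): the changed-namespace list stores one __le32 per nsid, so
 * 1024 entries * 4 bytes == 4096 bytes, exactly one page:
 *
 *	static_assert(NVMET_MAX_NAMESPACES * sizeof(__le32) == SZ_4K);
 */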

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
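
/*
 * Worked example (hypothetical values): rw.length holds the 0's based
 * NLB field, so a value of 7 means 8 logical blocks; with 4096-byte
 * blocks (blksize_shift == 12) that is 8 << 12 = 32768 bytes. The
 * metadata helper below scales the same block count by metadata_size
 * instead.
 */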

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
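
/*
 * Worked examples (hypothetical inputs): to0based(1) == 0, to0based(0)
 * is clamped up to the same 0, and anything above 1 << 16 saturates,
 * e.g. to0based(100000) == 0xffff.
 */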

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
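
/*
 * Worked example (hypothetical namespace): with 4096-byte logical
 * blocks, blksize_shift == 12 and SECTOR_SHIFT == 9, so both helpers
 * shift by 3: 512-byte sector 80 maps to LBA 10 and back.
 */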

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
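
/*
 * On a 4K-page system (an assumption; PAGE_SIZE varies by arch) the
 * inline fast path above covers transfers of up to 8 * 4096 = 32768
 * bytes spread over at most 8 bio vectors.
 */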

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */