1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/inet.h>
15 #include <linux/llist.h>
16 #include <crypto/hash.h>
17 #include <trace/events/sock.h>
18
19 #include "nvmet.h"
20
21 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
22
23 static int param_store_val(const char *str, int *val, int min, int max)
24 {
25 int ret, new_val;
26
27 ret = kstrtoint(str, 10, &new_val);
28 if (ret)
29 return -EINVAL;
30
31 if (new_val < min || new_val > max)
32 return -EINVAL;
33
34 *val = new_val;
35 return 0;
36 }
37
38 static int set_params(const char *str, const struct kernel_param *kp)
39 {
40 return param_store_val(str, kp->arg, 0, INT_MAX);
41 }
42
43 static const struct kernel_param_ops set_param_ops = {
44 .set = set_params,
45 .get = param_get_int,
46 };
47
48 /* Define the socket priority to use for connections where it is desirable
49 * that the NIC consider performing optimized packet processing or filtering.
50 * A non-zero value is sufficient to indicate general consideration of any
51 * possible optimization. Making it a module param allows for alternative
52 * values that may be unique for some NIC implementations.
53 */
54 static int so_priority;
55 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
56 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
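/*
 * Example (illustrative only; the exact module name and sysfs path depend on
 * how this driver is built and loaded on a given system):
 *   modprobe nvmet-tcp so_priority=6
 * or, at runtime:
 *   echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 */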
57
58 /* Define a time period (in usecs) for which io_work() shall sample an activated
59 * queue before determining it to be idle. This optional module behavior
60 * can enable NIC solutions that support socket optimized packet processing
61 * using advanced interrupt moderation techniques.
62 */
63 static int idle_poll_period_usecs;
64 device_param_cb(idle_poll_period_usecs, &set_param_ops,
65 &idle_poll_period_usecs, 0644);
66 MODULE_PARM_DESC(idle_poll_period_usecs,
67 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
68
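/*
 * Per-invocation work budgets for io_work(): each pass attempts up to
 * NVMET_TCP_RECV_BUDGET receive operations and NVMET_TCP_SEND_BUDGET send
 * operations, and the loop stops once the combined operation count reaches
 * NVMET_TCP_IO_WORK_BUDGET (the worker requeues itself if work remains).
 */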
69 #define NVMET_TCP_RECV_BUDGET 8
70 #define NVMET_TCP_SEND_BUDGET 8
71 #define NVMET_TCP_IO_WORK_BUDGET 64
72
73 enum nvmet_tcp_send_state {
74 NVMET_TCP_SEND_DATA_PDU,
75 NVMET_TCP_SEND_DATA,
76 NVMET_TCP_SEND_R2T,
77 NVMET_TCP_SEND_DDGST,
78 NVMET_TCP_SEND_RESPONSE
79 };
80
81 enum nvmet_tcp_recv_state {
82 NVMET_TCP_RECV_PDU,
83 NVMET_TCP_RECV_DATA,
84 NVMET_TCP_RECV_DDGST,
85 NVMET_TCP_RECV_ERR,
86 };
87
88 enum {
89 NVMET_TCP_F_INIT_FAILED = (1 << 0),
90 };
91
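/*
 * Per-command context: tracks the NVMe request, the PDUs used to respond
 * (C2HData, R2T, response capsule), receive/send progress counters and the
 * data digest state for a single command on a queue.
 */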
92 struct nvmet_tcp_cmd {
93 struct nvmet_tcp_queue *queue;
94 struct nvmet_req req;
95
96 struct nvme_tcp_cmd_pdu *cmd_pdu;
97 struct nvme_tcp_rsp_pdu *rsp_pdu;
98 struct nvme_tcp_data_pdu *data_pdu;
99 struct nvme_tcp_r2t_pdu *r2t_pdu;
100
101 u32 rbytes_done;
102 u32 wbytes_done;
103
104 u32 pdu_len;
105 u32 pdu_recv;
106 int sg_idx;
107 struct msghdr recv_msg;
108 struct bio_vec *iov;
109 u32 flags;
110
111 struct list_head entry;
112 struct llist_node lentry;
113
114 /* send state */
115 u32 offset;
116 struct scatterlist *cur_sg;
117 enum nvmet_tcp_send_state state;
118
119 __le32 exp_ddgst;
120 __le32 recv_ddgst;
121 };
122
123 enum nvmet_tcp_queue_state {
124 NVMET_TCP_Q_CONNECTING,
125 NVMET_TCP_Q_LIVE,
126 NVMET_TCP_Q_DISCONNECTING,
127 };
128
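/*
 * Per-connection queue context: one instance per accepted TCP socket. It
 * carries the nvmet SQ/CQ, the command pool, the send and receive state
 * machines, optional header/data digest requests, and the saved socket
 * callbacks that are restored on release.
 */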
129 struct nvmet_tcp_queue {
130 struct socket *sock;
131 struct nvmet_tcp_port *port;
132 struct work_struct io_work;
133 struct nvmet_cq nvme_cq;
134 struct nvmet_sq nvme_sq;
135
136 /* send state */
137 struct nvmet_tcp_cmd *cmds;
138 unsigned int nr_cmds;
139 struct list_head free_list;
140 struct llist_head resp_list;
141 struct list_head resp_send_list;
142 int send_list_len;
143 struct nvmet_tcp_cmd *snd_cmd;
144
145 /* recv state */
146 int offset;
147 int left;
148 enum nvmet_tcp_recv_state rcv_state;
149 struct nvmet_tcp_cmd *cmd;
150 union nvme_tcp_pdu pdu;
151
152 /* digest state */
153 bool hdr_digest;
154 bool data_digest;
155 struct ahash_request *snd_hash;
156 struct ahash_request *rcv_hash;
157
158 unsigned long poll_end;
159
160 spinlock_t state_lock;
161 enum nvmet_tcp_queue_state state;
162
163 struct sockaddr_storage sockaddr;
164 struct sockaddr_storage sockaddr_peer;
165 struct work_struct release_work;
166
167 int idx;
168 struct list_head queue_list;
169
170 struct nvmet_tcp_cmd connect;
171
172 struct page_frag_cache pf_cache;
173
174 void (*data_ready)(struct sock *);
175 void (*state_change)(struct sock *);
176 void (*write_space)(struct sock *);
177 };
178
179 struct nvmet_tcp_port {
180 struct socket *sock;
181 struct work_struct accept_work;
182 struct nvmet_port *nport;
183 struct sockaddr_storage addr;
184 void (*data_ready)(struct sock *);
185 };
186
187 static DEFINE_IDA(nvmet_tcp_queue_ida);
188 static LIST_HEAD(nvmet_tcp_queue_list);
189 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
190
191 static struct workqueue_struct *nvmet_tcp_wq;
192 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
193 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
194 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
195
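/*
 * The transfer tag (ttag) carried in R2T PDUs is simply the command's index
 * in queue->cmds, which lets nvmet_tcp_handle_h2c_data_pdu() look the command
 * up again when the corresponding H2CData PDU arrives.
 */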
196 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
197 struct nvmet_tcp_cmd *cmd)
198 {
199 if (unlikely(!queue->nr_cmds)) {
200 /* We didn't allocate cmds yet, send 0xffff */
201 return USHRT_MAX;
202 }
203
204 return cmd - queue->cmds;
205 }
206
207 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
208 {
209 return nvme_is_write(cmd->req.cmd) &&
210 cmd->rbytes_done < cmd->req.transfer_len;
211 }
212
213 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
214 {
215 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
216 }
217
218 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
219 {
220 return !nvme_is_write(cmd->req.cmd) &&
221 cmd->req.transfer_len > 0 &&
222 !cmd->req.cqe->status;
223 }
224
225 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
226 {
227 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
228 !cmd->rbytes_done;
229 }
230
231 static inline struct nvmet_tcp_cmd *
232 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
233 {
234 struct nvmet_tcp_cmd *cmd;
235
236 cmd = list_first_entry_or_null(&queue->free_list,
237 struct nvmet_tcp_cmd, entry);
238 if (!cmd)
239 return NULL;
240 list_del_init(&cmd->entry);
241
242 cmd->rbytes_done = cmd->wbytes_done = 0;
243 cmd->pdu_len = 0;
244 cmd->pdu_recv = 0;
245 cmd->iov = NULL;
246 cmd->flags = 0;
247 return cmd;
248 }
249
250 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
251 {
252 if (unlikely(cmd == &cmd->queue->connect))
253 return;
254
255 list_add_tail(&cmd->entry, &cmd->queue->free_list);
256 }
257
258 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
259 {
260 return queue->sock->sk->sk_incoming_cpu;
261 }
262
263 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
264 {
265 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
266 }
267
268 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
269 {
270 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
271 }
272
273 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
274 void *pdu, size_t len)
275 {
276 struct scatterlist sg;
277
278 sg_init_one(&sg, pdu, len);
279 ahash_request_set_crypt(hash, &sg, pdu + len, len);
280 crypto_ahash_digest(hash);
281 }
282
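/*
 * Recompute the CRC32C header digest over the received PDU header and compare
 * it against the digest that immediately follows the header; a missing HDGST
 * flag or a mismatch is treated as a protocol error.
 */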
283 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
284 void *pdu, size_t len)
285 {
286 struct nvme_tcp_hdr *hdr = pdu;
287 __le32 recv_digest;
288 __le32 exp_digest;
289
290 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
291 pr_err("queue %d: header digest enabled but no header digest\n",
292 queue->idx);
293 return -EPROTO;
294 }
295
296 recv_digest = *(__le32 *)(pdu + hdr->hlen);
297 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
298 exp_digest = *(__le32 *)(pdu + hdr->hlen);
299 if (recv_digest != exp_digest) {
300 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
301 queue->idx, le32_to_cpu(recv_digest),
302 le32_to_cpu(exp_digest));
303 return -EPROTO;
304 }
305
306 return 0;
307 }
308
309 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
310 {
311 struct nvme_tcp_hdr *hdr = pdu;
312 u8 digest_len = nvmet_tcp_hdgst_len(queue);
313 u32 len;
314
315 len = le32_to_cpu(hdr->plen) - hdr->hlen -
316 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
317
318 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
319 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
320 return -EPROTO;
321 }
322
323 return 0;
324 }
325
326 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
327 {
328 kfree(cmd->iov);
329 sgl_free(cmd->req.sg);
330 cmd->iov = NULL;
331 cmd->req.sg = NULL;
332 }
333
334 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
335 {
336 struct bio_vec *iov = cmd->iov;
337 struct scatterlist *sg;
338 u32 length, offset, sg_offset;
339 int nr_pages;
340
341 length = cmd->pdu_len;
342 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
343 offset = cmd->rbytes_done;
344 cmd->sg_idx = offset / PAGE_SIZE;
345 sg_offset = offset % PAGE_SIZE;
346 sg = &cmd->req.sg[cmd->sg_idx];
347
348 while (length) {
349 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
350
351 bvec_set_page(iov, sg_page(sg), iov_len,
352 sg->offset + sg_offset);
353
354 length -= iov_len;
355 sg = sg_next(sg);
356 iov++;
357 sg_offset = 0;
358 }
359
360 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
361 nr_pages, cmd->pdu_len);
362 }
363
364 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
365 {
366 queue->rcv_state = NVMET_TCP_RECV_ERR;
367 if (queue->nvme_sq.ctrl)
368 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
369 else
370 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
371 }
372
373 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
374 {
375 queue->rcv_state = NVMET_TCP_RECV_ERR;
376 if (status == -EPIPE || status == -ECONNRESET)
377 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
378 else
379 nvmet_tcp_fatal_error(queue);
380 }
381
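/*
 * Parse the command's SGL descriptor: in-capsule (offset) SGLs are only
 * accepted for writes that fit within the port's inline_data_size, and the
 * data buffer is allocated as a scatterlist (plus a bio_vec array when data
 * is still expected from the host).
 */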
382 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
383 {
384 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
385 u32 len = le32_to_cpu(sgl->length);
386
387 if (!len)
388 return 0;
389
390 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
391 NVME_SGL_FMT_OFFSET)) {
392 if (!nvme_is_write(cmd->req.cmd))
393 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
394
395 if (len > cmd->req.port->inline_data_size)
396 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
397 cmd->pdu_len = len;
398 }
399 cmd->req.transfer_len += len;
400
401 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
402 if (!cmd->req.sg)
403 return NVME_SC_INTERNAL;
404 cmd->cur_sg = cmd->req.sg;
405
406 if (nvmet_tcp_has_data_in(cmd)) {
407 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
408 sizeof(*cmd->iov), GFP_KERNEL);
409 if (!cmd->iov)
410 goto err;
411 }
412
413 return 0;
414 err:
415 nvmet_tcp_free_cmd_buffers(cmd);
416 return NVME_SC_INTERNAL;
417 }
418
419 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
420 struct nvmet_tcp_cmd *cmd)
421 {
422 ahash_request_set_crypt(hash, cmd->req.sg,
423 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
424 crypto_ahash_digest(hash);
425 }
426
427 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
428 {
429 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
430 struct nvmet_tcp_queue *queue = cmd->queue;
431 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
432 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
433
434 cmd->offset = 0;
435 cmd->state = NVMET_TCP_SEND_DATA_PDU;
436
437 pdu->hdr.type = nvme_tcp_c2h_data;
438 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
439 NVME_TCP_F_DATA_SUCCESS : 0);
440 pdu->hdr.hlen = sizeof(*pdu);
441 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
442 pdu->hdr.plen =
443 cpu_to_le32(pdu->hdr.hlen + hdgst +
444 cmd->req.transfer_len + ddgst);
445 pdu->command_id = cmd->req.cqe->command_id;
446 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
447 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
448
449 if (queue->data_digest) {
450 pdu->hdr.flags |= NVME_TCP_F_DDGST;
451 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
452 }
453
454 if (cmd->queue->hdr_digest) {
455 pdu->hdr.flags |= NVME_TCP_F_HDGST;
456 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
457 }
458 }
459
460 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
461 {
462 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
463 struct nvmet_tcp_queue *queue = cmd->queue;
464 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
465
466 cmd->offset = 0;
467 cmd->state = NVMET_TCP_SEND_R2T;
468
469 pdu->hdr.type = nvme_tcp_r2t;
470 pdu->hdr.flags = 0;
471 pdu->hdr.hlen = sizeof(*pdu);
472 pdu->hdr.pdo = 0;
473 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
474
475 pdu->command_id = cmd->req.cmd->common.command_id;
476 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
477 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
478 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
479 if (cmd->queue->hdr_digest) {
480 pdu->hdr.flags |= NVME_TCP_F_HDGST;
481 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
482 }
483 }
484
485 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
486 {
487 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
488 struct nvmet_tcp_queue *queue = cmd->queue;
489 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
490
491 cmd->offset = 0;
492 cmd->state = NVMET_TCP_SEND_RESPONSE;
493
494 pdu->hdr.type = nvme_tcp_rsp;
495 pdu->hdr.flags = 0;
496 pdu->hdr.hlen = sizeof(*pdu);
497 pdu->hdr.pdo = 0;
498 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
499 if (cmd->queue->hdr_digest) {
500 pdu->hdr.flags |= NVME_TCP_F_HDGST;
501 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
502 }
503 }
504
505 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
506 {
507 struct llist_node *node;
508 struct nvmet_tcp_cmd *cmd;
509
510 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
511 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
512 list_add(&cmd->entry, &queue->resp_send_list);
513 queue->send_list_len++;
514 }
515 }
516
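/*
 * Pick the next command to transmit: drain responses queued by
 * nvmet_tcp_queue_response() from the lockless resp_list into the send list,
 * then prime the appropriate PDU (C2HData, R2T or response capsule) for the
 * selected command.
 */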
517 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
518 {
519 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
520 struct nvmet_tcp_cmd, entry);
521 if (!queue->snd_cmd) {
522 nvmet_tcp_process_resp_list(queue);
523 queue->snd_cmd =
524 list_first_entry_or_null(&queue->resp_send_list,
525 struct nvmet_tcp_cmd, entry);
526 if (unlikely(!queue->snd_cmd))
527 return NULL;
528 }
529
530 list_del_init(&queue->snd_cmd->entry);
531 queue->send_list_len--;
532
533 if (nvmet_tcp_need_data_out(queue->snd_cmd))
534 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
535 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
536 nvmet_setup_r2t_pdu(queue->snd_cmd);
537 else
538 nvmet_setup_response_pdu(queue->snd_cmd);
539
540 return queue->snd_cmd;
541 }
542
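/*
 * Fabrics .queue_response callback: defer queueing while the command that is
 * currently being received still has in-capsule data pending; otherwise add
 * the response to the lockless list and kick io_work on the queue's CPU.
 */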
543 static void nvmet_tcp_queue_response(struct nvmet_req *req)
544 {
545 struct nvmet_tcp_cmd *cmd =
546 container_of(req, struct nvmet_tcp_cmd, req);
547 struct nvmet_tcp_queue *queue = cmd->queue;
548 struct nvme_sgl_desc *sgl;
549 u32 len;
550
551 if (unlikely(cmd == queue->cmd)) {
552 sgl = &cmd->req.cmd->common.dptr.sgl;
553 len = le32_to_cpu(sgl->length);
554
555 /*
556 * Wait for inline data before processing the response.
557 * Avoid using helpers; this might happen before
558 * nvmet_req_init is completed.
559 */
560 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
561 len && len <= cmd->req.port->inline_data_size &&
562 nvme_is_write(cmd->req.cmd))
563 return;
564 }
565
566 llist_add(&cmd->lentry, &queue->resp_list);
567 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
568 }
569
570 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
571 {
572 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
573 nvmet_tcp_queue_response(&cmd->req);
574 else
575 cmd->req.execute(&cmd->req);
576 }
577
578 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
579 {
580 struct msghdr msg = {
581 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
582 };
583 struct bio_vec bvec;
584 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
585 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
586 int ret;
587
588 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
589 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
590 ret = sock_sendmsg(cmd->queue->sock, &msg);
591 if (ret <= 0)
592 return ret;
593
594 cmd->offset += ret;
595 left -= ret;
596
597 if (left)
598 return -EAGAIN;
599
600 cmd->state = NVMET_TCP_SEND_DATA;
601 cmd->offset = 0;
602 return 1;
603 }
604
605 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
606 {
607 struct nvmet_tcp_queue *queue = cmd->queue;
608 int ret;
609
610 while (cmd->cur_sg) {
611 struct msghdr msg = {
612 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
613 };
614 struct page *page = sg_page(cmd->cur_sg);
615 struct bio_vec bvec;
616 u32 left = cmd->cur_sg->length - cmd->offset;
617
618 if ((!last_in_batch && cmd->queue->send_list_len) ||
619 cmd->wbytes_done + left < cmd->req.transfer_len ||
620 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
621 msg.msg_flags |= MSG_MORE;
622
623 bvec_set_page(&bvec, page, left, cmd->offset);
624 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
625 ret = sock_sendmsg(cmd->queue->sock, &msg);
626 if (ret <= 0)
627 return ret;
628
629 cmd->offset += ret;
630 cmd->wbytes_done += ret;
631
632 /* Done with this sg? */
633 if (cmd->offset == cmd->cur_sg->length) {
634 cmd->cur_sg = sg_next(cmd->cur_sg);
635 cmd->offset = 0;
636 }
637 }
638
639 if (queue->data_digest) {
640 cmd->state = NVMET_TCP_SEND_DDGST;
641 cmd->offset = 0;
642 } else {
643 if (queue->nvme_sq.sqhd_disabled) {
644 cmd->queue->snd_cmd = NULL;
645 nvmet_tcp_put_cmd(cmd);
646 } else {
647 nvmet_setup_response_pdu(cmd);
648 }
649 }
650
651 if (queue->nvme_sq.sqhd_disabled)
652 nvmet_tcp_free_cmd_buffers(cmd);
653
654 return 1;
655
656 }
657
658 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
659 bool last_in_batch)
660 {
661 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
662 struct bio_vec bvec;
663 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
664 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
665 int ret;
666
667 if (!last_in_batch && cmd->queue->send_list_len)
668 msg.msg_flags |= MSG_MORE;
669 else
670 msg.msg_flags |= MSG_EOR;
671
672 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
673 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
674 ret = sock_sendmsg(cmd->queue->sock, &msg);
675 if (ret <= 0)
676 return ret;
677 cmd->offset += ret;
678 left -= ret;
679
680 if (left)
681 return -EAGAIN;
682
683 nvmet_tcp_free_cmd_buffers(cmd);
684 cmd->queue->snd_cmd = NULL;
685 nvmet_tcp_put_cmd(cmd);
686 return 1;
687 }
688
689 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
690 {
691 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
692 struct bio_vec bvec;
693 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
694 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
695 int ret;
696
697 if (!last_in_batch && cmd->queue->send_list_len)
698 msg.msg_flags |= MSG_MORE;
699 else
700 msg.msg_flags |= MSG_EOR;
701
702 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
703 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
704 ret = sock_sendmsg(cmd->queue->sock, &msg);
705 if (ret <= 0)
706 return ret;
707 cmd->offset += ret;
708 left -= ret;
709
710 if (left)
711 return -EAGAIN;
712
713 cmd->queue->snd_cmd = NULL;
714 return 1;
715 }
716
717 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
718 {
719 struct nvmet_tcp_queue *queue = cmd->queue;
720 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
721 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
722 struct kvec iov = {
723 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
724 .iov_len = left
725 };
726 int ret;
727
728 if (!last_in_batch && cmd->queue->send_list_len)
729 msg.msg_flags |= MSG_MORE;
730 else
731 msg.msg_flags |= MSG_EOR;
732
733 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
734 if (unlikely(ret <= 0))
735 return ret;
736
737 cmd->offset += ret;
738 left -= ret;
739
740 if (left)
741 return -EAGAIN;
742
743 if (queue->nvme_sq.sqhd_disabled) {
744 cmd->queue->snd_cmd = NULL;
745 nvmet_tcp_put_cmd(cmd);
746 } else {
747 nvmet_setup_response_pdu(cmd);
748 }
749 return 1;
750 }
751
752 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
753 bool last_in_batch)
754 {
755 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
756 int ret = 0;
757
758 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
759 cmd = nvmet_tcp_fetch_cmd(queue);
760 if (unlikely(!cmd))
761 return 0;
762 }
763
764 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
765 ret = nvmet_try_send_data_pdu(cmd);
766 if (ret <= 0)
767 goto done_send;
768 }
769
770 if (cmd->state == NVMET_TCP_SEND_DATA) {
771 ret = nvmet_try_send_data(cmd, last_in_batch);
772 if (ret <= 0)
773 goto done_send;
774 }
775
776 if (cmd->state == NVMET_TCP_SEND_DDGST) {
777 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
778 if (ret <= 0)
779 goto done_send;
780 }
781
782 if (cmd->state == NVMET_TCP_SEND_R2T) {
783 ret = nvmet_try_send_r2t(cmd, last_in_batch);
784 if (ret <= 0)
785 goto done_send;
786 }
787
788 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
789 ret = nvmet_try_send_response(cmd, last_in_batch);
790
791 done_send:
792 if (ret < 0) {
793 if (ret == -EAGAIN)
794 return 0;
795 return ret;
796 }
797
798 return 1;
799 }
800
801 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
802 int budget, int *sends)
803 {
804 int i, ret = 0;
805
806 for (i = 0; i < budget; i++) {
807 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
808 if (unlikely(ret < 0)) {
809 nvmet_tcp_socket_error(queue, ret);
810 goto done;
811 } else if (ret == 0) {
812 break;
813 }
814 (*sends)++;
815 }
816 done:
817 return ret;
818 }
819
820 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
821 {
822 queue->offset = 0;
823 queue->left = sizeof(struct nvme_tcp_hdr);
824 queue->cmd = NULL;
825 queue->rcv_state = NVMET_TCP_RECV_PDU;
826 }
827
828 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
829 {
830 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
831
832 ahash_request_free(queue->rcv_hash);
833 ahash_request_free(queue->snd_hash);
834 crypto_free_ahash(tfm);
835 }
836
837 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
838 {
839 struct crypto_ahash *tfm;
840
841 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
842 if (IS_ERR(tfm))
843 return PTR_ERR(tfm);
844
845 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
846 if (!queue->snd_hash)
847 goto free_tfm;
848 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
849
850 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
851 if (!queue->rcv_hash)
852 goto free_snd_hash;
853 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
854
855 return 0;
856 free_snd_hash:
857 ahash_request_free(queue->snd_hash);
858 free_tfm:
859 crypto_free_ahash(tfm);
860 return -ENOMEM;
861 }
862
863
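/*
 * Handle the ICReq PDU that starts every connection: validate pfv/hpda,
 * negotiate header and data digests (allocating crc32c ahash requests if
 * either is enabled), send the ICResp and move the queue to LIVE.
 */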
864 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
865 {
866 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
867 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
868 struct msghdr msg = {};
869 struct kvec iov;
870 int ret;
871
872 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
873 pr_err("bad nvme-tcp pdu length (%d)\n",
874 le32_to_cpu(icreq->hdr.plen));
875 nvmet_tcp_fatal_error(queue);
return -EPROTO; /* do not keep processing a malformed icreq */
876 }
877
878 if (icreq->pfv != NVME_TCP_PFV_1_0) {
879 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
880 return -EPROTO;
881 }
882
883 if (icreq->hpda != 0) {
884 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
885 icreq->hpda);
886 return -EPROTO;
887 }
888
889 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
890 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
891 if (queue->hdr_digest || queue->data_digest) {
892 ret = nvmet_tcp_alloc_crypto(queue);
893 if (ret)
894 return ret;
895 }
896
897 memset(icresp, 0, sizeof(*icresp));
898 icresp->hdr.type = nvme_tcp_icresp;
899 icresp->hdr.hlen = sizeof(*icresp);
900 icresp->hdr.pdo = 0;
901 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
902 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
903 icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
904 icresp->cpda = 0;
905 if (queue->hdr_digest)
906 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
907 if (queue->data_digest)
908 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
909
910 iov.iov_base = icresp;
911 iov.iov_len = sizeof(*icresp);
912 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
913 if (ret < 0)
914 return ret; /* queue removal will cleanup */
915
916 queue->state = NVMET_TCP_Q_LIVE;
917 nvmet_prepare_receive_pdu(queue);
918 return 0;
919 }
920
921 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
922 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
923 {
924 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
925 int ret;
926
927 /*
928 * This command has not been processed yet, hence we are trying to
929 * figure out if there is still pending data left to receive. If
930 * there isn't, we can simply prepare for the next pdu and bail out;
931 * otherwise we will need to prepare a buffer and receive the
932 * stale data before continuing forward.
933 */
934 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
935 data_len > cmd->req.port->inline_data_size) {
936 nvmet_prepare_receive_pdu(queue);
937 return;
938 }
939
940 ret = nvmet_tcp_map_data(cmd);
941 if (unlikely(ret)) {
942 pr_err("queue %d: failed to map data\n", queue->idx);
943 nvmet_tcp_fatal_error(queue);
944 return;
945 }
946
947 queue->rcv_state = NVMET_TCP_RECV_DATA;
948 nvmet_tcp_build_pdu_iovec(cmd);
949 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
950 }
951
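/*
 * Validate an incoming H2CData PDU: the ttag must reference an existing
 * command and the data offset must match what the command has already
 * received; otherwise the PDU is rejected as a protocol error. On success,
 * switch the receive state machine to NVMET_TCP_RECV_DATA for that command.
 */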
952 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
953 {
954 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
955 struct nvmet_tcp_cmd *cmd;
956
957 if (likely(queue->nr_cmds)) {
958 if (unlikely(data->ttag >= queue->nr_cmds)) {
959 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
960 queue->idx, data->ttag, queue->nr_cmds);
961 nvmet_tcp_fatal_error(queue);
962 return -EPROTO;
963 }
964 cmd = &queue->cmds[data->ttag];
965 } else {
966 cmd = &queue->connect;
967 }
968
969 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
970 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
971 data->ttag, le32_to_cpu(data->data_offset),
972 cmd->rbytes_done);
973 /* FIXME: use path and transport errors */
974 nvmet_req_complete(&cmd->req,
975 NVME_SC_INVALID_FIELD | NVME_SC_DNR);
976 return -EPROTO;
977 }
978
979 cmd->pdu_len = le32_to_cpu(data->data_length);
980 cmd->pdu_recv = 0;
981 nvmet_tcp_build_pdu_iovec(cmd);
982 queue->cmd = cmd;
983 queue->rcv_state = NVMET_TCP_RECV_DATA;
984
985 return 0;
986 }
987
988 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
989 {
990 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
991 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
992 struct nvmet_req *req;
993 int ret;
994
995 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
996 if (hdr->type != nvme_tcp_icreq) {
997 pr_err("unexpected pdu type (%d) before icreq\n",
998 hdr->type);
999 nvmet_tcp_fatal_error(queue);
1000 return -EPROTO;
1001 }
1002 return nvmet_tcp_handle_icreq(queue);
1003 }
1004
1005 if (unlikely(hdr->type == nvme_tcp_icreq)) {
1006 pr_err("queue %d: received icreq pdu in state %d\n",
1007 queue->idx, queue->state);
1008 nvmet_tcp_fatal_error(queue);
1009 return -EPROTO;
1010 }
1011
1012 if (hdr->type == nvme_tcp_h2c_data) {
1013 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1014 if (unlikely(ret))
1015 return ret;
1016 return 0;
1017 }
1018
1019 queue->cmd = nvmet_tcp_get_cmd(queue);
1020 if (unlikely(!queue->cmd)) {
1021 /* This should never happen */
1022 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1023 queue->idx, queue->nr_cmds, queue->send_list_len,
1024 nvme_cmd->common.opcode);
1025 nvmet_tcp_fatal_error(queue);
1026 return -ENOMEM;
1027 }
1028
1029 req = &queue->cmd->req;
1030 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1031
1032 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1033 &queue->nvme_sq, &nvmet_tcp_ops))) {
1034 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1035 req->cmd, req->cmd->common.command_id,
1036 req->cmd->common.opcode,
1037 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1038
1039 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1040 return 0;
1041 }
1042
1043 ret = nvmet_tcp_map_data(queue->cmd);
1044 if (unlikely(ret)) {
1045 pr_err("queue %d: failed to map data\n", queue->idx);
1046 if (nvmet_tcp_has_inline_data(queue->cmd))
1047 nvmet_tcp_fatal_error(queue);
1048 else
1049 nvmet_req_complete(req, ret);
1050 ret = -EAGAIN;
1051 goto out;
1052 }
1053
1054 if (nvmet_tcp_need_data_in(queue->cmd)) {
1055 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1056 queue->rcv_state = NVMET_TCP_RECV_DATA;
1057 nvmet_tcp_build_pdu_iovec(queue->cmd);
1058 return 0;
1059 }
1060 /* send back R2T */
1061 nvmet_tcp_queue_response(&queue->cmd->req);
1062 goto out;
1063 }
1064
1065 queue->cmd->req.execute(&queue->cmd->req);
1066 out:
1067 nvmet_prepare_receive_pdu(queue);
1068 return ret;
1069 }
1070
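/*
 * Expected header lengths for the host-originated PDU types we accept; used
 * to sanity check hdr->hlen before the rest of the header is consumed.
 */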
1071 static const u8 nvme_tcp_pdu_sizes[] = {
1072 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1073 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1074 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1075 };
1076
1077 static inline u8 nvmet_tcp_pdu_size(u8 type)
1078 {
1079 size_t idx = type;
1080
1081 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1082 nvme_tcp_pdu_sizes[idx]) ?
1083 nvme_tcp_pdu_sizes[idx] : 0;
1084 }
1085
1086 static inline bool nvmet_tcp_pdu_valid(u8 type)
1087 {
1088 switch (type) {
1089 case nvme_tcp_icreq:
1090 case nvme_tcp_cmd:
1091 case nvme_tcp_h2c_data:
1092 /* fallthru */
1093 return true;
1094 }
1095
1096 return false;
1097 }
1098
1099 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1100 {
1101 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1102 int len;
1103 struct kvec iov;
1104 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1105
1106 recv:
1107 iov.iov_base = (void *)&queue->pdu + queue->offset;
1108 iov.iov_len = queue->left;
1109 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1110 iov.iov_len, msg.msg_flags);
1111 if (unlikely(len < 0))
1112 return len;
1113
1114 queue->offset += len;
1115 queue->left -= len;
1116 if (queue->left)
1117 return -EAGAIN;
1118
1119 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1120 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1121
1122 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1123 pr_err("unexpected pdu type %d\n", hdr->type);
1124 nvmet_tcp_fatal_error(queue);
1125 return -EIO;
1126 }
1127
1128 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1129 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1130 return -EIO;
1131 }
1132
1133 queue->left = hdr->hlen - queue->offset + hdgst;
1134 goto recv;
1135 }
1136
1137 if (queue->hdr_digest &&
1138 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1139 nvmet_tcp_fatal_error(queue); /* fatal */
1140 return -EPROTO;
1141 }
1142
1143 if (queue->data_digest &&
1144 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1145 nvmet_tcp_fatal_error(queue); /* fatal */
1146 return -EPROTO;
1147 }
1148
1149 return nvmet_tcp_done_recv_pdu(queue);
1150 }
1151
1152 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1153 {
1154 struct nvmet_tcp_queue *queue = cmd->queue;
1155
1156 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1157 queue->offset = 0;
1158 queue->left = NVME_TCP_DIGEST_LENGTH;
1159 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1160 }
1161
1162 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1163 {
1164 struct nvmet_tcp_cmd *cmd = queue->cmd;
1165 int ret;
1166
1167 while (msg_data_left(&cmd->recv_msg)) {
1168 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1169 cmd->recv_msg.msg_flags);
1170 if (ret <= 0)
1171 return ret;
1172
1173 cmd->pdu_recv += ret;
1174 cmd->rbytes_done += ret;
1175 }
1176
1177 if (queue->data_digest) {
1178 nvmet_tcp_prep_recv_ddgst(cmd);
1179 return 0;
1180 }
1181
1182 if (cmd->rbytes_done == cmd->req.transfer_len)
1183 nvmet_tcp_execute_request(cmd);
1184
1185 nvmet_prepare_receive_pdu(queue);
1186 return 0;
1187 }
1188
1189 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1190 {
1191 struct nvmet_tcp_cmd *cmd = queue->cmd;
1192 int ret;
1193 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1194 struct kvec iov = {
1195 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1196 .iov_len = queue->left
1197 };
1198
1199 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1200 iov.iov_len, msg.msg_flags);
1201 if (unlikely(ret < 0))
1202 return ret;
1203
1204 queue->offset += ret;
1205 queue->left -= ret;
1206 if (queue->left)
1207 return -EAGAIN;
1208
1209 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1210 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1211 queue->idx, cmd->req.cmd->common.command_id,
1212 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1213 le32_to_cpu(cmd->exp_ddgst));
1214 nvmet_req_uninit(&cmd->req);
1215 nvmet_tcp_free_cmd_buffers(cmd);
1216 nvmet_tcp_fatal_error(queue);
1217 ret = -EPROTO;
1218 goto out;
1219 }
1220
1221 if (cmd->rbytes_done == cmd->req.transfer_len)
1222 nvmet_tcp_execute_request(cmd);
1223
1224 ret = 0;
1225 out:
1226 nvmet_prepare_receive_pdu(queue);
1227 return ret;
1228 }
1229
1230 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1231 {
1232 int result = 0;
1233
1234 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1235 return 0;
1236
1237 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1238 result = nvmet_tcp_try_recv_pdu(queue);
1239 if (result != 0)
1240 goto done_recv;
1241 }
1242
1243 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1244 result = nvmet_tcp_try_recv_data(queue);
1245 if (result != 0)
1246 goto done_recv;
1247 }
1248
1249 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1250 result = nvmet_tcp_try_recv_ddgst(queue);
1251 if (result != 0)
1252 goto done_recv;
1253 }
1254
1255 done_recv:
1256 if (result < 0) {
1257 if (result == -EAGAIN)
1258 return 0;
1259 return result;
1260 }
1261 return 1;
1262 }
1263
1264 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1265 int budget, int *recvs)
1266 {
1267 int i, ret = 0;
1268
1269 for (i = 0; i < budget; i++) {
1270 ret = nvmet_tcp_try_recv_one(queue);
1271 if (unlikely(ret < 0)) {
1272 nvmet_tcp_socket_error(queue, ret);
1273 goto done;
1274 } else if (ret == 0) {
1275 break;
1276 }
1277 (*recvs)++;
1278 }
1279 done:
1280 return ret;
1281 }
1282
1283 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1284 {
1285 spin_lock(&queue->state_lock);
1286 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1287 queue->state = NVMET_TCP_Q_DISCONNECTING;
1288 queue_work(nvmet_wq, &queue->release_work);
1289 }
1290 spin_unlock(&queue->state_lock);
1291 }
1292
1293 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1294 {
1295 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1296 }
1297
1298 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1299 int ops)
1300 {
1301 if (!idle_poll_period_usecs)
1302 return false;
1303
1304 if (ops)
1305 nvmet_tcp_arm_queue_deadline(queue);
1306
1307 return !time_after(jiffies, queue->poll_end);
1308 }
1309
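/*
 * Main per-queue worker: alternates bounded receive and send passes until no
 * more progress is made or the overall budget is exhausted, then requeues
 * itself if work is still pending or the idle polling deadline
 * (idle_poll_period_usecs) has not yet expired.
 */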
1310 static void nvmet_tcp_io_work(struct work_struct *w)
1311 {
1312 struct nvmet_tcp_queue *queue =
1313 container_of(w, struct nvmet_tcp_queue, io_work);
1314 bool pending;
1315 int ret, ops = 0;
1316
1317 do {
1318 pending = false;
1319
1320 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1321 if (ret > 0)
1322 pending = true;
1323 else if (ret < 0)
1324 return;
1325
1326 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1327 if (ret > 0)
1328 pending = true;
1329 else if (ret < 0)
1330 return;
1331
1332 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1333
1334 /*
1335 * Requeue the worker if idle deadline period is in progress or any
1336 * ops activity was recorded during the do-while loop above.
1337 */
1338 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1339 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1340 }
1341
1342 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1343 struct nvmet_tcp_cmd *c)
1344 {
1345 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1346
1347 c->queue = queue;
1348 c->req.port = queue->port->nport;
1349
1350 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1351 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1352 if (!c->cmd_pdu)
1353 return -ENOMEM;
1354 c->req.cmd = &c->cmd_pdu->cmd;
1355
1356 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1357 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1358 if (!c->rsp_pdu)
1359 goto out_free_cmd;
1360 c->req.cqe = &c->rsp_pdu->cqe;
1361
1362 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1363 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1364 if (!c->data_pdu)
1365 goto out_free_rsp;
1366
1367 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1368 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1369 if (!c->r2t_pdu)
1370 goto out_free_data;
1371
1372 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1373
1374 list_add_tail(&c->entry, &queue->free_list);
1375
1376 return 0;
1377 out_free_data:
1378 page_frag_free(c->data_pdu);
1379 out_free_rsp:
1380 page_frag_free(c->rsp_pdu);
1381 out_free_cmd:
1382 page_frag_free(c->cmd_pdu);
1383 return -ENOMEM;
1384 }
1385
1386 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1387 {
1388 page_frag_free(c->r2t_pdu);
1389 page_frag_free(c->data_pdu);
1390 page_frag_free(c->rsp_pdu);
1391 page_frag_free(c->cmd_pdu);
1392 }
1393
1394 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1395 {
1396 struct nvmet_tcp_cmd *cmds;
1397 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1398
1399 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1400 if (!cmds)
1401 goto out;
1402
1403 for (i = 0; i < nr_cmds; i++) {
1404 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1405 if (ret)
1406 goto out_free;
1407 }
1408
1409 queue->cmds = cmds;
1410
1411 return 0;
1412 out_free:
1413 while (--i >= 0)
1414 nvmet_tcp_free_cmd(cmds + i);
1415 kfree(cmds);
1416 out:
1417 return ret;
1418 }
1419
1420 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1421 {
1422 struct nvmet_tcp_cmd *cmds = queue->cmds;
1423 int i;
1424
1425 for (i = 0; i < queue->nr_cmds; i++)
1426 nvmet_tcp_free_cmd(cmds + i);
1427
1428 nvmet_tcp_free_cmd(&queue->connect);
1429 kfree(cmds);
1430 }
1431
1432 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1433 {
1434 struct socket *sock = queue->sock;
1435
1436 write_lock_bh(&sock->sk->sk_callback_lock);
1437 sock->sk->sk_data_ready = queue->data_ready;
1438 sock->sk->sk_state_change = queue->state_change;
1439 sock->sk->sk_write_space = queue->write_space;
1440 sock->sk->sk_user_data = NULL;
1441 write_unlock_bh(&sock->sk->sk_callback_lock);
1442 }
1443
1444 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1445 {
1446 struct nvmet_tcp_cmd *cmd = queue->cmds;
1447 int i;
1448
1449 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1450 if (nvmet_tcp_need_data_in(cmd))
1451 nvmet_req_uninit(&cmd->req);
1452 }
1453
1454 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1455 /* failed in connect */
1456 nvmet_req_uninit(&queue->connect.req);
1457 }
1458 }
1459
1460 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1461 {
1462 struct nvmet_tcp_cmd *cmd = queue->cmds;
1463 int i;
1464
1465 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1466 if (nvmet_tcp_need_data_in(cmd))
1467 nvmet_tcp_free_cmd_buffers(cmd);
1468 }
1469
1470 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1471 nvmet_tcp_free_cmd_buffers(&queue->connect);
1472 }
1473
1474 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1475 {
1476 struct page *page;
1477 struct nvmet_tcp_queue *queue =
1478 container_of(w, struct nvmet_tcp_queue, release_work);
1479
1480 mutex_lock(&nvmet_tcp_queue_mutex);
1481 list_del_init(&queue->queue_list);
1482 mutex_unlock(&nvmet_tcp_queue_mutex);
1483
1484 nvmet_tcp_restore_socket_callbacks(queue);
1485 cancel_work_sync(&queue->io_work);
1486 /* stop accepting incoming data */
1487 queue->rcv_state = NVMET_TCP_RECV_ERR;
1488
1489 nvmet_tcp_uninit_data_in_cmds(queue);
1490 nvmet_sq_destroy(&queue->nvme_sq);
1491 cancel_work_sync(&queue->io_work);
1492 nvmet_tcp_free_cmd_data_in_buffers(queue);
1493 sock_release(queue->sock);
1494 nvmet_tcp_free_cmds(queue);
1495 if (queue->hdr_digest || queue->data_digest)
1496 nvmet_tcp_free_crypto(queue);
1497 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1498
1499 page = virt_to_head_page(queue->pf_cache.va);
1500 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1501 kfree(queue);
1502 }
1503
1504 static void nvmet_tcp_data_ready(struct sock *sk)
1505 {
1506 struct nvmet_tcp_queue *queue;
1507
1508 trace_sk_data_ready(sk);
1509
1510 read_lock_bh(&sk->sk_callback_lock);
1511 queue = sk->sk_user_data;
1512 if (likely(queue))
1513 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1514 read_unlock_bh(&sk->sk_callback_lock);
1515 }
1516
1517 static void nvmet_tcp_write_space(struct sock *sk)
1518 {
1519 struct nvmet_tcp_queue *queue;
1520
1521 read_lock_bh(&sk->sk_callback_lock);
1522 queue = sk->sk_user_data;
1523 if (unlikely(!queue))
1524 goto out;
1525
1526 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1527 queue->write_space(sk);
1528 goto out;
1529 }
1530
1531 if (sk_stream_is_writeable(sk)) {
1532 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1533 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1534 }
1535 out:
1536 read_unlock_bh(&sk->sk_callback_lock);
1537 }
1538
1539 static void nvmet_tcp_state_change(struct sock *sk)
1540 {
1541 struct nvmet_tcp_queue *queue;
1542
1543 read_lock_bh(&sk->sk_callback_lock);
1544 queue = sk->sk_user_data;
1545 if (!queue)
1546 goto done;
1547
1548 switch (sk->sk_state) {
1549 case TCP_FIN_WAIT2:
1550 case TCP_LAST_ACK:
1551 break;
1552 case TCP_FIN_WAIT1:
1553 case TCP_CLOSE_WAIT:
1554 case TCP_CLOSE:
1555 /* FALLTHRU */
1556 nvmet_tcp_schedule_release_queue(queue);
1557 break;
1558 default:
1559 pr_warn("queue %d unhandled state %d\n",
1560 queue->idx, sk->sk_state);
1561 }
1562 done:
1563 read_unlock_bh(&sk->sk_callback_lock);
1564 }
1565
1566 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1567 {
1568 struct socket *sock = queue->sock;
1569 struct inet_sock *inet = inet_sk(sock->sk);
1570 int ret;
1571
1572 ret = kernel_getsockname(sock,
1573 (struct sockaddr *)&queue->sockaddr);
1574 if (ret < 0)
1575 return ret;
1576
1577 ret = kernel_getpeername(sock,
1578 (struct sockaddr *)&queue->sockaddr_peer);
1579 if (ret < 0)
1580 return ret;
1581
1582 /*
1583 * Cleanup whatever is sitting in the TCP transmit queue on socket
1584 * close. This is done to prevent stale data from being sent should
1585 * the network connection be restored before TCP times out.
1586 */
1587 sock_no_linger(sock->sk);
1588
1589 if (so_priority > 0)
1590 sock_set_priority(sock->sk, so_priority);
1591
1592 /* Set socket type of service */
1593 if (inet->rcv_tos > 0)
1594 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1595
1596 ret = 0;
1597 write_lock_bh(&sock->sk->sk_callback_lock);
1598 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1599 /*
1600 * If the socket is already closing, don't even start
1601 * consuming it
1602 */
1603 ret = -ENOTCONN;
1604 } else {
1605 sock->sk->sk_user_data = queue;
1606 queue->data_ready = sock->sk->sk_data_ready;
1607 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1608 queue->state_change = sock->sk->sk_state_change;
1609 sock->sk->sk_state_change = nvmet_tcp_state_change;
1610 queue->write_space = sock->sk->sk_write_space;
1611 sock->sk->sk_write_space = nvmet_tcp_write_space;
1612 if (idle_poll_period_usecs)
1613 nvmet_tcp_arm_queue_deadline(queue);
1614 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1615 }
1616 write_unlock_bh(&sock->sk->sk_callback_lock);
1617
1618 return ret;
1619 }
1620
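/*
 * Set up a queue for a freshly accepted socket: allocate an index and the
 * special connect command, initialize the nvmet SQ, link the queue into the
 * global list and install the socket callbacks that drive io_work.
 */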
1621 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1622 struct socket *newsock)
1623 {
1624 struct nvmet_tcp_queue *queue;
1625 int ret;
1626
1627 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1628 if (!queue)
1629 return -ENOMEM;
1630
1631 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1632 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1633 queue->sock = newsock;
1634 queue->port = port;
1635 queue->nr_cmds = 0;
1636 spin_lock_init(&queue->state_lock);
1637 queue->state = NVMET_TCP_Q_CONNECTING;
1638 INIT_LIST_HEAD(&queue->free_list);
1639 init_llist_head(&queue->resp_list);
1640 INIT_LIST_HEAD(&queue->resp_send_list);
1641
1642 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1643 if (queue->idx < 0) {
1644 ret = queue->idx;
1645 goto out_free_queue;
1646 }
1647
1648 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1649 if (ret)
1650 goto out_ida_remove;
1651
1652 ret = nvmet_sq_init(&queue->nvme_sq);
1653 if (ret)
1654 goto out_free_connect;
1655
1656 nvmet_prepare_receive_pdu(queue);
1657
1658 mutex_lock(&nvmet_tcp_queue_mutex);
1659 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1660 mutex_unlock(&nvmet_tcp_queue_mutex);
1661
1662 ret = nvmet_tcp_set_queue_sock(queue);
1663 if (ret)
1664 goto out_destroy_sq;
1665
1666 return 0;
1667 out_destroy_sq:
1668 mutex_lock(&nvmet_tcp_queue_mutex);
1669 list_del_init(&queue->queue_list);
1670 mutex_unlock(&nvmet_tcp_queue_mutex);
1671 nvmet_sq_destroy(&queue->nvme_sq);
1672 out_free_connect:
1673 nvmet_tcp_free_cmd(&queue->connect);
1674 out_ida_remove:
1675 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1676 out_free_queue:
1677 kfree(queue);
1678 return ret;
1679 }
1680
1681 static void nvmet_tcp_accept_work(struct work_struct *w)
1682 {
1683 struct nvmet_tcp_port *port =
1684 container_of(w, struct nvmet_tcp_port, accept_work);
1685 struct socket *newsock;
1686 int ret;
1687
1688 while (true) {
1689 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1690 if (ret < 0) {
1691 if (ret != -EAGAIN)
1692 pr_warn("failed to accept err=%d\n", ret);
1693 return;
1694 }
1695 ret = nvmet_tcp_alloc_queue(port, newsock);
1696 if (ret) {
1697 pr_err("failed to allocate queue\n");
1698 sock_release(newsock);
1699 }
1700 }
1701 }
1702
1703 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1704 {
1705 struct nvmet_tcp_port *port;
1706
1707 trace_sk_data_ready(sk);
1708
1709 read_lock_bh(&sk->sk_callback_lock);
1710 port = sk->sk_user_data;
1711 if (!port)
1712 goto out;
1713
1714 if (sk->sk_state == TCP_LISTEN)
1715 queue_work(nvmet_wq, &port->accept_work);
1716 out:
1717 read_unlock_bh(&sk->sk_callback_lock);
1718 }
1719
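/*
 * Fabrics .add_port callback: resolve the configured address, create a
 * listening TCP socket with the requested socket options and hook its
 * data_ready callback so new connections are handled by the accept work.
 */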
1720 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1721 {
1722 struct nvmet_tcp_port *port;
1723 __kernel_sa_family_t af;
1724 int ret;
1725
1726 port = kzalloc(sizeof(*port), GFP_KERNEL);
1727 if (!port)
1728 return -ENOMEM;
1729
1730 switch (nport->disc_addr.adrfam) {
1731 case NVMF_ADDR_FAMILY_IP4:
1732 af = AF_INET;
1733 break;
1734 case NVMF_ADDR_FAMILY_IP6:
1735 af = AF_INET6;
1736 break;
1737 default:
1738 pr_err("address family %d not supported\n",
1739 nport->disc_addr.adrfam);
1740 ret = -EINVAL;
1741 goto err_port;
1742 }
1743
1744 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1745 nport->disc_addr.trsvcid, &port->addr);
1746 if (ret) {
1747 pr_err("malformed ip/port passed: %s:%s\n",
1748 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1749 goto err_port;
1750 }
1751
1752 port->nport = nport;
1753 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1754 if (port->nport->inline_data_size < 0)
1755 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1756
1757 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1758 IPPROTO_TCP, &port->sock);
1759 if (ret) {
1760 pr_err("failed to create a socket\n");
1761 goto err_port;
1762 }
1763
1764 port->sock->sk->sk_user_data = port;
1765 port->data_ready = port->sock->sk->sk_data_ready;
1766 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1767 sock_set_reuseaddr(port->sock->sk);
1768 tcp_sock_set_nodelay(port->sock->sk);
1769 if (so_priority > 0)
1770 sock_set_priority(port->sock->sk, so_priority);
1771
1772 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1773 sizeof(port->addr));
1774 if (ret) {
1775 pr_err("failed to bind port socket %d\n", ret);
1776 goto err_sock;
1777 }
1778
1779 ret = kernel_listen(port->sock, 128);
1780 if (ret) {
1781 pr_err("failed to listen %d on port sock\n", ret);
1782 goto err_sock;
1783 }
1784
1785 nport->priv = port;
1786 pr_info("enabling port %d (%pISpc)\n",
1787 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1788
1789 return 0;
1790
1791 err_sock:
1792 sock_release(port->sock);
1793 err_port:
1794 kfree(port);
1795 return ret;
1796 }
1797
1798 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1799 {
1800 struct nvmet_tcp_queue *queue;
1801
1802 mutex_lock(&nvmet_tcp_queue_mutex);
1803 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1804 if (queue->port == port)
1805 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1806 mutex_unlock(&nvmet_tcp_queue_mutex);
1807 }
1808
1809 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1810 {
1811 struct nvmet_tcp_port *port = nport->priv;
1812
1813 write_lock_bh(&port->sock->sk->sk_callback_lock);
1814 port->sock->sk->sk_data_ready = port->data_ready;
1815 port->sock->sk->sk_user_data = NULL;
1816 write_unlock_bh(&port->sock->sk->sk_callback_lock);
1817 cancel_work_sync(&port->accept_work);
1818 /*
1819 * Destroy the remaining queues, which do not belong to any
1820 * controller yet.
1821 */
1822 nvmet_tcp_destroy_port_queues(port);
1823
1824 sock_release(port->sock);
1825 kfree(port);
1826 }
1827
1828 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1829 {
1830 struct nvmet_tcp_queue *queue;
1831
1832 mutex_lock(&nvmet_tcp_queue_mutex);
1833 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1834 if (queue->nvme_sq.ctrl == ctrl)
1835 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1836 mutex_unlock(&nvmet_tcp_queue_mutex);
1837 }
1838
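/*
 * Fabrics .install_queue callback, invoked once the connect command has
 * established the queue size: the command pool is sized to twice the
 * submission queue depth, presumably to leave headroom for commands that are
 * still transferring data while new ones arrive.
 */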
1839 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1840 {
1841 struct nvmet_tcp_queue *queue =
1842 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1843
1844 if (sq->qid == 0) {
1845 /* Let inflight controller teardown complete */
1846 flush_workqueue(nvmet_wq);
1847 }
1848
1849 queue->nr_cmds = sq->size * 2;
1850 if (nvmet_tcp_alloc_cmds(queue))
1851 return NVME_SC_INTERNAL;
1852 return 0;
1853 }
1854
1855 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1856 struct nvmet_port *nport, char *traddr)
1857 {
1858 struct nvmet_tcp_port *port = nport->priv;
1859
1860 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1861 struct nvmet_tcp_cmd *cmd =
1862 container_of(req, struct nvmet_tcp_cmd, req);
1863 struct nvmet_tcp_queue *queue = cmd->queue;
1864
1865 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1866 } else {
1867 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1868 }
1869 }
1870
1871 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
1872 .owner = THIS_MODULE,
1873 .type = NVMF_TRTYPE_TCP,
1874 .msdbd = 1,
1875 .add_port = nvmet_tcp_add_port,
1876 .remove_port = nvmet_tcp_remove_port,
1877 .queue_response = nvmet_tcp_queue_response,
1878 .delete_ctrl = nvmet_tcp_delete_ctrl,
1879 .install_queue = nvmet_tcp_install_queue,
1880 .disc_traddr = nvmet_tcp_disc_port_addr,
1881 };
1882
1883 static int __init nvmet_tcp_init(void)
1884 {
1885 int ret;
1886
1887 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
1888 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1889 if (!nvmet_tcp_wq)
1890 return -ENOMEM;
1891
1892 ret = nvmet_register_transport(&nvmet_tcp_ops);
1893 if (ret)
1894 goto err;
1895
1896 return 0;
1897 err:
1898 destroy_workqueue(nvmet_tcp_wq);
1899 return ret;
1900 }
1901
1902 static void __exit nvmet_tcp_exit(void)
1903 {
1904 struct nvmet_tcp_queue *queue;
1905
1906 nvmet_unregister_transport(&nvmet_tcp_ops);
1907
1908 flush_workqueue(nvmet_wq);
1909 mutex_lock(&nvmet_tcp_queue_mutex);
1910 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1911 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1912 mutex_unlock(&nvmet_tcp_queue_mutex);
1913 flush_workqueue(nvmet_wq);
1914
1915 destroy_workqueue(nvmet_tcp_wq);
1916 }
1917
1918 module_init(nvmet_tcp_init);
1919 module_exit(nvmet_tcp_exit);
1920
1921 MODULE_LICENSE("GPL v2");
1922 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
1923