1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright (c) 2025 Stefan Metzmacher
4 */
5
6 #ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
7 #define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
8
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
17
/*
 * The life cycle of an SMB-Direct socket.
 *
 * SMBDIRECT_SOCKET_CREATED must remain the first (zero) value:
 * smbdirect_socket_init() relies on memset() producing it (there is a
 * BUILD_BUG_ON() checking this).
 *
 * The *_NEEDED/*_RUNNING/*_FAILED triples track the individual connect
 * phases (address resolution, route resolution, RDMA connect,
 * SMB-Direct negotiation) on the way to SMBDIRECT_SOCKET_CONNECTED.
 */
enum smbdirect_socket_status {
	SMBDIRECT_SOCKET_CREATED,
	SMBDIRECT_SOCKET_LISTENING,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING,
	SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING,
	SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED,
	SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED,
	SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING,
	SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED,
	SMBDIRECT_SOCKET_NEGOTIATE_NEEDED,
	SMBDIRECT_SOCKET_NEGOTIATE_RUNNING,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_ERROR,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED
};
39
40 static __always_inline
smbdirect_socket_status_string(enum smbdirect_socket_status status)41 const char *smbdirect_socket_status_string(enum smbdirect_socket_status status)
42 {
43 switch (status) {
44 case SMBDIRECT_SOCKET_CREATED:
45 return "CREATED";
46 case SMBDIRECT_SOCKET_LISTENING:
47 return "LISTENING";
48 case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
49 return "RESOLVE_ADDR_NEEDED";
50 case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
51 return "RESOLVE_ADDR_RUNNING";
52 case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
53 return "RESOLVE_ADDR_FAILED";
54 case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
55 return "RESOLVE_ROUTE_NEEDED";
56 case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
57 return "RESOLVE_ROUTE_RUNNING";
58 case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
59 return "RESOLVE_ROUTE_FAILED";
60 case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
61 return "RDMA_CONNECT_NEEDED";
62 case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
63 return "RDMA_CONNECT_RUNNING";
64 case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
65 return "RDMA_CONNECT_FAILED";
66 case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
67 return "NEGOTIATE_NEEDED";
68 case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
69 return "NEGOTIATE_RUNNING";
70 case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
71 return "NEGOTIATE_FAILED";
72 case SMBDIRECT_SOCKET_CONNECTED:
73 return "CONNECTED";
74 case SMBDIRECT_SOCKET_ERROR:
75 return "ERROR";
76 case SMBDIRECT_SOCKET_DISCONNECTING:
77 return "DISCONNECTING";
78 case SMBDIRECT_SOCKET_DISCONNECTED:
79 return "DISCONNECTED";
80 case SMBDIRECT_SOCKET_DESTROYED:
81 return "DESTROYED";
82 }
83
84 return "<unknown>";
85 }
86
87 /*
88 * This can be used with %1pe to print errors as strings or '0'
89 * And it avoids warnings like: warn: passing zero to 'ERR_PTR'
90 * from smatch -p=kernel --pedantic
91 */
92 static __always_inline
SMBDIRECT_DEBUG_ERR_PTR(long error)93 const void * __must_check SMBDIRECT_DEBUG_ERR_PTR(long error)
94 {
95 if (error == 0)
96 return NULL;
97 return ERR_PTR(error);
98 }
99
/*
 * The state of the keepalive exchange, driven by the idle handling
 * (see smbdirect_socket.idle).
 *
 * NOTE(review): the per-value semantics below are inferred from the
 * names — confirm against the users of this header:
 * NONE:    no keepalive activity in flight
 * PENDING: a keepalive request was scheduled but not yet sent
 * SENT:    a keepalive request was sent, response outstanding
 */
enum smbdirect_keepalive_status {
	SMBDIRECT_KEEPALIVE_NONE,
	SMBDIRECT_KEEPALIVE_PENDING,
	SMBDIRECT_KEEPALIVE_SENT
};
105
/*
 * The state of a single SMB-Direct connection endpoint (including
 * listeners). Some sub-structures are only used on one side, see
 * mr_io (client) and rw_io (server). Initialized via
 * smbdirect_socket_init().
 */
struct smbdirect_socket {
	/* the current state of the socket */
	enum smbdirect_socket_status status;
	/* wait queue for status changes */
	wait_queue_head_t status_wait;
	/* the first fatal error on the socket, 0 means no error */
	int first_error;

	/*
	 * This points to the workqueues to
	 * be used for this socket.
	 */
	struct {
		struct workqueue_struct *accept;
		struct workqueue_struct *connect;
		struct workqueue_struct *idle;
		struct workqueue_struct *refill;
		struct workqueue_struct *immediate;
		struct workqueue_struct *cleanup;
	} workqueues;

	/*
	 * Work item used to trigger a disconnect; initialized disabled
	 * with a placeholder handler, see smbdirect_socket_init().
	 */
	struct work_struct disconnect_work;

	/*
	 * The reference counts.
	 */
	struct {
		/*
		 * This holds the references by the
		 * frontend, typically the smb layer.
		 *
		 * It is typically 1 and a disconnect
		 * will happen if it reaches 0.
		 */
		struct kref disconnect;

		/*
		 * This holds the reference by the
		 * backend, the code that manages
		 * the lifetime of the whole
		 * struct smbdirect_socket,
		 * if this reaches 0 it will
		 * be freed.
		 *
		 * Can be REFCOUNT_MAX if it is part
		 * of another structure.
		 *
		 * This is equal or higher than
		 * the disconnect refcount.
		 */
		struct kref destroy;
	} refs;

	/* RDMA related */
	struct {
		struct rdma_cm_id *cm_id;
		/*
		 * The expected event in our current
		 * cm_id->event_handler, all other events
		 * are treated as an error.
		 */
		enum rdma_cm_event_type expected_event;
		/*
		 * This is for iWarp MPA v1
		 */
		bool legacy_iwarp;
	} rdma;

	/* IB verbs related */
	struct {
		struct ib_pd *pd;
		enum ib_poll_context poll_ctx;
		struct ib_cq *send_cq;
		struct ib_cq *recv_cq;

		/*
		 * shortcuts for rdma.cm_id->{qp,device};
		 */
		struct ib_qp *qp;
		struct ib_device *dev;
	} ib;

	/* protocol parameters, see struct smbdirect_socket_parameters */
	struct smbdirect_socket_parameters parameters;

	/*
	 * The state for connect/negotiation
	 */
	struct {
		spinlock_t lock;
		struct work_struct work;
	} connect;

	/*
	 * The state for keepalive and timeout handling
	 */
	struct {
		enum smbdirect_keepalive_status keepalive;
		struct work_struct immediate_work;
		struct delayed_work timer_work;
	} idle;

	/*
	 * The state for listen sockets
	 */
	struct {
		spinlock_t lock;
		struct list_head pending;
		struct list_head ready;
		wait_queue_head_t wait_queue;
		/*
		 * This starts as -1 and a value != -1
		 * means this socket was in LISTENING state
		 * before. Note the valid backlog can
		 * only be > 0.
		 */
		int backlog;
	} listen;

	/*
	 * The state for sockets waiting
	 * for accept, either still waiting
	 * for the negotiation to finish
	 * or already ready with a usable
	 * connection.
	 */
	struct {
		struct smbdirect_socket *listener;
		struct list_head list;
	} accept;

	/*
	 * The state for posted send buffers
	 */
	struct {
		/*
		 * Memory pools for preallocating
		 * smbdirect_send_io buffers
		 */
		struct {
			struct kmem_cache *cache;
			mempool_t *pool;
			gfp_t gfp_mask;
		} mem;

		/*
		 * This is a coordination for smbdirect_send_batch.
		 *
		 * There's only one possible credit, which means
		 * only one instance is running at a time.
		 */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} bcredits;

		/*
		 * The local credit state for ib_post_send()
		 */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} lcredits;

		/*
		 * The remote credit state for the send side
		 */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} credits;

		/*
		 * The state about posted/pending sends
		 */
		struct {
			atomic_t count;
			/*
			 * woken when count reached zero
			 */
			wait_queue_head_t zero_wait_queue;
		} pending;
	} send_io;

	/*
	 * The state for posted receive buffers
	 */
	struct {
		/*
		 * The type of PDU we are expecting
		 */
		enum {
			SMBDIRECT_EXPECT_NEGOTIATE_REQ = 1,
			SMBDIRECT_EXPECT_NEGOTIATE_REP = 2,
			SMBDIRECT_EXPECT_DATA_TRANSFER = 3,
		} expected;

		/*
		 * Memory pools for preallocating
		 * smbdirect_recv_io buffers
		 */
		struct {
			struct kmem_cache *cache;
			mempool_t *pool;
			gfp_t gfp_mask;
		} mem;

		/*
		 * The list of free smbdirect_recv_io
		 * structures
		 */
		struct {
			struct list_head list;
			spinlock_t lock;
		} free;

		/*
		 * The state for posted recv_io messages
		 * and the refill work struct.
		 */
		struct {
			atomic_t count;
			struct work_struct refill_work;
		} posted;

		/*
		 * The credit state for the recv side
		 *
		 * NOTE(review): target appears to be the number of
		 * credits we aim to grant the peer — confirm against
		 * the users of this header.
		 */
		struct {
			u16 target;
			atomic_t available;
			atomic_t count;
		} credits;

		/*
		 * The list of arrived non-empty smbdirect_recv_io
		 * structures
		 *
		 * This represents the reassembly queue.
		 */
		struct {
			struct list_head list;
			spinlock_t lock;
			wait_queue_head_t wait_queue;
			/* total data length of reassembly queue */
			int data_length;
			int queue_length;
			/* the offset to first buffer in reassembly queue */
			int first_entry_offset;
			/*
			 * Indicate if we have received a full packet on the
			 * connection. This is used to identify the first SMBD
			 * packet of an assembled payload (SMB packet) in
			 * reassembly queue so we can return a RFC1002 length to
			 * upper layer to indicate the length of the SMB packet
			 * received
			 */
			bool full_packet_received;
		} reassembly;
	} recv_io;

	/*
	 * The state for Memory registrations on the client
	 */
	struct {
		enum ib_mr_type type;

		/*
		 * The list of free smbdirect_mr_io
		 * structures
		 */
		struct {
			struct list_head list;
			spinlock_t lock;
		} all;

		/*
		 * The number of available MRs ready for memory registration
		 */
		struct {
			atomic_t count;
			wait_queue_head_t wait_queue;
		} ready;

		/*
		 * The number of used MRs
		 */
		struct {
			atomic_t count;
		} used;
	} mr_io;

	/*
	 * The state for RDMA read/write requests on the server
	 */
	struct {
		/*
		 * Memory hints for
		 * smbdirect_rw_io structs
		 */
		struct {
			gfp_t gfp_mask;
		} mem;

		/*
		 * The credit state for the send side
		 */
		struct {
			/*
			 * The maximum number of rw credits
			 */
			size_t max;
			/*
			 * The number of pages per credit
			 */
			size_t num_pages;
			atomic_t count;
			wait_queue_head_t wait_queue;
		} credits;
	} rw_io;

	/*
	 * For debug purposes
	 */
	struct {
		u64 get_receive_buffer;
		u64 put_receive_buffer;
		u64 enqueue_reassembly_queue;
		u64 dequeue_reassembly_queue;
		u64 send_empty;
	} statistics;

	/*
	 * Pluggable debug logging. The defaults installed by
	 * smbdirect_socket_init() only WARN; the user of the socket is
	 * expected to install its own callbacks.
	 */
	struct {
		void *private_ptr;
		bool (*needed)(struct smbdirect_socket *sc,
			       void *private_ptr,
			       unsigned int lvl,
			       unsigned int cls);
		void (*vaprintf)(struct smbdirect_socket *sc,
				 const char *func,
				 unsigned int line,
				 void *private_ptr,
				 unsigned int lvl,
				 unsigned int cls,
				 struct va_format *vaf);
	} logging;
};
449
/*
 * Placeholder work handler installed by smbdirect_socket_init() for
 * work items that are immediately disabled again; it must never run.
 */
static void __smbdirect_socket_disabled_work(struct work_struct *work)
{
	/*
	 * Should never be called as disable_[delayed_]work_sync() was used.
	 */
	WARN_ON_ONCE(1);
}
457
/*
 * Default logging.needed callback: warns once and suppresses all
 * logging until the user of the socket installs a real callback.
 */
static bool __smbdirect_log_needed(struct smbdirect_socket *sc,
				   void *private_ptr,
				   unsigned int lvl,
				   unsigned int cls)
{
	/*
	 * Should never be called, the caller should
	 * set its own functions.
	 */
	WARN_ON_ONCE(1);
	return false;
}
470
/*
 * Default logging.vaprintf callback: warns once and drops the
 * message; the user of the socket should install a real callback.
 */
static void __smbdirect_log_vaprintf(struct smbdirect_socket *sc,
				     const char *func,
				     unsigned int line,
				     void *private_ptr,
				     unsigned int lvl,
				     unsigned int cls,
				     struct va_format *vaf)
{
	/*
	 * Should never be called, the caller should
	 * set its own functions.
	 */
	WARN_ON_ONCE(1);
}
485
/*
 * Format a log message and hand it to the socket's vaprintf callback
 * as a struct va_format. Only called via the log macros below, after
 * sc->logging.needed() approved the (lvl, cls) combination.
 *
 * The prototype carries __printf(6, 7) so the compiler checks the
 * format string against the arguments.
 */
__printf(6, 7)
static void __smbdirect_log_printf(struct smbdirect_socket *sc,
				   const char *func,
				   unsigned int line,
				   unsigned int lvl,
				   unsigned int cls,
				   const char *fmt,
				   ...);
__maybe_unused
static void __smbdirect_log_printf(struct smbdirect_socket *sc,
				   const char *func,
				   unsigned int line,
				   unsigned int lvl,
				   unsigned int cls,
				   const char *fmt,
				   ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	sc->logging.vaprintf(sc,
			     func,
			     line,
			     sc->logging.private_ptr,
			     lvl,
			     cls,
			     &vaf);
	va_end(args);
}
520
/*
 * Per-class logging wrappers: each smbdirect_log_*() macro tags the
 * message with a SMBDIRECT_LOG_* class (declared elsewhere) and only
 * emits it when the socket's logging.needed() callback approves the
 * (lvl, cls) combination. __func__/__LINE__ are captured at the
 * call site.
 */
#define ___smbdirect_log_generic(sc, func, line, lvl, cls, fmt, args...) do { \
	if (sc->logging.needed(sc, sc->logging.private_ptr, lvl, cls)) { \
		__smbdirect_log_printf(sc, func, line, lvl, cls, fmt, ##args); \
	} \
} while (0)
#define __smbdirect_log_generic(sc, lvl, cls, fmt, args...) \
	___smbdirect_log_generic(sc, __func__, __LINE__, lvl, cls, fmt, ##args)

#define smbdirect_log_outgoing(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_OUTGOING, fmt, ##args)
#define smbdirect_log_incoming(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_INCOMING, fmt, ##args)
#define smbdirect_log_read(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_READ, fmt, ##args)
#define smbdirect_log_write(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_WRITE, fmt, ##args)
#define smbdirect_log_rdma_send(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_RDMA_SEND, fmt, ##args)
#define smbdirect_log_rdma_recv(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_RDMA_RECV, fmt, ##args)
#define smbdirect_log_keep_alive(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_KEEP_ALIVE, fmt, ##args)
#define smbdirect_log_rdma_event(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_RDMA_EVENT, fmt, ##args)
#define smbdirect_log_rdma_mr(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_RDMA_MR, fmt, ##args)
#define smbdirect_log_rdma_rw(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_RDMA_RW, fmt, ##args)
#define smbdirect_log_negotiate(sc, lvl, fmt, args...) \
	__smbdirect_log_generic(sc, lvl, SMBDIRECT_LOG_NEGOTIATE, fmt, ##args)
551
/*
 * Initialize @sc to a fully usable CREATED state: everything is
 * zeroed, all locks/lists/wait queues are set up, all work items are
 * initialized with a placeholder handler and kept disabled until the
 * user of the socket installs real handlers, and the default
 * (warning-only) logging callbacks are installed.
 */
static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
{
	/*
	 * This also sets status = SMBDIRECT_SOCKET_CREATED
	 */
	BUILD_BUG_ON(SMBDIRECT_SOCKET_CREATED != 0);
	memset(sc, 0, sizeof(*sc));

	init_waitqueue_head(&sc->status_wait);

	/* default workqueues, taken from the global set (defined elsewhere) */
	sc->workqueues.accept = smbdirect_globals.workqueues.accept;
	sc->workqueues.connect = smbdirect_globals.workqueues.connect;
	sc->workqueues.idle = smbdirect_globals.workqueues.idle;
	sc->workqueues.refill = smbdirect_globals.workqueues.refill;
	sc->workqueues.immediate = smbdirect_globals.workqueues.immediate;
	sc->workqueues.cleanup = smbdirect_globals.workqueues.cleanup;

	/* disabled until a real handler is installed */
	INIT_WORK(&sc->disconnect_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->disconnect_work);

	/* one frontend reference; destroy assumes embedding, see refs */
	kref_init(&sc->refs.disconnect);
	sc->refs.destroy = (struct kref) KREF_INIT(REFCOUNT_MAX);

	sc->rdma.expected_event = RDMA_CM_EVENT_INTERNAL;

	sc->ib.poll_ctx = IB_POLL_UNBOUND_WORKQUEUE;

	spin_lock_init(&sc->connect.lock);
	INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->connect.work);

	INIT_WORK(&sc->idle.immediate_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->idle.immediate_work);
	INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
	disable_delayed_work_sync(&sc->idle.timer_work);

	spin_lock_init(&sc->listen.lock);
	INIT_LIST_HEAD(&sc->listen.pending);
	INIT_LIST_HEAD(&sc->listen.ready);
	sc->listen.backlog = -1; /* not a listener */
	init_waitqueue_head(&sc->listen.wait_queue);

	INIT_LIST_HEAD(&sc->accept.list);

	sc->send_io.mem.gfp_mask = GFP_KERNEL;

	atomic_set(&sc->send_io.bcredits.count, 0);
	init_waitqueue_head(&sc->send_io.bcredits.wait_queue);

	atomic_set(&sc->send_io.lcredits.count, 0);
	init_waitqueue_head(&sc->send_io.lcredits.wait_queue);

	atomic_set(&sc->send_io.credits.count, 0);
	init_waitqueue_head(&sc->send_io.credits.wait_queue);

	atomic_set(&sc->send_io.pending.count, 0);
	init_waitqueue_head(&sc->send_io.pending.zero_wait_queue);

	sc->recv_io.mem.gfp_mask = GFP_KERNEL;

	INIT_LIST_HEAD(&sc->recv_io.free.list);
	spin_lock_init(&sc->recv_io.free.lock);

	atomic_set(&sc->recv_io.posted.count, 0);
	INIT_WORK(&sc->recv_io.posted.refill_work, __smbdirect_socket_disabled_work);
	disable_work_sync(&sc->recv_io.posted.refill_work);

	atomic_set(&sc->recv_io.credits.available, 0);
	atomic_set(&sc->recv_io.credits.count, 0);

	INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
	spin_lock_init(&sc->recv_io.reassembly.lock);
	init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);

	sc->rw_io.mem.gfp_mask = GFP_KERNEL;
	atomic_set(&sc->rw_io.credits.count, 0);
	init_waitqueue_head(&sc->rw_io.credits.wait_queue);

	spin_lock_init(&sc->mr_io.all.lock);
	INIT_LIST_HEAD(&sc->mr_io.all.list);
	atomic_set(&sc->mr_io.ready.count, 0);
	init_waitqueue_head(&sc->mr_io.ready.wait_queue);
	atomic_set(&sc->mr_io.used.count, 0);

	/* default callbacks only WARN; the user must replace them */
	sc->logging.private_ptr = NULL;
	sc->logging.needed = __smbdirect_log_needed;
	sc->logging.vaprintf = __smbdirect_log_vaprintf;
}
640
/*
 * Evaluates to true if the socket is unusable: either first_error is
 * already set (runs __error_cmd) or the status does not match
 * __expected_status (runs __unexpected_cmd).
 */
#define __SMBDIRECT_CHECK_STATUS_FAILED(__sc, __expected_status, __error_cmd, __unexpected_cmd) ({ \
	bool __failed = false; \
	if (unlikely((__sc)->first_error)) { \
		__failed = true; \
		__error_cmd \
	} else if (unlikely((__sc)->status != (__expected_status))) { \
		__failed = true; \
		__unexpected_cmd \
	} \
	__failed; \
})
652
/*
 * Like __SMBDIRECT_CHECK_STATUS_FAILED(), but logs the mismatch:
 * a socket that already hit first_error logs at INFO level, while an
 * unexpected status without a prior error logs at ERR level and
 * triggers WARN_ONCE() before running __unexpected_cmd.
 *
 * Fix: the log calls now use (__sc) instead of a hard-coded 'sc', so
 * the macro is hygienic and works regardless of the caller's variable
 * name.
 */
#define __SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, __unexpected_cmd) \
	__SMBDIRECT_CHECK_STATUS_FAILED(__sc, __expected_status, \
	{ \
		const struct sockaddr_storage *__src = NULL; \
		const struct sockaddr_storage *__dst = NULL; \
		if ((__sc)->rdma.cm_id) { \
			__src = &(__sc)->rdma.cm_id->route.addr.src_addr; \
			__dst = &(__sc)->rdma.cm_id->route.addr.dst_addr; \
		} \
		smbdirect_log_rdma_event((__sc), SMBDIRECT_LOG_INFO, \
			"expected[%s] != %s first_error=%1pe local=%pISpsfc remote=%pISpsfc\n", \
			smbdirect_socket_status_string(__expected_status), \
			smbdirect_socket_status_string((__sc)->status), \
			SMBDIRECT_DEBUG_ERR_PTR((__sc)->first_error), \
			__src, __dst); \
	}, \
	{ \
		const struct sockaddr_storage *__src = NULL; \
		const struct sockaddr_storage *__dst = NULL; \
		if ((__sc)->rdma.cm_id) { \
			__src = &(__sc)->rdma.cm_id->route.addr.src_addr; \
			__dst = &(__sc)->rdma.cm_id->route.addr.dst_addr; \
		} \
		smbdirect_log_rdma_event((__sc), SMBDIRECT_LOG_ERR, \
			"expected[%s] != %s first_error=%1pe local=%pISpsfc remote=%pISpsfc\n", \
			smbdirect_socket_status_string(__expected_status), \
			smbdirect_socket_status_string((__sc)->status), \
			SMBDIRECT_DEBUG_ERR_PTR((__sc)->first_error), \
			__src, __dst); \
		WARN_ONCE(1, \
			"expected[%s] != %s first_error=%1pe local=%pISpsfc remote=%pISpsfc\n", \
			smbdirect_socket_status_string(__expected_status), \
			smbdirect_socket_status_string((__sc)->status), \
			SMBDIRECT_DEBUG_ERR_PTR((__sc)->first_error), \
			__src, __dst); \
		__unexpected_cmd \
	})

#define SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status) \
	__SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, /* nothing */)

/*
 * The disconnect action can be overridden by the including file
 * before including this header.
 */
#ifndef __SMBDIRECT_SOCKET_DISCONNECT
#define __SMBDIRECT_SOCKET_DISCONNECT(__sc) \
	smbdirect_socket_schedule_cleanup(__sc, -ECONNABORTED)
#endif /* ! __SMBDIRECT_SOCKET_DISCONNECT */

#define SMBDIRECT_CHECK_STATUS_DISCONNECT(__sc, __expected_status) \
	__SMBDIRECT_CHECK_STATUS_WARN(__sc, __expected_status, \
		__SMBDIRECT_SOCKET_DISCONNECT(__sc);)
702
/*
 * State for a single send work request, with the SMBD packet header
 * stored in the trailing flexible packet[] member.
 */
struct smbdirect_send_io {
	/* the owning connection */
	struct smbdirect_socket *socket;
	/* completion queue element for the send completion */
	struct ib_cqe cqe;

	/*
	 * The SGE entries for this work request
	 *
	 * The first points to the packet header
	 */
#define SMBDIRECT_SEND_IO_MAX_SGE	6
	size_t num_sge;
	struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE];

	/*
	 * Link to the list of sibling smbdirect_send_io
	 * messages.
	 */
	struct list_head sibling_list;
	struct ib_send_wr wr;

	/* SMBD packet header follows this structure */
	u8 packet[];
};
726
/*
 * A batch of smbdirect_send_io messages submitted together; only one
 * batch runs at a time, see smbdirect_socket.send_io.bcredits.
 */
struct smbdirect_send_batch {
	/*
	 * List of smbdirect_send_io messages
	 */
	struct list_head msg_list;
	/*
	 * Number of list entries
	 */
	size_t wr_cnt;

	/*
	 * Possible remote key invalidation state
	 */
	bool need_invalidate_rkey;
	u32 remote_key;

	/* the batch credit held, see send_io.bcredits */
	int credit;
};
745
/*
 * State for a single posted receive buffer, with the received SMBD
 * packet stored in the trailing flexible packet[] member.
 */
struct smbdirect_recv_io {
	/* the owning connection */
	struct smbdirect_socket *socket;
	/* completion queue element for the receive completion */
	struct ib_cqe cqe;

	/*
	 * For now we only use a single SGE
	 * as we have just one large buffer
	 * per posted recv.
	 */
#define SMBDIRECT_RECV_IO_MAX_SGE	1
	struct ib_sge sge;

	/* Link to free or reassembly list */
	struct list_head list;

	/* Indicate if this is the 1st packet of a payload */
	bool first_segment;

	/* SMBD packet header and payload follows this structure */
	u8 packet[];
};
767
/*
 * The life cycle of a memory registration (struct smbdirect_mr_io).
 */
enum smbdirect_mr_state {
	SMBDIRECT_MR_READY,
	SMBDIRECT_MR_REGISTERED,
	SMBDIRECT_MR_INVALIDATED,
	SMBDIRECT_MR_ERROR,
	SMBDIRECT_MR_DISABLED
};
775
/*
 * State for a single memory registration on the client, see
 * smbdirect_socket.mr_io.
 */
struct smbdirect_mr_io {
	/* the owning connection */
	struct smbdirect_socket *socket;
	/* completion queue element for reg/invalidate completions */
	struct ib_cqe cqe;

	/*
	 * We can have up to two references:
	 * 1. by the connection
	 * 2. by the registration
	 */
	struct kref kref;
	struct mutex mutex;

	/* link into smbdirect_socket.mr_io.all.list */
	struct list_head list;

	enum smbdirect_mr_state state;
	struct ib_mr *mr;
	struct sg_table sgt;
	enum dma_data_direction dir;
	/* only one of the work requests is in flight at a time */
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};

	bool need_invalidate;
	struct completion invalidate_done;
};
802
/*
 * State for a single RDMA read/write request on the server, see
 * smbdirect_socket.rw_io. The scatterlist storage is allocated
 * inline via the trailing flexible sg_list[] member.
 */
struct smbdirect_rw_io {
	/* the owning connection */
	struct smbdirect_socket *socket;
	/* completion queue element for the rw completion */
	struct ib_cqe cqe;

	struct list_head list;

	/* result of the operation, 0 on success */
	int error;
	/* signaled when the operation finished */
	struct completion *completion;

	struct rdma_rw_ctx rdma_ctx;
	struct sg_table sgt;
	struct scatterlist sg_list[];
};
816
/*
 * Number of pages touched by the byte range [buf, buf + size):
 * the page index just past the last byte minus the index of the
 * first page.
 */
static inline size_t smbdirect_get_buf_page_count(const void *buf, size_t size)
{
	const uintptr_t addr = (uintptr_t)buf;
	const uintptr_t first_page = addr / PAGE_SIZE;
	const uintptr_t end_page = (addr + size + PAGE_SIZE - 1) / PAGE_SIZE;

	return end_page - first_page;
}
822
/*
 * Maximum number of retries on data transfer operations
 * (presumably fed into struct rdma_conn_param - confirm at the
 * rdma_connect()/rdma_accept() call sites)
 */
#define SMBDIRECT_RDMA_CM_RETRY		6
/*
 * No need to retry on Receiver Not Ready since SMB_DIRECT manages credits
 */
#define SMBDIRECT_RDMA_CM_RNR_RETRY	0
831
832 #endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
833