xref: /linux/fs/smb/server/transport_rdma.c (revision 048091722259b6e8d2ef3b138b0c121a2afabe61)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2017, Microsoft Corporation.
 *   Copyright (C) 2018, LG Electronics.
 *
 *   Author(s): Long Li <longli@microsoft.com>,
 *		Hyunchul Lee <hyc.lee@gmail.com>
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string_choices.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#define __SMBDIRECT_SOCKET_DISCONNECT(__sc) smb_direct_disconnect_rdma_connection(__sc)

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "../common/smb2status.h"
#include "../common/smbdirect/smbdirect.h"
#include "../common/smbdirect/smbdirect_pdu.h"
#include "../common/smbdirect/smbdirect_socket.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(SMBDIRECT_V1)

/* SMB_DIRECT negotiation timeout (for the server) in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT		5

/* The interval at which to send a keepalive message to the peer, in seconds */
#define SMB_DIRECT_KEEPALIVE_SEND_INTERVAL	120

/* The timeout to wait for a keepalive response from the peer, in seconds */
#define SMB_DIRECT_KEEPALIVE_RECV_TIMEOUT	5

/*
 * Default maximum number of RDMA read/write operations outstanding on this
 * connection. This value may be decreased during QP creation if the
 * hardware limit is lower.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH		8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 * Those may change after a SMB_DIRECT negotiation
 */

/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The number of send credits the local peer requests from the remote peer */
static int smb_direct_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
static int smb_direct_max_send_size = 1364;

/*
 * The maximum fragmented upper-layer payload receive size supported
 *
 * Assume max_payload_per_credit is
 * smb_direct_max_receive_size - 24 = 1340
 *
 * The maximum number would be
 * smb_direct_receive_credit_max * max_payload_per_credit
 *
 *                       1340 * 255 = 341700 (0x536C4)
 *
 * The minimum value from the spec is 131072 (0x20000)
 *
 * For now we use the logic we used before:
 *                 (1364 * 255) / 2 = 173910 (0x2A756)
 */
static int smb_direct_max_fragmented_recv_size = (1364 * 255) / 2;

/* The maximum single-message size that can be received */
static int smb_direct_max_receive_size = 1364;

static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device	*ib_dev;
	struct list_head	list;
};

static struct smb_direct_listener {
	int			port;
	struct rdma_cm_id	*cm_id;
} smb_direct_ib_listener, smb_direct_iw_listener;

static struct workqueue_struct *smb_direct_wq;

struct smb_direct_transport {
	struct ksmbd_transport	transport;

	struct smbdirect_socket socket;
};

#define KSMBD_TRANS(t) (&(t)->transport)
#define SMBD_TRANS(t)	(container_of(t, \
				struct smb_direct_transport, transport))

static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops;

void init_smbd_max_io_size(unsigned int sz)
{
	sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE);
	smb_direct_max_read_write_size = sz;
}

unsigned int get_smbd_max_read_write_size(struct ksmbd_transport *kt)
{
	struct smb_direct_transport *t;
	struct smbdirect_socket *sc;
	struct smbdirect_socket_parameters *sp;

	if (kt->ops != &ksmbd_smb_direct_transport_ops)
		return 0;

	t = SMBD_TRANS(kt);
	sc = &t->socket;
	sp = &sc->parameters;

	return sp->max_read_write_size;
}

static inline int get_buf_page_count(void *buf, int size)
{
	return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
		(uintptr_t)buf / PAGE_SIZE;
}
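
/*
 * Example (illustrative, not from the original source): with
 * PAGE_SIZE == 4096, buf == (void *)0x1f00 and size == 512 the buffer
 * ends at 0x20ff and touches two pages:
 *   DIV_ROUND_UP(0x1f00 + 512, 4096) - 0x1f00 / 4096 == 3 - 1 == 2
 */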

static void smb_direct_destroy_pools(struct smbdirect_socket *sc);
static void smb_direct_post_recv_credits(struct work_struct *work);
static int smb_direct_post_send_data(struct smbdirect_socket *sc,
				     struct smbdirect_send_batch *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length);

static inline void
*smbdirect_recv_io_payload(struct smbdirect_recv_io *recvmsg)
{
	return (void *)recvmsg->packet;
}

static struct
smbdirect_recv_io *get_free_recvmsg(struct smbdirect_socket *sc)
{
	struct smbdirect_recv_io *recvmsg = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sc->recv_io.free.lock, flags);
	if (!list_empty(&sc->recv_io.free.list)) {
		recvmsg = list_first_entry(&sc->recv_io.free.list,
					   struct smbdirect_recv_io,
					   list);
		list_del(&recvmsg->list);
	}
	spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
	return recvmsg;
}

static void put_recvmsg(struct smbdirect_socket *sc,
			struct smbdirect_recv_io *recvmsg)
{
	unsigned long flags;

	if (likely(recvmsg->sge.length != 0)) {
		ib_dma_unmap_single(sc->ib.dev,
				    recvmsg->sge.addr,
				    recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		recvmsg->sge.length = 0;
	}

	spin_lock_irqsave(&sc->recv_io.free.lock, flags);
	list_add(&recvmsg->list, &sc->recv_io.free.list);
	spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);

	queue_work(sc->workqueue, &sc->recv_io.posted.refill_work);
}

static void enqueue_reassembly(struct smbdirect_socket *sc,
			       struct smbdirect_recv_io *recvmsg,
			       int data_length)
{
	unsigned long flags;

	spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
	list_add_tail(&recvmsg->list, &sc->recv_io.reassembly.list);
	sc->recv_io.reassembly.queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and the list are up to date
	 */
	virt_wmb();
	sc->recv_io.reassembly.data_length += data_length;
	spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
}
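
/*
 * Note (added for clarity): the virt_wmb() in enqueue_reassembly() pairs
 * with the virt_rmb() in smb_direct_read(), which samples
 * reassembly.data_length locklessly before walking the queue.
 */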

static struct smbdirect_recv_io *get_first_reassembly(struct smbdirect_socket *sc)
{
	if (!list_empty(&sc->recv_io.reassembly.list))
		return list_first_entry(&sc->recv_io.reassembly.list,
				struct smbdirect_recv_io, list);
	else
		return NULL;
}

static void smb_direct_disconnect_wake_up_all(struct smbdirect_socket *sc)
{
	/*
	 * Wake up all waiters in all wait queues
	 * in order to notice the broken connection.
	 */
	wake_up_all(&sc->status_wait);
	wake_up_all(&sc->send_io.bcredits.wait_queue);
	wake_up_all(&sc->send_io.lcredits.wait_queue);
	wake_up_all(&sc->send_io.credits.wait_queue);
	wake_up_all(&sc->send_io.pending.zero_wait_queue);
	wake_up_all(&sc->recv_io.reassembly.wait_queue);
	wake_up_all(&sc->rw_io.credits.wait_queue);
}

static void smb_direct_disconnect_rdma_work(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, disconnect_work);

	if (sc->first_error == 0)
		sc->first_error = -ECONNABORTED;

	/*
	 * Make sure this and the other works are not queued again.
	 * We must not block here, so avoid
	 * disable[_delayed]_work_sync().
	 */
	disable_work(&sc->disconnect_work);
	disable_work(&sc->connect.work);
	disable_work(&sc->recv_io.posted.refill_work);
	disable_delayed_work(&sc->idle.timer_work);
	disable_work(&sc->idle.immediate_work);

	switch (sc->status) {
	case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
	case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
	case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
	case SMBDIRECT_SOCKET_CONNECTED:
	case SMBDIRECT_SOCKET_ERROR:
		sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
		rdma_disconnect(sc->rdma.cm_id);
		break;

	case SMBDIRECT_SOCKET_CREATED:
	case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
	case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
	case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
	case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
	case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
	case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
		/*
		 * rdma_accept() never reached
		 * RDMA_CM_EVENT_ESTABLISHED
		 */
		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
		break;

	case SMBDIRECT_SOCKET_DISCONNECTING:
	case SMBDIRECT_SOCKET_DISCONNECTED:
	case SMBDIRECT_SOCKET_DESTROYED:
		break;
	}

	/*
	 * Wake up all waiters in all wait queues
	 * in order to notice the broken connection.
	 */
	smb_direct_disconnect_wake_up_all(sc);
}

static void
smb_direct_disconnect_rdma_connection(struct smbdirect_socket *sc)
{
	if (sc->first_error == 0)
		sc->first_error = -ECONNABORTED;

	/*
	 * Make sure the other works (than disconnect_work) are
	 * not queued again. We must not block here, so avoid
	 * disable[_delayed]_work_sync().
	 */
	disable_work(&sc->connect.work);
	disable_work(&sc->recv_io.posted.refill_work);
	disable_work(&sc->idle.immediate_work);
	disable_delayed_work(&sc->idle.timer_work);

	switch (sc->status) {
	case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
	case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
	case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
	case SMBDIRECT_SOCKET_ERROR:
	case SMBDIRECT_SOCKET_DISCONNECTING:
	case SMBDIRECT_SOCKET_DISCONNECTED:
	case SMBDIRECT_SOCKET_DESTROYED:
		/*
		 * Keep the current error status
		 */
		break;

	case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
	case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
		sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED;
		break;

	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
	case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
		sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED;
		break;

	case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
	case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
		sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED;
		break;

	case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
	case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
		sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
		break;

	case SMBDIRECT_SOCKET_CREATED:
		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
		break;

	case SMBDIRECT_SOCKET_CONNECTED:
		sc->status = SMBDIRECT_SOCKET_ERROR;
		break;
	}

	/*
	 * Wake up all waiters in all wait queues
	 * in order to notice the broken connection.
	 */
	smb_direct_disconnect_wake_up_all(sc);

	queue_work(sc->workqueue, &sc->disconnect_work);
}

static void smb_direct_send_immediate_work(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, idle.immediate_work);

	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
		return;

	smb_direct_post_send_data(sc, NULL, NULL, 0, 0);
}

static void smb_direct_idle_connection_timer(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, idle.timer_work.work);
	struct smbdirect_socket_parameters *sp = &sc->parameters;

	if (sc->idle.keepalive != SMBDIRECT_KEEPALIVE_NONE) {
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}

	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
		return;

	/*
	 * Now use the keepalive timeout (instead of keepalive interval)
	 * in order to wait for a response
	 */
	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
	mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
			 msecs_to_jiffies(sp->keepalive_timeout_msec));
	queue_work(sc->workqueue, &sc->idle.immediate_work);
}
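
/*
 * Summary (added for clarity, derived from this file): the keepalive
 * state machine moves NONE -> PENDING above when the idle timer fires,
 * PENDING -> SENT in manage_keep_alive_before_sending() (which sets
 * SMBDIRECT_FLAG_RESPONSE_REQUESTED on the next send), and back to NONE
 * in recv_done() when any message arrives. If the timer fires while the
 * state is not NONE, the peer failed to respond in time and we disconnect.
 */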

static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
{
	struct smb_direct_transport *t;
	struct smbdirect_socket *sc;
	struct smbdirect_socket_parameters *sp;
	struct ksmbd_conn *conn;

	t = kzalloc_obj(*t, KSMBD_DEFAULT_GFP);
	if (!t)
		return NULL;
	sc = &t->socket;
	smbdirect_socket_init(sc);
	sp = &sc->parameters;

	sc->workqueue = smb_direct_wq;

	INIT_WORK(&sc->disconnect_work, smb_direct_disconnect_rdma_work);

	sp->negotiate_timeout_msec = SMB_DIRECT_NEGOTIATE_TIMEOUT * 1000;
	sp->initiator_depth = SMB_DIRECT_CM_INITIATOR_DEPTH;
	sp->responder_resources = 1;
	sp->recv_credit_max = smb_direct_receive_credit_max;
	sp->send_credit_target = smb_direct_send_credit_target;
	sp->max_send_size = smb_direct_max_send_size;
	sp->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
	sp->max_recv_size = smb_direct_max_receive_size;
	sp->max_read_write_size = smb_direct_max_read_write_size;
	sp->keepalive_interval_msec = SMB_DIRECT_KEEPALIVE_SEND_INTERVAL * 1000;
	sp->keepalive_timeout_msec = SMB_DIRECT_KEEPALIVE_RECV_TIMEOUT * 1000;

	sc->rdma.cm_id = cm_id;
	cm_id->context = sc;

	sc->ib.dev = sc->rdma.cm_id->device;

	INIT_DELAYED_WORK(&sc->idle.timer_work, smb_direct_idle_connection_timer);

	conn = ksmbd_conn_alloc();
	if (!conn)
		goto err;

	down_write(&conn_list_lock);
	hash_add(conn_list, &conn->hlist, 0);
	up_write(&conn_list_lock);

	conn->transport = KSMBD_TRANS(t);
	KSMBD_TRANS(t)->conn = conn;
	KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops;
	return t;
err:
	kfree(t);
	return NULL;
}

static void smb_direct_free_transport(struct ksmbd_transport *kt)
{
	kfree(SMBD_TRANS(kt));
}

static void free_transport(struct smb_direct_transport *t)
{
	struct smbdirect_socket *sc = &t->socket;
	struct smbdirect_recv_io *recvmsg;

	disable_work_sync(&sc->disconnect_work);
	if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING)
		smb_direct_disconnect_rdma_work(&sc->disconnect_work);
	if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED)
		wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED);

	/*
	 * Wake up all waiters in all wait queues
	 * in order to notice the broken connection.
	 *
	 * Most likely this was already called via
	 * smb_direct_disconnect_rdma_work(), but call it again...
	 */
	smb_direct_disconnect_wake_up_all(sc);

	disable_work_sync(&sc->connect.work);
	disable_work_sync(&sc->recv_io.posted.refill_work);
	disable_delayed_work_sync(&sc->idle.timer_work);
	disable_work_sync(&sc->idle.immediate_work);

	if (sc->rdma.cm_id)
		rdma_lock_handler(sc->rdma.cm_id);

	if (sc->ib.qp) {
		ib_drain_qp(sc->ib.qp);
		sc->ib.qp = NULL;
		rdma_destroy_qp(sc->rdma.cm_id);
	}

	ksmbd_debug(RDMA, "drain the reassembly queue\n");
	do {
		unsigned long flags;

		spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
		recvmsg = get_first_reassembly(sc);
		if (recvmsg) {
			list_del(&recvmsg->list);
			spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
			put_recvmsg(sc, recvmsg);
		} else {
			spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
		}
	} while (recvmsg);
	sc->recv_io.reassembly.data_length = 0;

	if (sc->ib.send_cq)
		ib_free_cq(sc->ib.send_cq);
	if (sc->ib.recv_cq)
		ib_free_cq(sc->ib.recv_cq);
	if (sc->ib.pd)
		ib_dealloc_pd(sc->ib.pd);
	if (sc->rdma.cm_id) {
		rdma_unlock_handler(sc->rdma.cm_id);
		rdma_destroy_id(sc->rdma.cm_id);
	}

	smb_direct_destroy_pools(sc);
	ksmbd_conn_free(KSMBD_TRANS(t)->conn);
}

static struct smbdirect_send_io
*smb_direct_alloc_sendmsg(struct smbdirect_socket *sc)
{
	struct smbdirect_send_io *msg;

	msg = mempool_alloc(sc->send_io.mem.pool, KSMBD_DEFAULT_GFP);
	if (!msg)
		return ERR_PTR(-ENOMEM);
	msg->socket = sc;
	INIT_LIST_HEAD(&msg->sibling_list);
	msg->num_sge = 0;
	return msg;
}

static void smb_direct_free_sendmsg(struct smbdirect_socket *sc,
				    struct smbdirect_send_io *msg)
{
	int i;

	/*
	 * The list needs to be empty!
	 * The caller should take care of it.
	 */
	WARN_ON_ONCE(!list_empty(&msg->sibling_list));

	if (msg->num_sge > 0) {
		ib_dma_unmap_single(sc->ib.dev,
				    msg->sge[0].addr, msg->sge[0].length,
				    DMA_TO_DEVICE);
		for (i = 1; i < msg->num_sge; i++)
			ib_dma_unmap_page(sc->ib.dev,
					  msg->sge[i].addr, msg->sge[i].length,
					  DMA_TO_DEVICE);
	}
	mempool_free(msg, sc->send_io.mem.pool);
}

static int smb_direct_check_recvmsg(struct smbdirect_recv_io *recvmsg)
{
	struct smbdirect_socket *sc = recvmsg->socket;

	switch (sc->recv_io.expected) {
	case SMBDIRECT_EXPECT_DATA_TRANSFER: {
		struct smbdirect_data_transfer *req =
			(struct smbdirect_data_transfer *)recvmsg->packet;
		struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
				+ le32_to_cpu(req->data_offset));
		ksmbd_debug(RDMA,
			    "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
			    le16_to_cpu(req->credits_granted),
			    le16_to_cpu(req->credits_requested),
			    req->data_length, req->remaining_data_length,
			    hdr->ProtocolId, hdr->Command);
		return 0;
	}
	case SMBDIRECT_EXPECT_NEGOTIATE_REQ: {
		struct smbdirect_negotiate_req *req =
			(struct smbdirect_negotiate_req *)recvmsg->packet;
		ksmbd_debug(RDMA,
			    "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n",
			    le16_to_cpu(req->min_version),
			    le16_to_cpu(req->max_version),
			    le16_to_cpu(req->credits_requested),
			    le32_to_cpu(req->preferred_send_size),
			    le32_to_cpu(req->max_receive_size),
			    le32_to_cpu(req->max_fragmented_size));
		if (le16_to_cpu(req->min_version) > 0x0100 ||
		    le16_to_cpu(req->max_version) < 0x0100)
			return -EOPNOTSUPP;
		if (le16_to_cpu(req->credits_requested) <= 0 ||
		    le32_to_cpu(req->max_receive_size) <= 128 ||
		    le32_to_cpu(req->max_fragmented_size) <=
					128 * 1024)
			return -ECONNABORTED;

		return 0;
	}
	case SMBDIRECT_EXPECT_NEGOTIATE_REP:
		/* client only */
		break;
	}

	/* This is an internal error */
	return -EINVAL;
}

static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_recv_io *recvmsg;
	struct smbdirect_socket *sc;
	struct smbdirect_socket_parameters *sp;

	recvmsg = container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
	sc = recvmsg->socket;
	sp = &sc->parameters;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		put_recvmsg(sc, recvmsg);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(sc);
		}
		return;
	}

	ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr,
				   recvmsg->sge.length, DMA_FROM_DEVICE);

	/*
	 * Reset timer to the keepalive interval in
	 * order to trigger our next keepalive message.
	 */
	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
	mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
			 msecs_to_jiffies(sp->keepalive_interval_msec));

	switch (sc->recv_io.expected) {
	case SMBDIRECT_EXPECT_NEGOTIATE_REQ:
		/* see smb_direct_negotiate_recv_done */
		break;
	case SMBDIRECT_EXPECT_DATA_TRANSFER: {
		struct smbdirect_data_transfer *data_transfer =
			(struct smbdirect_data_transfer *)recvmsg->packet;
		u32 remaining_data_length, data_offset, data_length;
		int current_recv_credits;
		u16 old_recv_credit_target;

		if (wc->byte_len <
		    offsetof(struct smbdirect_data_transfer, padding)) {
			put_recvmsg(sc, recvmsg);
			smb_direct_disconnect_rdma_connection(sc);
			return;
		}

		remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
		data_length = le32_to_cpu(data_transfer->data_length);
		data_offset = le32_to_cpu(data_transfer->data_offset);
		if (wc->byte_len < data_offset ||
		    wc->byte_len < (u64)data_offset + data_length) {
			put_recvmsg(sc, recvmsg);
			smb_direct_disconnect_rdma_connection(sc);
			return;
		}
		if (remaining_data_length > sp->max_fragmented_recv_size ||
		    data_length > sp->max_fragmented_recv_size ||
		    (u64)remaining_data_length + (u64)data_length >
		    (u64)sp->max_fragmented_recv_size) {
			put_recvmsg(sc, recvmsg);
			smb_direct_disconnect_rdma_connection(sc);
			return;
		}

		if (data_length) {
			if (sc->recv_io.reassembly.full_packet_received)
				recvmsg->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				sc->recv_io.reassembly.full_packet_received = false;
			else
				sc->recv_io.reassembly.full_packet_received = true;
		}

		atomic_dec(&sc->recv_io.posted.count);
		current_recv_credits = atomic_dec_return(&sc->recv_io.credits.count);

		old_recv_credit_target = sc->recv_io.credits.target;
		sc->recv_io.credits.target =
				le16_to_cpu(data_transfer->credits_requested);
		sc->recv_io.credits.target =
			min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);
		sc->recv_io.credits.target =
			max_t(u16, sc->recv_io.credits.target, 1);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			   &sc->send_io.credits.count);

		if (le16_to_cpu(data_transfer->flags) &
		    SMBDIRECT_FLAG_RESPONSE_REQUESTED)
			queue_work(sc->workqueue, &sc->idle.immediate_work);

		if (atomic_read(&sc->send_io.credits.count) > 0)
			wake_up(&sc->send_io.credits.wait_queue);

		if (data_length) {
			if (current_recv_credits <= (sc->recv_io.credits.target / 4) ||
			    sc->recv_io.credits.target > old_recv_credit_target)
				queue_work(sc->workqueue, &sc->recv_io.posted.refill_work);

			enqueue_reassembly(sc, recvmsg, (int)data_length);
			wake_up(&sc->recv_io.reassembly.wait_queue);
		} else {
			put_recvmsg(sc, recvmsg);
		}

		return;
	}
	case SMBDIRECT_EXPECT_NEGOTIATE_REP:
		/* client only */
		break;
	}

	/*
	 * This is an internal error!
	 */
	WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER);
	put_recvmsg(sc, recvmsg);
	smb_direct_disconnect_rdma_connection(sc);
}

static void smb_direct_negotiate_recv_work(struct work_struct *work);

static void smb_direct_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_recv_io *recv_io =
		container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
	struct smbdirect_socket *sc = recv_io->socket;
	unsigned long flags;

	/*
	 * Reset to the common recv_done handler for later reuse.
	 */
	recv_io->cqe.done = recv_done;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		put_recvmsg(sc, recv_io);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("Negotiate Recv error. status='%s (%d)' opcode=%d\n",
			       ib_wc_status_msg(wc->status), wc->status,
			       wc->opcode);
			smb_direct_disconnect_rdma_connection(sc);
		}
		return;
	}

	ksmbd_debug(RDMA, "Negotiate Recv completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	ib_dma_sync_single_for_cpu(sc->ib.dev,
				   recv_io->sge.addr,
				   recv_io->sge.length,
				   DMA_FROM_DEVICE);

	/*
	 * This is an internal error!
	 */
	if (WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_NEGOTIATE_REQ)) {
		put_recvmsg(sc, recv_io);
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}

	/*
	 * Don't reset the timer to the keepalive interval here;
	 * this will be done in smb_direct_negotiate_recv_work().
	 */

	/*
	 * Only remember the recv_io if it has enough bytes; this gives
	 * smb_direct_negotiate_recv_work() enough information to
	 * disconnect if the request was not valid.
	 */
	sc->recv_io.reassembly.full_packet_received = true;
	if (wc->byte_len >= sizeof(struct smbdirect_negotiate_req))
		enqueue_reassembly(sc, recv_io, 0);
	else
		put_recvmsg(sc, recv_io);

	/*
	 * Some drivers (at least mlx5_ib and irdma in roce mode)
	 * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED,
	 * we need to adjust our expectation in that case.
	 *
	 * So we defer further processing of the negotiation
	 * to smb_direct_negotiate_recv_work().
	 *
	 * If we are already in SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
	 * we queue the work directly, otherwise
	 * smb_direct_cm_handler() will do it when
	 * RDMA_CM_EVENT_ESTABLISHED arrives.
	 */
	spin_lock_irqsave(&sc->connect.lock, flags);
	if (!sc->first_error) {
		INIT_WORK(&sc->connect.work, smb_direct_negotiate_recv_work);
		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)
			queue_work(sc->workqueue, &sc->connect.work);
	}
	spin_unlock_irqrestore(&sc->connect.lock, flags);
}

static void smb_direct_negotiate_recv_work(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, connect.work);
	const struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_recv_io *recv_io;

	if (sc->first_error)
		return;

	ksmbd_debug(RDMA, "Negotiate Recv Work running\n");

	/*
	 * Reset timer to the keepalive interval in
	 * order to trigger our next keepalive message.
	 */
	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
	mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
			 msecs_to_jiffies(sp->keepalive_interval_msec));

	/*
	 * If smb_direct_negotiate_recv_done() detected an
	 * invalid request we want to disconnect.
	 */
	recv_io = get_first_reassembly(sc);
	if (!recv_io) {
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}

	if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)) {
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}
	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
	wake_up(&sc->status_wait);
}

static int smb_direct_post_recv(struct smbdirect_socket *sc,
				struct smbdirect_recv_io *recvmsg)
{
	struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct ib_recv_wr wr;
	int ret;

	recvmsg->sge.addr = ib_dma_map_single(sc->ib.dev,
					      recvmsg->packet,
					      sp->max_recv_size,
					      DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(sc->ib.dev, recvmsg->sge.addr);
	if (ret)
		return ret;
	recvmsg->sge.length = sp->max_recv_size;
	recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;

	wr.wr_cqe = &recvmsg->cqe;
	wr.next = NULL;
	wr.sg_list = &recvmsg->sge;
	wr.num_sge = 1;

	ret = ib_post_recv(sc->ib.qp, &wr, NULL);
	if (ret) {
		pr_err("Can't post recv: %d\n", ret);
		ib_dma_unmap_single(sc->ib.dev,
				    recvmsg->sge.addr, recvmsg->sge.length,
				    DMA_FROM_DEVICE);
		recvmsg->sge.length = 0;
		smb_direct_disconnect_rdma_connection(sc);
		return ret;
	}
	return ret;
}

static int smb_direct_read(struct ksmbd_transport *t, char *buf,
			   unsigned int size, int unused)
{
	struct smbdirect_recv_io *recvmsg;
	struct smbdirect_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;
	struct smb_direct_transport *st = SMBD_TRANS(t);
	struct smbdirect_socket *sc = &st->socket;

again:
	if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
		pr_err("disconnected\n");
		return -ENOTCONN;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time.
	 */
	if (sc->recv_io.reassembly.data_length >= size) {
		int queue_length;
		int queue_removed = 0;
		unsigned long flags;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * get_first_reassembly. This call is lock free
		 * as we never read from the end of the queue, which is
		 * being updated in softirq context as more data is received.
		 */
		virt_rmb();
		queue_length = sc->recv_io.reassembly.queue_length;
		data_read = 0;
		to_read = size;
		offset = sc->recv_io.reassembly.first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(sc);
			data_transfer = smbdirect_recv_io_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects an RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to the upper layer packet processing logic,
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
					list_del(&recvmsg->list);
					spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
				}
				queue_removed++;
				put_recvmsg(sc, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
		sc->recv_io.reassembly.data_length -= data_read;
		sc->recv_io.reassembly.queue_length -= queue_removed;
		spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);

		sc->recv_io.reassembly.first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, sc->recv_io.reassembly.data_length,
			    sc->recv_io.reassembly.first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(sc->recv_io.reassembly.wait_queue,
				      sc->recv_io.reassembly.data_length >= size ||
				       sc->status != SMBDIRECT_SOCKET_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, recv_io.posted.refill_work);
	struct smbdirect_recv_io *recvmsg;
	int credits = 0;
	int ret;

	if (atomic_read(&sc->recv_io.credits.count) < sc->recv_io.credits.target) {
		while (true) {
			recvmsg = get_free_recvmsg(sc);
			if (!recvmsg)
				break;

			recvmsg->first_segment = false;

			ret = smb_direct_post_recv(sc, recvmsg);
			if (ret) {
				pr_err("Can't post recv: %d\n", ret);
				put_recvmsg(sc, recvmsg);
				break;
			}
			credits++;

			atomic_inc(&sc->recv_io.posted.count);
		}
	}

	atomic_add(credits, &sc->recv_io.credits.available);

	/*
	 * If the holder of the last send credit is waiting for
	 * receive credits it can grant, we need to wake it up.
	 */
	if (credits &&
	    atomic_read(&sc->send_io.bcredits.count) == 0 &&
	    atomic_read(&sc->send_io.credits.count) == 0)
		wake_up(&sc->send_io.credits.wait_queue);

	if (credits)
		queue_work(sc->workqueue, &sc->idle.immediate_work);
}
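
/*
 * Note (added for clarity, derived from this file): recv_io.credits.available
 * counts posted receives whose credits have not yet been granted to the
 * peer; manage_credits_prior_sending() moves them into recv_io.credits.count
 * when the next send carries a credit grant.
 */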

static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_send_io *sendmsg, *sibling, *next;
	struct smbdirect_socket *sc;
	int lcredits = 0;

	sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
	sc = sendmsg->socket;

	ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
		    ib_wc_status_msg(wc->status), wc->status,
		    wc->opcode);

	if (unlikely(!(sendmsg->wr.send_flags & IB_SEND_SIGNALED))) {
		/*
		 * This happens when the smbdirect_send_io is a sibling
		 * before the final message; it is only signaled on
		 * error anyway, so we need to skip
		 * smb_direct_free_sendmsg() here,
		 * otherwise it would destroy the memory
		 * of the siblings too, which would cause
		 * use-after-free problems for the others
		 * triggered from ib_drain_qp().
		 */
		if (wc->status != IB_WC_SUCCESS)
			goto skip_free;

		/*
		 * This should not happen!
		 * But we better just close the
		 * connection...
		 */
		pr_err("unexpected send completion wc->status=%s (%d) wc->opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status, wc->opcode);
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}

	/*
	 * Free possible siblings and then the main send_io
	 */
	list_for_each_entry_safe(sibling, next, &sendmsg->sibling_list, sibling_list) {
		list_del_init(&sibling->sibling_list);
		smb_direct_free_sendmsg(sc, sibling);
		lcredits += 1;
	}
	/* Note this frees wc->wr_cqe, but not wc */
	smb_direct_free_sendmsg(sc, sendmsg);
	lcredits += 1;

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
skip_free:
		pr_err("Send error. status='%s (%d)', opcode=%d\n",
		       ib_wc_status_msg(wc->status), wc->status,
		       wc->opcode);
		smb_direct_disconnect_rdma_connection(sc);
		return;
	}

	atomic_add(lcredits, &sc->send_io.lcredits.count);
	wake_up(&sc->send_io.lcredits.wait_queue);

	if (atomic_dec_and_test(&sc->send_io.pending.count))
		wake_up(&sc->send_io.pending.zero_wait_queue);
}

static int manage_credits_prior_sending(struct smbdirect_socket *sc)
{
	int missing;
	int available;
	int new_credits;

	if (atomic_read(&sc->recv_io.credits.count) >= sc->recv_io.credits.target)
		return 0;

	missing = (int)sc->recv_io.credits.target - atomic_read(&sc->recv_io.credits.count);
	available = atomic_xchg(&sc->recv_io.credits.available, 0);
	new_credits = (u16)min3(U16_MAX, missing, available);
	if (new_credits <= 0) {
		/*
		 * If credits are available, but not granted,
		 * we need to re-add them.
		 */
		if (available)
			atomic_add(available, &sc->recv_io.credits.available);
		return 0;
	}

	if (new_credits < available) {
		/*
		 * Re-add the remaining available credits.
		 */
		available -= new_credits;
		atomic_add(available, &sc->recv_io.credits.available);
	}

	/*
	 * Remember we granted the credits
	 */
	atomic_add(new_credits, &sc->recv_io.credits.count);
	return new_credits;
}
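
/*
 * Worked example (illustrative, not part of the original source): with
 * credits.target == 255, credits.count == 200 and credits.available == 100,
 * missing == 55 and new_credits == min3(65535, 55, 100) == 55; the
 * remaining 45 are re-added to credits.available and credits.count
 * becomes 255.
 */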

static int manage_keep_alive_before_sending(struct smbdirect_socket *sc)
{
	struct smbdirect_socket_parameters *sp = &sc->parameters;

	if (sc->idle.keepalive == SMBDIRECT_KEEPALIVE_PENDING) {
		sc->idle.keepalive = SMBDIRECT_KEEPALIVE_SENT;
		/*
		 * Now use the keepalive timeout (instead of keepalive interval)
		 * in order to wait for a response
		 */
		mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
				 msecs_to_jiffies(sp->keepalive_timeout_msec));
		return 1;
	}
	return 0;
}

static int smb_direct_post_send(struct smbdirect_socket *sc,
				struct ib_send_wr *wr)
{
	int ret;

	atomic_inc(&sc->send_io.pending.count);
	ret = ib_post_send(sc->ib.qp, wr, NULL);
	if (ret) {
		pr_err("failed to post send: %d\n", ret);
		smb_direct_disconnect_rdma_connection(sc);
	}
	return ret;
}

static void smb_direct_send_ctx_init(struct smbdirect_send_batch *send_ctx,
				     bool need_invalidate_rkey,
				     unsigned int remote_key)
{
	INIT_LIST_HEAD(&send_ctx->msg_list);
	send_ctx->wr_cnt = 0;
	send_ctx->need_invalidate_rkey = need_invalidate_rkey;
	send_ctx->remote_key = remote_key;
	send_ctx->credit = 0;
}

static int smb_direct_flush_send_list(struct smbdirect_socket *sc,
				      struct smbdirect_send_batch *send_ctx,
				      bool is_last)
{
	struct smbdirect_send_io *first, *last;
	int ret = 0;

	if (list_empty(&send_ctx->msg_list))
		goto release_credit;

	first = list_first_entry(&send_ctx->msg_list,
				 struct smbdirect_send_io,
				 sibling_list);
	last = list_last_entry(&send_ctx->msg_list,
			       struct smbdirect_send_io,
			       sibling_list);

	if (send_ctx->need_invalidate_rkey) {
		first->wr.opcode = IB_WR_SEND_WITH_INV;
		first->wr.ex.invalidate_rkey = send_ctx->remote_key;
		send_ctx->need_invalidate_rkey = false;
		send_ctx->remote_key = 0;
	}

	last->wr.send_flags = IB_SEND_SIGNALED;
	last->wr.wr_cqe = &last->cqe;

	/*
	 * Remove last from send_ctx->msg_list
	 * and splice the rest of send_ctx->msg_list
	 * to last->sibling_list.
	 *
	 * send_ctx->msg_list is a valid empty list
	 * at the end.
	 */
	list_del_init(&last->sibling_list);
	list_splice_tail_init(&send_ctx->msg_list, &last->sibling_list);
	send_ctx->wr_cnt = 0;

	ret = smb_direct_post_send(sc, &first->wr);
	if (ret) {
		struct smbdirect_send_io *sibling, *next;

		list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) {
			list_del_init(&sibling->sibling_list);
			smb_direct_free_sendmsg(sc, sibling);
		}
		smb_direct_free_sendmsg(sc, last);
	}

release_credit:
	if (is_last && !ret && send_ctx->credit) {
		atomic_add(send_ctx->credit, &sc->send_io.bcredits.count);
		send_ctx->credit = 0;
		wake_up(&sc->send_io.bcredits.wait_queue);
	}

	return ret;
}

static int wait_for_credits(struct smbdirect_socket *sc,
			    wait_queue_head_t *waitq, atomic_t *total_credits,
			    int needed)
{
	int ret;

	do {
		if (atomic_sub_return(needed, total_credits) >= 0)
			return 0;

		atomic_add(needed, total_credits);
		ret = wait_event_interruptible(*waitq,
					       atomic_read(total_credits) >= needed ||
					       sc->status != SMBDIRECT_SOCKET_CONNECTED);

		if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
			return -ENOTCONN;
		else if (ret < 0)
			return ret;
	} while (true);
}
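
/*
 * Example (illustrative, not part of the original source): with
 * atomic_read(total_credits) == 2 and needed == 3, atomic_sub_return()
 * returns -1, so the credits are given back and we sleep until enough
 * credits are released or the connection breaks; the subtraction is then
 * retried, as another waiter may have consumed the credits first.
 */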

static int wait_for_send_bcredit(struct smbdirect_socket *sc,
				 struct smbdirect_send_batch *send_ctx)
{
	int ret;

	if (send_ctx->credit)
		return 0;

	ret = wait_for_credits(sc,
			       &sc->send_io.bcredits.wait_queue,
			       &sc->send_io.bcredits.count,
			       1);
	if (ret)
		return ret;

	send_ctx->credit = 1;
	return 0;
}

static int wait_for_send_lcredit(struct smbdirect_socket *sc,
				 struct smbdirect_send_batch *send_ctx)
{
	if (send_ctx && (atomic_read(&sc->send_io.lcredits.count) <= 1)) {
		int ret;

		ret = smb_direct_flush_send_list(sc, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(sc,
				&sc->send_io.lcredits.wait_queue,
				&sc->send_io.lcredits.count,
				1);
}

static int wait_for_send_credits(struct smbdirect_socket *sc,
				 struct smbdirect_send_batch *send_ctx)
{
	int ret;

	if (send_ctx &&
	    (send_ctx->wr_cnt >= 16 || atomic_read(&sc->send_io.credits.count) <= 1)) {
		ret = smb_direct_flush_send_list(sc, send_ctx, false);
		if (ret)
			return ret;
	}

	return wait_for_credits(sc, &sc->send_io.credits.wait_queue, &sc->send_io.credits.count, 1);
}

static int wait_for_rw_credits(struct smbdirect_socket *sc, int credits)
{
	return wait_for_credits(sc,
				&sc->rw_io.credits.wait_queue,
				&sc->rw_io.credits.count,
				credits);
}

static int calc_rw_credits(struct smbdirect_socket *sc,
			   char *buf, unsigned int len)
{
	return DIV_ROUND_UP(get_buf_page_count(buf, len),
			    sc->rw_io.credits.num_pages);
}
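
/*
 * Example (illustrative, not part of the original source): assuming
 * rw_io.credits.num_pages == 8 (pages covered by one RDMA R/W credit),
 * a buffer spanning 9 pages costs DIV_ROUND_UP(9, 8) == 2 credits.
 */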

static int smb_direct_create_header(struct smbdirect_socket *sc,
				    int size, int remaining_data_length,
				    int new_credits,
				    struct smbdirect_send_io **sendmsg_out)
{
	struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_send_io *sendmsg;
	struct smbdirect_data_transfer *packet;
	int header_length;
	int ret;

	sendmsg = smb_direct_alloc_sendmsg(sc);
	if (IS_ERR(sendmsg))
		return PTR_ERR(sendmsg);

	/* Fill in the packet header */
	packet = (struct smbdirect_data_transfer *)sendmsg->packet;
	packet->credits_requested = cpu_to_le16(sp->send_credit_target);
	packet->credits_granted = cpu_to_le16(new_credits);

	packet->flags = 0;
	if (manage_keep_alive_before_sending(sc))
		packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);

	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	ksmbd_debug(RDMA,
		    "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
		    le16_to_cpu(packet->credits_requested),
		    le16_to_cpu(packet->credits_granted),
		    le32_to_cpu(packet->data_offset),
		    le32_to_cpu(packet->data_length),
		    le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smbdirect_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length =
			offsetof(struct smbdirect_data_transfer, padding);

	sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(sc->ib.dev, sendmsg->sge[0].addr);
	if (ret) {
		smb_direct_free_sendmsg(sc, sendmsg);
		return ret;
	}

	sendmsg->num_sge = 1;
	sendmsg->sge[0].length = header_length;
	sendmsg->sge[0].lkey = sc->ib.pd->local_dma_lkey;

	*sendmsg_out = sendmsg;
	return 0;
}
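
/*
 * Note (added for clarity; wire layout per the [MS-SMBD] Data Transfer
 * Message): the header has 20 bytes of fields followed by 4 bytes of
 * padding, so data_offset == 24 == sizeof(struct smbdirect_data_transfer)
 * keeps the payload 8-byte aligned, while
 * offsetof(struct smbdirect_data_transfer, padding) == 20 is used for
 * messages without a payload.
 */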

static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries)
{
	bool high = is_vmalloc_addr(buf);
	struct page *page;
	int offset, len;
	int i = 0;

	if (size <= 0 || nentries < get_buf_page_count(buf, size))
		return -EINVAL;

	offset = offset_in_page(buf);
	buf -= offset;
	while (size > 0) {
		len = min_t(int, PAGE_SIZE - offset, size);
		if (high)
			page = vmalloc_to_page(buf);
		else
			page = kmap_to_page(buf);

		if (!sg_list)
			return -EINVAL;
		sg_set_page(sg_list, page, len, offset);
		sg_list = sg_next(sg_list);

		buf += PAGE_SIZE;
		size -= len;
		offset = 0;
		i++;
	}
	return i;
}
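
/*
 * Example (illustrative, not part of the original source): a vmalloc'ed
 * buffer starting 100 bytes into a page with size == 8292 yields three
 * sg entries: (page0, len 3996, offset 100), (page1, len 4096, offset 0)
 * and (page2, len 200, offset 0); vmalloc_to_page() is needed because
 * vmalloc memory is not physically contiguous.
 */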

static int get_mapped_sg_list(struct ib_device *device, void *buf, int size,
			      struct scatterlist *sg_list, int nentries,
			      enum dma_data_direction dir, int *npages)
{
	*npages = get_sg_list(buf, size, sg_list, nentries);
	if (*npages < 0)
		return -EINVAL;
	return ib_dma_map_sg(device, sg_list, *npages, dir);
}

static int post_sendmsg(struct smbdirect_socket *sc,
			struct smbdirect_send_batch *send_ctx,
			struct smbdirect_send_io *msg)
{
	int i;

	for (i = 0; i < msg->num_sge; i++)
		ib_dma_sync_single_for_device(sc->ib.dev,
					      msg->sge[i].addr, msg->sge[i].length,
					      DMA_TO_DEVICE);

	msg->cqe.done = send_done;
	msg->wr.opcode = IB_WR_SEND;
	msg->wr.sg_list = &msg->sge[0];
	msg->wr.num_sge = msg->num_sge;
	msg->wr.next = NULL;

	if (send_ctx) {
		msg->wr.wr_cqe = NULL;
		msg->wr.send_flags = 0;
		if (!list_empty(&send_ctx->msg_list)) {
			struct smbdirect_send_io *last;

			last = list_last_entry(&send_ctx->msg_list,
					       struct smbdirect_send_io,
					       sibling_list);
			last->wr.next = &msg->wr;
		}
		list_add_tail(&msg->sibling_list, &send_ctx->msg_list);
		send_ctx->wr_cnt++;
		return 0;
	}

	msg->wr.wr_cqe = &msg->cqe;
	msg->wr.send_flags = IB_SEND_SIGNALED;
	return smb_direct_post_send(sc, &msg->wr);
}

static int smb_direct_post_send_data(struct smbdirect_socket *sc,
				     struct smbdirect_send_batch *send_ctx,
				     struct kvec *iov, int niov,
				     int remaining_data_length)
{
	int i, j, ret;
	struct smbdirect_send_io *msg;
	int data_length;
	struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1];
	struct smbdirect_send_batch _send_ctx;
	int new_credits;

	if (!send_ctx) {
		smb_direct_send_ctx_init(&_send_ctx, false, 0);
		send_ctx = &_send_ctx;
	}

	ret = wait_for_send_bcredit(sc, send_ctx);
	if (ret)
		goto bcredit_failed;

	ret = wait_for_send_lcredit(sc, send_ctx);
	if (ret)
		goto lcredit_failed;

	ret = wait_for_send_credits(sc, send_ctx);
	if (ret)
		goto credit_failed;

	new_credits = manage_credits_prior_sending(sc);
	if (new_credits == 0 &&
	    atomic_read(&sc->send_io.credits.count) == 0 &&
	    atomic_read(&sc->recv_io.credits.count) == 0) {
		queue_work(sc->workqueue, &sc->recv_io.posted.refill_work);
		ret = wait_event_interruptible(sc->send_io.credits.wait_queue,
					       atomic_read(&sc->send_io.credits.count) >= 1 ||
					       atomic_read(&sc->recv_io.credits.available) >= 1 ||
					       sc->status != SMBDIRECT_SOCKET_CONNECTED);
		if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
			ret = -ENOTCONN;
		if (ret < 0)
			goto credit_failed;

		new_credits = manage_credits_prior_sending(sc);
	}

	data_length = 0;
	for (i = 0; i < niov; i++)
		data_length += iov[i].iov_len;

	ret = smb_direct_create_header(sc, data_length, remaining_data_length,
				       new_credits, &msg);
	if (ret)
		goto header_failed;

	for (i = 0; i < niov; i++) {
		struct ib_sge *sge;
		int sg_cnt;
		int npages;

		sg_init_table(sg, SMBDIRECT_SEND_IO_MAX_SGE - 1);
		sg_cnt = get_mapped_sg_list(sc->ib.dev,
					    iov[i].iov_base, iov[i].iov_len,
					    sg, SMBDIRECT_SEND_IO_MAX_SGE - 1,
					    DMA_TO_DEVICE, &npages);
		if (sg_cnt <= 0) {
			pr_err("failed to map buffer\n");
			ret = -ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMBDIRECT_SEND_IO_MAX_SGE) {
			pr_err("buffer not fitted into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(sc->ib.dev, sg, npages,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey  = sc->ib.pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(sc, send_ctx, msg);
	if (ret)
		goto err;

	/*
	 * From here msg is moved to send_ctx
	 * and we should not free it explicitly.
	 */

	if (send_ctx == &_send_ctx) {
		ret = smb_direct_flush_send_list(sc, send_ctx, true);
		if (ret)
			goto flush_failed;
	}

	return 0;
err:
	smb_direct_free_sendmsg(sc, msg);
flush_failed:
header_failed:
	atomic_inc(&sc->send_io.credits.count);
credit_failed:
	atomic_inc(&sc->send_io.lcredits.count);
lcredit_failed:
	atomic_add(send_ctx->credit, &sc->send_io.bcredits.count);
	send_ctx->credit = 0;
bcredit_failed:
	return ret;
}
1616 
smb_direct_writev(struct ksmbd_transport * t,struct kvec * iov,int niovs,int buflen,bool need_invalidate,unsigned int remote_key)1617 static int smb_direct_writev(struct ksmbd_transport *t,
1618 			     struct kvec *iov, int niovs, int buflen,
1619 			     bool need_invalidate, unsigned int remote_key)
1620 {
1621 	struct smb_direct_transport *st = SMBD_TRANS(t);
1622 	struct smbdirect_socket *sc = &st->socket;
1623 	struct smbdirect_socket_parameters *sp = &sc->parameters;
1624 	size_t remaining_data_length;
1625 	size_t iov_idx;
1626 	size_t iov_ofs;
1627 	size_t max_iov_size = sp->max_send_size -
1628 			sizeof(struct smbdirect_data_transfer);
1629 	int ret;
1630 	struct smbdirect_send_batch send_ctx;
1631 	int error = 0;
1632 
1633 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
1634 		return -ENOTCONN;
1635 
1636 	//FIXME: skip RFC1002 header..
1637 	if (WARN_ON_ONCE(niovs <= 1 || iov[0].iov_len != 4))
1638 		return -EINVAL;
1639 	buflen -= 4;
1640 	iov_idx = 1;
1641 	iov_ofs = 0;
1642 
1643 	remaining_data_length = buflen;
1644 	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
1645 
1646 	smb_direct_send_ctx_init(&send_ctx, need_invalidate, remote_key);
1647 	while (remaining_data_length) {
1648 		struct kvec vecs[SMBDIRECT_SEND_IO_MAX_SGE - 1]; /* minus smbdirect hdr */
1649 		size_t possible_bytes = max_iov_size;
1650 		size_t possible_vecs;
1651 		size_t bytes = 0;
1652 		size_t nvecs = 0;
1653 
1654 		/*
1655 		 * For the last message remaining_data_length should
1656 		 * have been 0 already!
1657 		 */
1658 		if (WARN_ON_ONCE(iov_idx >= niovs)) {
1659 			error = -EINVAL;
1660 			goto done;
1661 		}
1662 
1663 		/*
1664 		 * We have 2 factors which limit the arguments we pass
1665 		 * to smb_direct_post_send_data():
1666 		 *
1667 		 * 1. The number of supported sges for the send,
1668 		 *    of which one is reserved for the smbdirect header,
1669 		 *    and we currently need one SGE per page.
1670 		 * 2. The number of negotiated payload bytes per send.
1671 		 */
1672 		possible_vecs = min_t(size_t, ARRAY_SIZE(vecs), niovs - iov_idx);
1673 
1674 		while (iov_idx < niovs && possible_vecs && possible_bytes) {
1675 			struct kvec *v = &vecs[nvecs];
1676 			int page_count;
1677 
1678 			v->iov_base = ((u8 *)iov[iov_idx].iov_base) + iov_ofs;
1679 			v->iov_len = min_t(size_t,
1680 					   iov[iov_idx].iov_len - iov_ofs,
1681 					   possible_bytes);
1682 			page_count = get_buf_page_count(v->iov_base, v->iov_len);
1683 			if (page_count > possible_vecs) {
1684 				/*
1685 				 * If the number of pages in the buffer
1686 				 * is too large (because we currently require
1687 				 * one SGE per page), we need to limit the
1688 				 * length.
1689 				 *
1690 				 * We know possible_vecs is at least 1,
1691 				 * so we always keep the first page.
1692 				 *
1693 				 * We need to calculate the number of extra
1694 				 * pages (epages) we can also keep.
1695 				 *
1696 				 * We calculate the number of bytes in the
1697 				 * first page (fplen); this should never be
1698 				 * larger than v->iov_len because page_count is
1699 				 * at least 2, but adding a limitation feels
1700 				 * better.
1701 				 *
1702 				 * Then we calculate the number of bytes (elen)
1703 				 * we can keep for the extra pages.
1704 				 */
1705 				size_t epages = possible_vecs - 1;
1706 				size_t fpofs = offset_in_page(v->iov_base);
1707 				size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len);
1708 				size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE);
1709 
1710 				v->iov_len = fplen + elen;
1711 				page_count = get_buf_page_count(v->iov_base, v->iov_len);
1712 				if (WARN_ON_ONCE(page_count > possible_vecs)) {
1713 					/*
1714 					 * Something went wrong in the above
1715 					 * logic...
1716 					 */
1717 					error = -EINVAL;
1718 					goto done;
1719 				}
1720 			}
1721 			possible_vecs -= page_count;
1722 			nvecs += 1;
1723 			possible_bytes -= v->iov_len;
1724 			bytes += v->iov_len;
1725 
1726 			iov_ofs += v->iov_len;
1727 			if (iov_ofs >= iov[iov_idx].iov_len) {
1728 				iov_idx += 1;
1729 				iov_ofs = 0;
1730 			}
1731 		}
1732 
1733 		remaining_data_length -= bytes;
1734 
1735 		ret = smb_direct_post_send_data(sc, &send_ctx,
1736 						vecs, nvecs,
1737 						remaining_data_length);
1738 		if (unlikely(ret)) {
1739 			error = ret;
1740 			goto done;
1741 		}
1742 	}
1743 
1744 done:
1745 	ret = smb_direct_flush_send_list(sc, &send_ctx, true);
1746 	if (unlikely(!ret && error))
1747 		ret = error;
1748 
1749 	/*
1750 	 * As an optimization, we don't wait for individual I/O to finish
1751 	 * before sending the next one.
1752 	 * Send them all and wait for the pending send count to reach 0,
1753 	 * which means all the I/Os have gone out and we are good to return.
1754 	 */
1755 
1756 	wait_event(sc->send_io.pending.zero_wait_queue,
1757 		   atomic_read(&sc->send_io.pending.count) == 0 ||
1758 		   sc->status != SMBDIRECT_SOCKET_CONNECTED);
1759 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED && ret == 0)
1760 		ret = -ENOTCONN;
1761 
1762 	return ret;
1763 }
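
/*
 * Worked example for the chunking loop above (illustrative, assuming
 * the compiled-in defaults): with max_send_size = 1364 and a 24-byte
 * struct smbdirect_data_transfer header, max_iov_size is 1340, so a
 * 4096-byte payload needs at least DIV_ROUND_UP(4096, 1340) = 4 sends
 * (more if the per-send SGE limit bites first).  A hypothetical helper
 * expressing that lower bound:
 */
#if 0	/* example only */
static size_t example_min_sends(size_t payload, size_t max_send_size)
{
	size_t max_iov_size = max_send_size -
			sizeof(struct smbdirect_data_transfer);

	return DIV_ROUND_UP(payload, max_iov_size);
}
#endif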
1764 
1765 static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t,
1766 					struct smbdirect_rw_io *msg,
1767 					enum dma_data_direction dir)
1768 {
1769 	struct smbdirect_socket *sc = &t->socket;
1770 
1771 	rdma_rw_ctx_destroy(&msg->rdma_ctx, sc->ib.qp, sc->ib.qp->port,
1772 			    msg->sgt.sgl, msg->sgt.nents, dir);
1773 	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
1774 	kfree(msg);
1775 }
1776 
1777 static void read_write_done(struct ib_cq *cq, struct ib_wc *wc,
1778 			    enum dma_data_direction dir)
1779 {
1780 	struct smbdirect_rw_io *msg =
1781 		container_of(wc->wr_cqe, struct smbdirect_rw_io, cqe);
1782 	struct smbdirect_socket *sc = msg->socket;
1783 
1784 	if (wc->status != IB_WC_SUCCESS) {
1785 		msg->error = -EIO;
1786 		pr_err("read/write error. opcode = %d, status = %s(%d)\n",
1787 		       wc->opcode, ib_wc_status_msg(wc->status), wc->status);
1788 		if (wc->status != IB_WC_WR_FLUSH_ERR)
1789 			smb_direct_disconnect_rdma_connection(sc);
1790 	}
1791 
1792 	complete(msg->completion);
1793 }
1794 
1795 static void read_done(struct ib_cq *cq, struct ib_wc *wc)
1796 {
1797 	read_write_done(cq, wc, DMA_FROM_DEVICE);
1798 }
1799 
1800 static void write_done(struct ib_cq *cq, struct ib_wc *wc)
1801 {
1802 	read_write_done(cq, wc, DMA_TO_DEVICE);
1803 }
1804 
1805 static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
1806 				void *buf, int buf_len,
1807 				struct smbdirect_buffer_descriptor_v1 *desc,
1808 				unsigned int desc_len,
1809 				bool is_read)
1810 {
1811 	struct smbdirect_socket *sc = &t->socket;
1812 	struct smbdirect_socket_parameters *sp = &sc->parameters;
1813 	struct smbdirect_rw_io *msg, *next_msg;
1814 	int i, ret;
1815 	DECLARE_COMPLETION_ONSTACK(completion);
1816 	struct ib_send_wr *first_wr;
1817 	LIST_HEAD(msg_list);
1818 	char *desc_buf;
1819 	int credits_needed;
1820 	unsigned int desc_buf_len, desc_num = 0;
1821 
1822 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
1823 		return -ENOTCONN;
1824 
1825 	if (buf_len > sp->max_read_write_size)
1826 		return -EINVAL;
1827 
1828 	/* calculate needed credits */
1829 	credits_needed = 0;
1830 	desc_buf = buf;
1831 	for (i = 0; i < desc_len / sizeof(*desc); i++) {
1832 		if (!buf_len)
1833 			break;
1834 
1835 		desc_buf_len = le32_to_cpu(desc[i].length);
1836 		if (!desc_buf_len)
1837 			return -EINVAL;
1838 
1839 		if (desc_buf_len > buf_len) {
1840 			desc_buf_len = buf_len;
1841 			desc[i].length = cpu_to_le32(desc_buf_len);
1842 			buf_len = 0;
1843 		}
1844 
1845 		credits_needed += calc_rw_credits(sc, desc_buf, desc_buf_len);
1846 		desc_buf += desc_buf_len;
1847 		buf_len -= desc_buf_len;
1848 		desc_num++;
1849 	}
1850 
1851 	ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
1852 		    str_read_write(is_read), buf_len, credits_needed);
1853 
1854 	ret = wait_for_rw_credits(sc, credits_needed);
1855 	if (ret < 0)
1856 		return ret;
1857 
1858 	/* build rdma_rw_ctx for each descriptor */
1859 	desc_buf = buf;
1860 	for (i = 0; i < desc_num; i++) {
1861 		msg = kzalloc_flex(*msg, sg_list, SG_CHUNK_SIZE,
1862 				   KSMBD_DEFAULT_GFP);
1863 		if (!msg) {
1864 			ret = -ENOMEM;
1865 			goto out;
1866 		}
1867 
1868 		desc_buf_len = le32_to_cpu(desc[i].length);
1869 
1870 		msg->socket = sc;
1871 		msg->cqe.done = is_read ? read_done : write_done;
1872 		msg->completion = &completion;
1873 
1874 		msg->sgt.sgl = &msg->sg_list[0];
1875 		ret = sg_alloc_table_chained(&msg->sgt,
1876 					     get_buf_page_count(desc_buf, desc_buf_len),
1877 					     msg->sg_list, SG_CHUNK_SIZE);
1878 		if (ret) {
1879 			ret = -ENOMEM;
1880 			goto free_msg;
1881 		}
1882 
1883 		ret = get_sg_list(desc_buf, desc_buf_len,
1884 				  msg->sgt.sgl, msg->sgt.orig_nents);
1885 		if (ret < 0)
1886 			goto free_table;
1887 
1888 		ret = rdma_rw_ctx_init(&msg->rdma_ctx, sc->ib.qp, sc->ib.qp->port,
1889 				       msg->sgt.sgl,
1890 				       get_buf_page_count(desc_buf, desc_buf_len),
1891 				       0,
1892 				       le64_to_cpu(desc[i].offset),
1893 				       le32_to_cpu(desc[i].token),
1894 				       is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
1895 		if (ret < 0) {
1896 			pr_err("failed to init rdma_rw_ctx: %d\n", ret);
1897 			goto free_table;
1898 		}
1899 
1900 		list_add_tail(&msg->list, &msg_list);
1901 		desc_buf += desc_buf_len;
1902 	}
1903 
1904 	/* concatenate work requests of rdma_rw_ctxs */
1905 	first_wr = NULL;
1906 	list_for_each_entry_reverse(msg, &msg_list, list) {
1907 		first_wr = rdma_rw_ctx_wrs(&msg->rdma_ctx, sc->ib.qp, sc->ib.qp->port,
1908 					   &msg->cqe, first_wr);
1909 	}
1910 
1911 	ret = ib_post_send(sc->ib.qp, first_wr, NULL);
1912 	if (ret) {
1913 		pr_err("failed to post send wr for RDMA R/W: %d\n", ret);
1914 		goto out;
1915 	}
1916 
1917 	msg = list_last_entry(&msg_list, struct smbdirect_rw_io, list);
1918 	wait_for_completion(&completion);
1919 	ret = msg->error;
1920 out:
1921 	list_for_each_entry_safe(msg, next_msg, &msg_list, list) {
1922 		list_del(&msg->list);
1923 		smb_direct_free_rdma_rw_msg(t, msg,
1924 					    is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
1925 	}
1926 	atomic_add(credits_needed, &sc->rw_io.credits.count);
1927 	wake_up(&sc->rw_io.credits.wait_queue);
1928 	return ret;
1929 
1930 free_table:
1931 	sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
1932 free_msg:
1933 	kfree(msg);
1934 	goto out;
1935 }
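
/*
 * Worked example for the credit accounting above (illustrative,
 * assuming calc_rw_credits() charges one credit per
 * sc->rw_io.credits.num_pages buffer pages): two descriptors covering
 * 9 and 3 pages with num_pages = 4 cost DIV_ROUND_UP(9, 4) +
 * DIV_ROUND_UP(3, 4) = 3 + 1 = 4 credits, all of which
 * wait_for_rw_credits() must reserve before any rdma_rw_ctx is built.
 * A hypothetical helper with that logic:
 */
#if 0	/* example only */
static int example_rw_credits(const unsigned int pages[], int n,
			      unsigned int num_pages_per_credit)
{
	int i, credits = 0;

	for (i = 0; i < n; i++)
		credits += DIV_ROUND_UP(pages[i], num_pages_per_credit);
	return credits;
}
#endif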
1936 
1937 static int smb_direct_rdma_write(struct ksmbd_transport *t,
1938 				 void *buf, unsigned int buflen,
1939 				 struct smbdirect_buffer_descriptor_v1 *desc,
1940 				 unsigned int desc_len)
1941 {
1942 	return smb_direct_rdma_xmit(SMBD_TRANS(t), buf, buflen,
1943 				    desc, desc_len, false);
1944 }
1945 
1946 static int smb_direct_rdma_read(struct ksmbd_transport *t,
1947 				void *buf, unsigned int buflen,
1948 				struct smbdirect_buffer_descriptor_v1 *desc,
1949 				unsigned int desc_len)
1950 {
1951 	return smb_direct_rdma_xmit(SMBD_TRANS(t), buf, buflen,
1952 				    desc, desc_len, true);
1953 }
1954 
1955 static void smb_direct_disconnect(struct ksmbd_transport *t)
1956 {
1957 	struct smb_direct_transport *st = SMBD_TRANS(t);
1958 	struct smbdirect_socket *sc = &st->socket;
1959 
1960 	ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", sc->rdma.cm_id);
1961 
1962 	free_transport(st);
1963 }
1964 
1965 static void smb_direct_shutdown(struct ksmbd_transport *t)
1966 {
1967 	struct smb_direct_transport *st = SMBD_TRANS(t);
1968 	struct smbdirect_socket *sc = &st->socket;
1969 
1970 	ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", sc->rdma.cm_id);
1971 
1972 	smb_direct_disconnect_rdma_work(&sc->disconnect_work);
1973 }
1974 
1975 static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
1976 				 struct rdma_cm_event *event)
1977 {
1978 	struct smbdirect_socket *sc = cm_id->context;
1979 	unsigned long flags;
1980 
1981 	ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
1982 		    cm_id, rdma_event_msg(event->event), event->event);
1983 
1984 	switch (event->event) {
1985 	case RDMA_CM_EVENT_ESTABLISHED: {
1986 		/*
1987 		 * Some drivers (at least mlx5_ib and irdma in roce mode)
1988 		 * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED,
1989 		 * so we need to adjust our expectation in that case.
1990 		 *
1991 		 * If smb_direct_negotiate_recv_done was called first,
1992 		 * it initialized sc->connect.work but left it for us
1993 		 * to start, so that we move into
1994 		 * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED before
1995 		 * smb_direct_negotiate_recv_work() runs.
1996 		 *
1997 		 * If smb_direct_negotiate_recv_done didn't happen
1998 		 * yet, sc->connect.work is still disabled and
1999 		 * queue_work() is a no-op.
2000 		 */
2001 		if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING))
2002 			break;
2003 		sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
2004 		spin_lock_irqsave(&sc->connect.lock, flags);
2005 		if (!sc->first_error)
2006 			queue_work(sc->workqueue, &sc->connect.work);
2007 		spin_unlock_irqrestore(&sc->connect.lock, flags);
2008 		wake_up(&sc->status_wait);
2009 		break;
2010 	}
2011 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
2012 	case RDMA_CM_EVENT_DISCONNECTED: {
2013 		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
2014 		smb_direct_disconnect_rdma_work(&sc->disconnect_work);
2015 		if (sc->ib.qp)
2016 			ib_drain_qp(sc->ib.qp);
2017 		break;
2018 	}
2019 	case RDMA_CM_EVENT_CONNECT_ERROR: {
2020 		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
2021 		smb_direct_disconnect_rdma_work(&sc->disconnect_work);
2022 		break;
2023 	}
2024 	default:
2025 		pr_err("Unexpected RDMA CM event. cm_id=%p, event=%s (%d)\n",
2026 		       cm_id, rdma_event_msg(event->event),
2027 		       event->event);
2028 		break;
2029 	}
2030 	return 0;
2031 }
2032 
2033 static void smb_direct_qpair_handler(struct ib_event *event, void *context)
2034 {
2035 	struct smbdirect_socket *sc = context;
2036 
2037 	ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n",
2038 		    sc->rdma.cm_id, ib_event_msg(event->event), event->event);
2039 
2040 	switch (event->event) {
2041 	case IB_EVENT_CQ_ERR:
2042 	case IB_EVENT_QP_FATAL:
2043 		smb_direct_disconnect_rdma_connection(sc);
2044 		break;
2045 	default:
2046 		break;
2047 	}
2048 }
2049 
2050 static int smb_direct_send_negotiate_response(struct smbdirect_socket *sc,
2051 					      int failed)
2052 {
2053 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2054 	struct smbdirect_send_io *sendmsg;
2055 	struct smbdirect_negotiate_resp *resp;
2056 	int ret;
2057 
2058 	sendmsg = smb_direct_alloc_sendmsg(sc);
2059 	if (IS_ERR(sendmsg))
2060 		return -ENOMEM;
2061 
2062 	resp = (struct smbdirect_negotiate_resp *)sendmsg->packet;
2063 	if (failed) {
2064 		memset(resp, 0, sizeof(*resp));
2065 		resp->min_version = SMB_DIRECT_VERSION_LE;
2066 		resp->max_version = SMB_DIRECT_VERSION_LE;
2067 		resp->status = STATUS_NOT_SUPPORTED;
2068 
2069 		sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
2070 	} else {
2071 		resp->status = STATUS_SUCCESS;
2072 		resp->min_version = SMB_DIRECT_VERSION_LE;
2073 		resp->max_version = SMB_DIRECT_VERSION_LE;
2074 		resp->negotiated_version = SMB_DIRECT_VERSION_LE;
2075 		resp->reserved = 0;
2076 		resp->credits_requested =
2077 				cpu_to_le16(sp->send_credit_target);
2078 		resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(sc));
2079 		resp->max_readwrite_size = cpu_to_le32(sp->max_read_write_size);
2080 		resp->preferred_send_size = cpu_to_le32(sp->max_send_size);
2081 		resp->max_receive_size = cpu_to_le32(sp->max_recv_size);
2082 		resp->max_fragmented_size =
2083 				cpu_to_le32(sp->max_fragmented_recv_size);
2084 
2085 		atomic_set(&sc->send_io.bcredits.count, 1);
2086 		sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
2087 		sc->status = SMBDIRECT_SOCKET_CONNECTED;
2088 	}
2089 
2090 	sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
2091 						 (void *)resp, sizeof(*resp),
2092 						 DMA_TO_DEVICE);
2093 	ret = ib_dma_mapping_error(sc->ib.dev, sendmsg->sge[0].addr);
2094 	if (ret) {
2095 		smb_direct_free_sendmsg(sc, sendmsg);
2096 		return ret;
2097 	}
2098 
2099 	sendmsg->num_sge = 1;
2100 	sendmsg->sge[0].length = sizeof(*resp);
2101 	sendmsg->sge[0].lkey = sc->ib.pd->local_dma_lkey;
2102 
2103 	ret = post_sendmsg(sc, NULL, sendmsg);
2104 	if (ret) {
2105 		smb_direct_free_sendmsg(sc, sendmsg);
2106 		return ret;
2107 	}
2108 
2109 	wait_event(sc->send_io.pending.zero_wait_queue,
2110 		   atomic_read(&sc->send_io.pending.count) == 0 ||
2111 		   sc->status != SMBDIRECT_SOCKET_CONNECTED);
2112 	if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
2113 		return -ENOTCONN;
2114 
2115 	return 0;
2116 }
2117 
2118 static int smb_direct_accept_client(struct smbdirect_socket *sc)
2119 {
2120 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2121 	struct rdma_conn_param conn_param;
2122 	__be32 ird_ord_hdr[2];
2123 	int ret;
2124 
2125 	/*
2126 	 * smb_direct_handle_connect_request()
2127 	 * already negotiated sp->initiator_depth
2128 	 * and sp->responder_resources
2129 	 */
2130 	memset(&conn_param, 0, sizeof(conn_param));
2131 	conn_param.initiator_depth = sp->initiator_depth;
2132 	conn_param.responder_resources = sp->responder_resources;
2133 
2134 	if (sc->rdma.legacy_iwarp) {
2135 		ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
2136 		ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
2137 		conn_param.private_data = ird_ord_hdr;
2138 		conn_param.private_data_len = sizeof(ird_ord_hdr);
2139 	} else {
2140 		conn_param.private_data = NULL;
2141 		conn_param.private_data_len = 0;
2142 	}
2143 	conn_param.retry_count = SMB_DIRECT_CM_RETRY;
2144 	conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY;
2145 	conn_param.flow_control = 0;
2146 
2147 	/*
2148 	 * start with the negotiate timeout and SMBDIRECT_KEEPALIVE_PENDING
2149 	 * so that the timer will cause a disconnect.
2150 	 */
2151 	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
2152 	mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
2153 			 msecs_to_jiffies(sp->negotiate_timeout_msec));
2154 
2155 	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED);
2156 	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING;
2157 	ret = rdma_accept(sc->rdma.cm_id, &conn_param);
2158 	if (ret) {
2159 		pr_err("error at rdma_accept: %d\n", ret);
2160 		return ret;
2161 	}
2162 	return 0;
2163 }
2164 
2165 static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
2166 {
2167 	struct smbdirect_recv_io *recvmsg;
2168 	bool recv_posted = false;
2169 	int ret;
2170 
2171 	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
2172 	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED;
2173 
2174 	sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REQ;
2175 
2176 	recvmsg = get_free_recvmsg(sc);
2177 	if (!recvmsg)
2178 		return -ENOMEM;
2179 	recvmsg->cqe.done = smb_direct_negotiate_recv_done;
2180 
2181 	ret = smb_direct_post_recv(sc, recvmsg);
2182 	if (ret) {
2183 		pr_err("Can't post recv: %d\n", ret);
2184 		goto out_err;
2185 	}
2186 	recv_posted = true;
2187 
2188 	ret = smb_direct_accept_client(sc);
2189 	if (ret) {
2190 		pr_err("Can't accept client\n");
2191 		goto out_err;
2192 	}
2193 
2194 	return 0;
2195 out_err:
2196 	/*
2197 	 * If the recv was never posted, return it to the free list.
2198 	 * If it was posted, leave it alone so disconnect teardown can
2199 	 * drain the QP and complete it (flush) and the completion path
2200 	 * will unmap it exactly once.
2201 	 */
2202 	if (!recv_posted)
2203 		put_recvmsg(sc, recvmsg);
2204 	return ret;
2205 }
2206 
2207 static int smb_direct_init_params(struct smbdirect_socket *sc)
2208 {
2209 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2210 	int max_send_sges;
2211 	unsigned int maxpages;
2212 
2213 	/* We need 3 more SGEs, because the SMB_DIRECT header, the SMB2 header
2214 	 * and the SMB2 response could each be mapped.
2215 	 */
2216 	max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
2217 	if (max_send_sges > SMBDIRECT_SEND_IO_MAX_SGE) {
2218 		pr_err("max_send_size %d is too large\n", sp->max_send_size);
2219 		return -EINVAL;
2220 	}
2221 
2222 	atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
2223 
2224 	maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
2225 	sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev,
2226 						  sc->rdma.cm_id->port_num,
2227 						  maxpages);
2228 	sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max);
2229 	/* add one extra in order to handle unaligned pages */
2230 	sc->rw_io.credits.max += 1;
2231 
2232 	sc->recv_io.credits.target = 1;
2233 
2234 	atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max);
2235 
2236 	return 0;
2237 }
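
/*
 * Worked example for the rw credit sizing above (illustrative; the MR
 * factor is device dependent): with max_read_write_size = 8 MiB and
 * 4 KiB pages, maxpages = 2048.  If rdma_rw_mr_factor() were to return
 * 16, num_pages would be DIV_ROUND_UP(2048, 16) = 128 pages per credit
 * and credits.max would end up as 16 + 1 = 17 after the extra credit
 * for unaligned pages.
 */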
2238 
2239 static void smb_direct_destroy_pools(struct smbdirect_socket *sc)
2240 {
2241 	struct smbdirect_recv_io *recvmsg;
2242 
2243 	while ((recvmsg = get_free_recvmsg(sc)))
2244 		mempool_free(recvmsg, sc->recv_io.mem.pool);
2245 
2246 	mempool_destroy(sc->recv_io.mem.pool);
2247 	sc->recv_io.mem.pool = NULL;
2248 
2249 	kmem_cache_destroy(sc->recv_io.mem.cache);
2250 	sc->recv_io.mem.cache = NULL;
2251 
2252 	mempool_destroy(sc->send_io.mem.pool);
2253 	sc->send_io.mem.pool = NULL;
2254 
2255 	kmem_cache_destroy(sc->send_io.mem.cache);
2256 	sc->send_io.mem.cache = NULL;
2257 }
2258 
2259 static int smb_direct_create_pools(struct smbdirect_socket *sc)
2260 {
2261 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2262 	char name[80];
2263 	int i;
2264 	struct smbdirect_recv_io *recvmsg;
2265 
2266 	snprintf(name, sizeof(name), "smbdirect_send_io_pool_%p", sc);
2267 	sc->send_io.mem.cache = kmem_cache_create(name,
2268 					     sizeof(struct smbdirect_send_io) +
2269 					      sizeof(struct smbdirect_negotiate_resp),
2270 					     0, SLAB_HWCACHE_ALIGN, NULL);
2271 	if (!sc->send_io.mem.cache)
2272 		return -ENOMEM;
2273 
2274 	sc->send_io.mem.pool = mempool_create(sp->send_credit_target,
2275 					    mempool_alloc_slab, mempool_free_slab,
2276 					    sc->send_io.mem.cache);
2277 	if (!sc->send_io.mem.pool)
2278 		goto err;
2279 
2280 	snprintf(name, sizeof(name), "smbdirect_recv_io_pool_%p", sc);
2281 	sc->recv_io.mem.cache = kmem_cache_create(name,
2282 					     sizeof(struct smbdirect_recv_io) +
2283 					     sp->max_recv_size,
2284 					     0, SLAB_HWCACHE_ALIGN, NULL);
2285 	if (!sc->recv_io.mem.cache)
2286 		goto err;
2287 
2288 	sc->recv_io.mem.pool =
2289 		mempool_create(sp->recv_credit_max, mempool_alloc_slab,
2290 			       mempool_free_slab, sc->recv_io.mem.cache);
2291 	if (!sc->recv_io.mem.pool)
2292 		goto err;
2293 
2294 	for (i = 0; i < sp->recv_credit_max; i++) {
2295 		recvmsg = mempool_alloc(sc->recv_io.mem.pool, KSMBD_DEFAULT_GFP);
2296 		if (!recvmsg)
2297 			goto err;
2298 		recvmsg->socket = sc;
2299 		recvmsg->sge.length = 0;
2300 		list_add(&recvmsg->list, &sc->recv_io.free.list);
2301 	}
2302 
2303 	return 0;
2304 err:
2305 	smb_direct_destroy_pools(sc);
2306 	return -ENOMEM;
2307 }
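
/*
 * Sizing note on the pools above (illustrative, using the compiled-in
 * defaults): mempool_create() guarantees its min_nr objects, so the
 * send pool preallocates send_credit_target (255) objects large enough
 * for a smbdirect_send_io plus a negotiate response, and the recv pool
 * preallocates recv_credit_max (255) objects of
 * sizeof(struct smbdirect_recv_io) + max_recv_size bytes; every
 * grantable credit keeps a buffer even under memory pressure.
 */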
2308 
2309 static u32 smb_direct_rdma_rw_send_wrs(struct ib_device *dev, const struct ib_qp_init_attr *attr)
2310 {
2311 	/*
2312 	 * This could be split out of rdma_rw_init_qp()
2313 	 * and be a helper function next to rdma_rw_mr_factor()
2314 	 *
2315 	 * We can't check unlikely(rdma_rw_force_mr) here,
2316 	 * but that is most likely 0 anyway.
2317 	 */
2318 	u32 factor;
2319 
2320 	WARN_ON_ONCE(attr->port_num == 0);
2321 
2322 	/*
2323 	 * Each context needs at least one RDMA READ or WRITE WR.
2324 	 *
2325 	 * For some hardware we might need more, eventually we should ask the
2326 	 * HCA driver for a multiplier here.
2327 	 */
2328 	factor = 1;
2329 
2330 	/*
2331 	 * If the device needs MRs to perform RDMA READ or WRITE operations,
2332 	 * we'll need two additional MRs for the registrations and the
2333 	 * invalidation.
2334 	 */
2335 	if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd)
2336 		factor += 2;	/* inv + reg */
2337 
2338 	return factor * attr->cap.max_rdma_ctxs;
2339 }
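
/*
 * Worked example (illustrative): on an iWarp device, or one with
 * attrs.max_sgl_rd set, the factor is 1 + 2 = 3, so
 * cap.max_rdma_ctxs = 17 reserves 3 * 17 = 51 additional send WRs;
 * on a plain InfiniBand device the factor stays 1 and only 17 WRs
 * are added.
 */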
2340 
2341 static int smb_direct_create_qpair(struct smbdirect_socket *sc)
2342 {
2343 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2344 	int ret;
2345 	struct ib_qp_cap qp_cap;
2346 	struct ib_qp_init_attr qp_attr;
2347 	u32 max_send_wr;
2348 	u32 rdma_send_wr;
2349 
2350 	/*
2351 	 * Note that {rdma,ib}_create_qp() will call
2352 	 * rdma_rw_init_qp() if cap->max_rdma_ctxs is not 0.
2353 	 * It will adjust cap->max_send_wr to the required
2354 	 * number of additional WRs for the RDMA RW operations.
2355 	 * It will cap cap->max_send_wr to the device limit.
2356 	 *
2357 	 * +1 for ib_drain_qp
2358 	 */
2359 	qp_cap.max_send_wr = sp->send_credit_target + 1;
2360 	qp_cap.max_recv_wr = sp->recv_credit_max + 1;
2361 	qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
2362 	qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
2363 	qp_cap.max_inline_data = 0;
2364 	qp_cap.max_rdma_ctxs = sc->rw_io.credits.max;
2365 
2366 	/*
2367 	 * Find out the number of max_send_wr
2368 	 * after rdma_rw_init_qp() adjusted it.
2369 	 *
2370 	 * We only do it on a temporary variable,
2371 	 * as rdma_create_qp() will trigger
2372 	 * rdma_rw_init_qp() again.
2373 	 */
2374 	memset(&qp_attr, 0, sizeof(qp_attr));
2375 	qp_attr.cap = qp_cap;
2376 	qp_attr.port_num = sc->rdma.cm_id->port_num;
2377 	rdma_send_wr = smb_direct_rdma_rw_send_wrs(sc->ib.dev, &qp_attr);
2378 	max_send_wr = qp_cap.max_send_wr + rdma_send_wr;
2379 
2380 	if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe ||
2381 	    qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) {
2382 		pr_err("Possible CQE overrun: max_send_wr %d\n",
2383 		       qp_cap.max_send_wr);
2384 		pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
2385 		       IB_DEVICE_NAME_MAX,
2386 		       sc->ib.dev->name,
2387 		       sc->ib.dev->attrs.max_cqe,
2388 		       sc->ib.dev->attrs.max_qp_wr);
2389 		pr_err("consider lowering send_credit_target = %d\n",
2390 		       sp->send_credit_target);
2391 		return -EINVAL;
2392 	}
2393 
2394 	if (qp_cap.max_rdma_ctxs &&
2395 	    (max_send_wr >= sc->ib.dev->attrs.max_cqe ||
2396 	     max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) {
2397 		pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n",
2398 		       rdma_send_wr, qp_cap.max_send_wr, max_send_wr);
2399 		pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
2400 		       IB_DEVICE_NAME_MAX,
2401 		       sc->ib.dev->name,
2402 		       sc->ib.dev->attrs.max_cqe,
2403 		       sc->ib.dev->attrs.max_qp_wr);
2404 		pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n",
2405 		       sp->send_credit_target, qp_cap.max_rdma_ctxs);
2406 		return -EINVAL;
2407 	}
2408 
2409 	if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe ||
2410 	    qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) {
2411 		pr_err("Possible CQE overrun: max_recv_wr %d\n",
2412 		       qp_cap.max_recv_wr);
2413 		pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
2414 		       IB_DEVICE_NAME_MAX,
2415 		       sc->ib.dev->name,
2416 		       sc->ib.dev->attrs.max_cqe,
2417 		       sc->ib.dev->attrs.max_qp_wr);
2418 		pr_err("consider lowering receive_credit_max = %d\n",
2419 		       sp->recv_credit_max);
2420 		return -EINVAL;
2421 	}
2422 
2423 	if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge ||
2424 	    qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) {
2425 		pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
2426 		       IB_DEVICE_NAME_MAX,
2427 		       sc->ib.dev->name,
2428 		       sc->ib.dev->attrs.max_send_sge,
2429 		       sc->ib.dev->attrs.max_recv_sge);
2430 		return -EINVAL;
2431 	}
2432 
2433 	sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
2434 	if (IS_ERR(sc->ib.pd)) {
2435 		pr_err("Can't create RDMA PD\n");
2436 		ret = PTR_ERR(sc->ib.pd);
2437 		sc->ib.pd = NULL;
2438 		return ret;
2439 	}
2440 
2441 	sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc,
2442 					 max_send_wr,
2443 					 IB_POLL_WORKQUEUE);
2444 	if (IS_ERR(sc->ib.send_cq)) {
2445 		pr_err("Can't create RDMA send CQ\n");
2446 		ret = PTR_ERR(sc->ib.send_cq);
2447 		sc->ib.send_cq = NULL;
2448 		goto err;
2449 	}
2450 
2451 	sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc,
2452 					 qp_cap.max_recv_wr,
2453 					 IB_POLL_WORKQUEUE);
2454 	if (IS_ERR(sc->ib.recv_cq)) {
2455 		pr_err("Can't create RDMA recv CQ\n");
2456 		ret = PTR_ERR(sc->ib.recv_cq);
2457 		sc->ib.recv_cq = NULL;
2458 		goto err;
2459 	}
2460 
2461 	/*
2462 	 * We reset completely here, as the above
2463 	 * use was just temporary to calculate
2464 	 * max_send_wr and rdma_send_wr.
2465 	 *
2466 	 * rdma_create_qp() will trigger rdma_rw_init_qp()
2467 	 * again if max_rdma_ctxs is not 0.
2468 	 */
2469 	memset(&qp_attr, 0, sizeof(qp_attr));
2470 	qp_attr.event_handler = smb_direct_qpair_handler;
2471 	qp_attr.qp_context = sc;
2472 	qp_attr.cap = qp_cap;
2473 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
2474 	qp_attr.qp_type = IB_QPT_RC;
2475 	qp_attr.send_cq = sc->ib.send_cq;
2476 	qp_attr.recv_cq = sc->ib.recv_cq;
2477 	qp_attr.port_num = ~0;
2478 
2479 	ret = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
2480 	if (ret) {
2481 		pr_err("Can't create RDMA QP: %d\n", ret);
2482 		goto err;
2483 	}
2484 
2485 	sc->ib.qp = sc->rdma.cm_id->qp;
2486 	sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
2487 
2488 	return 0;
2489 err:
2490 	if (sc->ib.qp) {
2491 		sc->ib.qp = NULL;
2492 		rdma_destroy_qp(sc->rdma.cm_id);
2493 	}
2494 	if (sc->ib.recv_cq) {
2495 		ib_destroy_cq(sc->ib.recv_cq);
2496 		sc->ib.recv_cq = NULL;
2497 	}
2498 	if (sc->ib.send_cq) {
2499 		ib_destroy_cq(sc->ib.send_cq);
2500 		sc->ib.send_cq = NULL;
2501 	}
2502 	if (sc->ib.pd) {
2503 		ib_dealloc_pd(sc->ib.pd);
2504 		sc->ib.pd = NULL;
2505 	}
2506 	return ret;
2507 }
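
/*
 * Worked example for the WR budgeting above (illustrative, with the
 * compiled-in defaults): qp_cap.max_send_wr = 255 + 1 = 256 and, with
 * max_rdma_ctxs = 17 on an iWarp device (factor 3, see
 * smb_direct_rdma_rw_send_wrs()), rdma_send_wr = 51; the send CQ is
 * therefore sized for max_send_wr = 256 + 51 = 307 entries, while
 * rdma_create_qp() re-runs the same adjustment internally.
 */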
2508 
2509 static int smb_direct_prepare(struct ksmbd_transport *t)
2510 {
2511 	struct smb_direct_transport *st = SMBD_TRANS(t);
2512 	struct smbdirect_socket *sc = &st->socket;
2513 	struct smbdirect_socket_parameters *sp = &sc->parameters;
2514 	struct smbdirect_recv_io *recvmsg;
2515 	struct smbdirect_negotiate_req *req;
2516 	unsigned long flags;
2517 	int ret;
2518 
2519 	/*
2520 	 * We are waiting to pass the following states:
2521 	 *
2522 	 * SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED
2523 	 * SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING
2524 	 * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
2525 	 *
2526 	 * To finally get to SMBDIRECT_SOCKET_NEGOTIATE_RUNNING
2527 	 * in order to continue below.
2528 	 *
2529 	 * Everything else is unexpected and an error.
2530 	 */
2531 	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
2532 	ret = wait_event_interruptible_timeout(sc->status_wait,
2533 					sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED &&
2534 					sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING &&
2535 					sc->status != SMBDIRECT_SOCKET_NEGOTIATE_NEEDED,
2536 					msecs_to_jiffies(sp->negotiate_timeout_msec));
2537 	if (ret <= 0 || sc->status != SMBDIRECT_SOCKET_NEGOTIATE_RUNNING)
2538 		return ret < 0 ? ret : -ETIMEDOUT;
2539 
2540 	recvmsg = get_first_reassembly(sc);
2541 	if (!recvmsg)
2542 		return -ECONNABORTED;
2543 
2544 	ret = smb_direct_check_recvmsg(recvmsg);
2545 	if (ret)
2546 		goto put;
2547 
2548 	req = (struct smbdirect_negotiate_req *)recvmsg->packet;
2549 	sp->max_recv_size = min_t(u32, sp->max_recv_size,
2550 				  le32_to_cpu(req->preferred_send_size));
2551 	sp->max_send_size = min_t(u32, sp->max_send_size,
2552 				  le32_to_cpu(req->max_receive_size));
2553 	sp->max_fragmented_send_size =
2554 		le32_to_cpu(req->max_fragmented_size);
2555 	/*
2556 	 * The maximum fragmented upper-layer payload receive size supported
2557 	 *
2558 	 * Assume max_payload_per_credit is
2559 	 * smb_direct_max_receive_size - 24 = 1340
2560 	 *
2561 	 * The maximum number would be
2562 	 * smb_direct_receive_credit_max * max_payload_per_credit
2563 	 *
2564 	 *                       1340 * 255 = 341700 (0x536C4)
2565 	 *
2566 	 * The minimum value from the spec is 131072 (0x20000)
2567 	 *
2568 	 * For now we use the logic we used before:
2569 	 *                 (1364 * 255) / 2 = 173910 (0x2A756)
2570 	 *
2571 	 * We need to adjust this here in case the peer
2572 	 * lowered sp->max_recv_size.
2573 	 *
2574 	 * TODO: instead of adjusting max_fragmented_recv_size
2575 	 * we should adjust the number of available buffers,
2576 	 * but for now we keep the current logic.
2577 	 */
2578 	sp->max_fragmented_recv_size =
2579 		(sp->recv_credit_max * sp->max_recv_size) / 2;
2580 	sc->recv_io.credits.target = le16_to_cpu(req->credits_requested);
2581 	sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);
2582 	sc->recv_io.credits.target = max_t(u16, sc->recv_io.credits.target, 1);
2583 
2584 put:
2585 	spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
2586 	sc->recv_io.reassembly.queue_length--;
2587 	list_del(&recvmsg->list);
2588 	spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
2589 	put_recvmsg(sc, recvmsg);
2590 
2591 	if (ret == -ECONNABORTED)
2592 		return ret;
2593 
2594 	if (ret)
2595 		goto respond;
2596 
2597 	/*
2598 	 * We negotiated with success, so we need to refill the recv queue.
2599 	 * We do that with sc->idle.immediate_work still being disabled
2600 	 * via smbdirect_socket_init(), so that queue_work(sc->workqueue,
2601 	 * &sc->idle.immediate_work) in smb_direct_post_recv_credits()
2602 	 * is a no-op.
2603 	 *
2604 	 * The message that grants the credits to the client is
2605 	 * the negotiate response.
2606 	 */
2607 	INIT_WORK(&sc->recv_io.posted.refill_work, smb_direct_post_recv_credits);
2608 	smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work);
2609 	if (unlikely(sc->first_error))
2610 		return sc->first_error;
2611 	INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work);
2612 
2613 respond:
2614 	ret = smb_direct_send_negotiate_response(sc, ret);
2615 
2616 	return ret;
2617 }
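
/*
 * Worked example for the parameter clamping above (illustrative,
 * assuming the compiled-in defaults of 1364 bytes / 255 credits): a
 * client advertising preferred_send_size = 1280 and
 * max_receive_size = 8192 yields
 * max_recv_size = min(1364, 1280) = 1280 and
 * max_send_size = min(1364, 8192) = 1364, and the fragmented receive
 * limit becomes (255 * 1280) / 2 = 163200 bytes.
 */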
2618 
2619 static int smb_direct_connect(struct smbdirect_socket *sc)
2620 {
2621 	struct smbdirect_recv_io *recv_io;
2622 	int ret;
2623 
2624 	ret = smb_direct_init_params(sc);
2625 	if (ret) {
2626 		pr_err("Can't configure RDMA parameters\n");
2627 		return ret;
2628 	}
2629 
2630 	ret = smb_direct_create_pools(sc);
2631 	if (ret) {
2632 		pr_err("Can't init RDMA pool: %d\n", ret);
2633 		return ret;
2634 	}
2635 
2636 	list_for_each_entry(recv_io, &sc->recv_io.free.list, list)
2637 		recv_io->cqe.done = recv_done;
2638 
2639 	ret = smb_direct_create_qpair(sc);
2640 	if (ret) {
2641 		pr_err("Can't accept RDMA client: %d\n", ret);
2642 		return ret;
2643 	}
2644 
2645 	ret = smb_direct_prepare_negotiation(sc);
2646 	if (ret) {
2647 		pr_err("Can't negotiate: %d\n", ret);
2648 		return ret;
2649 	}
2650 	return 0;
2651 }
2652 
2653 static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
2654 {
2655 	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
2656 		return false;
2657 	if (attrs->max_fast_reg_page_list_len == 0)
2658 		return false;
2659 	return true;
2660 }
2661 
2662 static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id,
2663 					     struct rdma_cm_event *event)
2664 {
2665 	struct smb_direct_listener *listener = new_cm_id->context;
2666 	struct smb_direct_transport *t;
2667 	struct smbdirect_socket *sc;
2668 	struct smbdirect_socket_parameters *sp;
2669 	struct task_struct *handler;
2670 	u8 peer_initiator_depth;
2671 	u8 peer_responder_resources;
2672 	int ret;
2673 
2674 	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
2675 		ksmbd_debug(RDMA,
2676 			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
2677 			    new_cm_id->device->attrs.device_cap_flags);
2678 		return -EPROTONOSUPPORT;
2679 	}
2680 
2681 	t = alloc_transport(new_cm_id);
2682 	if (!t)
2683 		return -ENOMEM;
2684 	sc = &t->socket;
2685 	sp = &sc->parameters;
2686 
2687 	peer_initiator_depth = event->param.conn.initiator_depth;
2688 	peer_responder_resources = event->param.conn.responder_resources;
2689 	if (rdma_protocol_iwarp(new_cm_id->device, new_cm_id->port_num) &&
2690 	    event->param.conn.private_data_len == 8) {
2691 		/*
2692 		 * Legacy clients with only iWarp MPA v1 support
2693 		 * need a private blob in order to negotiate
2694 		 * the IRD/ORD values.
2695 		 */
2696 		const __be32 *ird_ord_hdr = event->param.conn.private_data;
2697 		u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
2698 		u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
2699 
2700 		/*
2701 		 * cifs.ko sends the legacy IRD/ORD negotiation
2702 		 * even if iWarp MPA v2 was used.
2703 		 *
2704 		 * Here we check that the values match and only
2705 		 * mark the client as legacy if they don't match.
2706 		 */
2707 		if ((u32)event->param.conn.initiator_depth != ird32 ||
2708 		    (u32)event->param.conn.responder_resources != ord32) {
2709 			/*
2710 			 * There are broken clients (old cifs.ko)
2711 			 * that use little endian, and
2712 			 * struct rdma_conn_param only uses u8
2713 			 * for initiator_depth and responder_resources,
2714 			 * so we truncate the values to U8_MAX.
2715 			 *
2716 			 * smb_direct_accept_client() will then
2717 			 * do the real negotiation in order to
2718 			 * select the minimum between client and
2719 			 * server.
2720 			 */
2721 			ird32 = min_t(u32, ird32, U8_MAX);
2722 			ord32 = min_t(u32, ord32, U8_MAX);
2723 
2724 			sc->rdma.legacy_iwarp = true;
2725 			peer_initiator_depth = (u8)ird32;
2726 			peer_responder_resources = (u8)ord32;
2727 		}
2728 	}
2729 
2730 	/*
2731 	 * First set what we as the server are able to support
2732 	 */
2733 	sp->initiator_depth = min_t(u8, sp->initiator_depth,
2734 				   new_cm_id->device->attrs.max_qp_rd_atom);
2735 
2736 	/*
2737 	 * Negotiate the values by using the minimum
2738 	 * between client and server if the client provided
2739 	 * non-zero values.
2740 	 */
2741 	if (peer_initiator_depth != 0)
2742 		sp->initiator_depth = min_t(u8, sp->initiator_depth,
2743 					   peer_initiator_depth);
2744 	if (peer_responder_resources != 0)
2745 		sp->responder_resources = min_t(u8, sp->responder_resources,
2746 					       peer_responder_resources);
2747 
2748 	ret = smb_direct_connect(sc);
2749 	if (ret)
2750 		goto out_err;
2751 
2752 	handler = kthread_run(ksmbd_conn_handler_loop,
2753 			      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
2754 			      listener->port);
2755 	if (IS_ERR(handler)) {
2756 		ret = PTR_ERR(handler);
2757 		pr_err("Can't start thread\n");
2758 		goto out_err;
2759 	}
2760 
2761 	return 0;
2762 out_err:
2763 	free_transport(t);
2764 	return ret;
2765 }
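
/*
 * Worked example for the IRD/ORD negotiation above (illustrative):
 * with server values of, say, initiator_depth = 8 and
 * responder_resources = 8, a legacy MPA v1 client encoding
 * IRD = 2 / ORD = 4 in its 8-byte private blob ends up with
 * min(8, 2) = 2 and min(8, 4) = 4, and smb_direct_accept_client()
 * echoes the negotiated values back in the same legacy format.
 */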
2766 
2767 static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
2768 				     struct rdma_cm_event *event)
2769 {
2770 	switch (event->event) {
2771 	case RDMA_CM_EVENT_CONNECT_REQUEST: {
2772 		int ret = smb_direct_handle_connect_request(cm_id, event);
2773 
2774 		if (ret) {
2775 			pr_err("Can't create transport: %d\n", ret);
2776 			return ret;
2777 		}
2778 
2779 		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
2780 			    cm_id);
2781 		break;
2782 	}
2783 	default:
2784 		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
2785 		       cm_id, rdma_event_msg(event->event), event->event);
2786 		break;
2787 	}
2788 	return 0;
2789 }
2790 
2791 static int smb_direct_listen(struct smb_direct_listener *listener,
2792 			     int port)
2793 {
2794 	int ret;
2795 	struct rdma_cm_id *cm_id;
2796 	u8 node_type = RDMA_NODE_UNSPECIFIED;
2797 	struct sockaddr_in sin = {
2798 		.sin_family		= AF_INET,
2799 		.sin_addr.s_addr	= htonl(INADDR_ANY),
2800 		.sin_port		= htons(port),
2801 	};
2802 
2803 	switch (port) {
2804 	case SMB_DIRECT_PORT_IWARP:
2805 		/*
2806 		 * only allow iWarp devices
2807 		 * for port 5445.
2808 		 */
2809 		node_type = RDMA_NODE_RNIC;
2810 		break;
2811 	case SMB_DIRECT_PORT_INFINIBAND:
2812 		/*
2813 		 * only allow InfiniBand, RoCEv1 or RoCEv2
2814 		 * devices for port 445.
2815 		 *
2816 		 * (Basically don't allow iWarp devices)
2817 		 */
2818 		node_type = RDMA_NODE_IB_CA;
2819 		break;
2820 	default:
2821 		pr_err("unsupported smbdirect port=%d!\n", port);
2822 		return -ENODEV;
2823 	}
2824 
2825 	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
2826 			       listener, RDMA_PS_TCP, IB_QPT_RC);
2827 	if (IS_ERR(cm_id)) {
2828 		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
2829 		return PTR_ERR(cm_id);
2830 	}
2831 
2832 	ret = rdma_restrict_node_type(cm_id, node_type);
2833 	if (ret) {
2834 		pr_err("rdma_restrict_node_type(%u) failed %d\n",
2835 		       node_type, ret);
2836 		goto err;
2837 	}
2838 
2839 	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
2840 	if (ret) {
2841 		pr_err("Can't bind: %d\n", ret);
2842 		goto err;
2843 	}
2844 
2845 	ret = rdma_listen(cm_id, 10);
2846 	if (ret) {
2847 		pr_err("Can't listen: %d\n", ret);
2848 		goto err;
2849 	}
2850 
2851 	listener->port = port;
2852 	listener->cm_id = cm_id;
2853 
2854 	return 0;
2855 err:
2856 	listener->port = 0;
2857 	listener->cm_id = NULL;
2858 	rdma_destroy_id(cm_id);
2859 	return ret;
2860 }
2861 
2862 static int smb_direct_ib_client_add(struct ib_device *ib_dev)
2863 {
2864 	struct smb_direct_device *smb_dev;
2865 
2866 	if (!rdma_frwr_is_supported(&ib_dev->attrs))
2867 		return 0;
2868 
2869 	smb_dev = kzalloc_obj(*smb_dev, KSMBD_DEFAULT_GFP);
2870 	if (!smb_dev)
2871 		return -ENOMEM;
2872 	smb_dev->ib_dev = ib_dev;
2873 
2874 	write_lock(&smb_direct_device_lock);
2875 	list_add(&smb_dev->list, &smb_direct_device_list);
2876 	write_unlock(&smb_direct_device_lock);
2877 
2878 	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
2879 	return 0;
2880 }
2881 
2882 static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
2883 					void *client_data)
2884 {
2885 	struct smb_direct_device *smb_dev, *tmp;
2886 
2887 	write_lock(&smb_direct_device_lock);
2888 	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
2889 		if (smb_dev->ib_dev == ib_dev) {
2890 			list_del(&smb_dev->list);
2891 			kfree(smb_dev);
2892 			break;
2893 		}
2894 	}
2895 	write_unlock(&smb_direct_device_lock);
2896 }
2897 
2898 static struct ib_client smb_direct_ib_client = {
2899 	.name	= "ksmbd_smb_direct_ib",
2900 	.add	= smb_direct_ib_client_add,
2901 	.remove	= smb_direct_ib_client_remove,
2902 };
2903 
2904 int ksmbd_rdma_init(void)
2905 {
2906 	int ret;
2907 
2908 	smb_direct_ib_listener = smb_direct_iw_listener = (struct smb_direct_listener) {
2909 		.cm_id = NULL,
2910 	};
2911 
2912 	ret = ib_register_client(&smb_direct_ib_client);
2913 	if (ret) {
2914 		pr_err("failed to ib_register_client\n");
2915 		return ret;
2916 	}
2917 
2918 	/* When a client is running out of send credits, the credits are
2919 	 * granted by the server sending a packet using this queue.
2920 	 * This avoids the situation where a client cannot send packets
2921 	 * for lack of credits.
2922 	 */
2923 	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
2924 					WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU,
2925 					0);
2926 	if (!smb_direct_wq) {
2927 		ret = -ENOMEM;
2928 		goto err;
2929 	}
2930 
2931 	ret = smb_direct_listen(&smb_direct_ib_listener,
2932 				SMB_DIRECT_PORT_INFINIBAND);
2933 	if (ret) {
2934 		pr_err("Can't listen on InfiniBand/RoCEv1/RoCEv2: %d\n", ret);
2935 		goto err;
2936 	}
2937 
2938 	ksmbd_debug(RDMA, "InfiniBand/RoCEv1/RoCEv2 RDMA listener. cm_id=%p\n",
2939 		    smb_direct_ib_listener.cm_id);
2940 
2941 	ret = smb_direct_listen(&smb_direct_iw_listener,
2942 				SMB_DIRECT_PORT_IWARP);
2943 	if (ret) {
2944 		pr_err("Can't listen on iWarp: %d\n", ret);
2945 		goto err;
2946 	}
2947 
2948 	ksmbd_debug(RDMA, "iWarp RDMA listener. cm_id=%p\n",
2949 		    smb_direct_iw_listener.cm_id);
2950 
2951 	return 0;
2952 err:
2953 	ksmbd_rdma_stop_listening();
2954 	ksmbd_rdma_destroy();
2955 	return ret;
2956 }
2957 
2958 void ksmbd_rdma_stop_listening(void)
2959 {
2960 	if (!smb_direct_ib_listener.cm_id && !smb_direct_iw_listener.cm_id)
2961 		return;
2962 
2963 	ib_unregister_client(&smb_direct_ib_client);
2964 
2965 	if (smb_direct_ib_listener.cm_id)
2966 		rdma_destroy_id(smb_direct_ib_listener.cm_id);
2967 	if (smb_direct_iw_listener.cm_id)
2968 		rdma_destroy_id(smb_direct_iw_listener.cm_id);
2969 
2970 	smb_direct_ib_listener = smb_direct_iw_listener = (struct smb_direct_listener) {
2971 		.cm_id = NULL,
2972 	};
2973 }
2974 
2975 void ksmbd_rdma_destroy(void)
2976 {
2977 	if (smb_direct_wq) {
2978 		destroy_workqueue(smb_direct_wq);
2979 		smb_direct_wq = NULL;
2980 	}
2981 }
2982 
2983 static bool ksmbd_find_rdma_capable_netdev(struct net_device *netdev)
2984 {
2985 	struct smb_direct_device *smb_dev;
2986 	int i;
2987 	bool rdma_capable = false;
2988 
2989 	read_lock(&smb_direct_device_lock);
2990 	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
2991 		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
2992 			struct net_device *ndev;
2993 
2994 			ndev = ib_device_get_netdev(smb_dev->ib_dev, i + 1);
2995 			if (!ndev)
2996 				continue;
2997 
2998 			if (ndev == netdev) {
2999 				dev_put(ndev);
3000 				rdma_capable = true;
3001 				goto out;
3002 			}
3003 			dev_put(ndev);
3004 		}
3005 	}
3006 out:
3007 	read_unlock(&smb_direct_device_lock);
3008 
3009 	if (!rdma_capable) {
3010 		struct ib_device *ibdev;
3011 
3012 		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
3013 		if (ibdev) {
3014 			rdma_capable = rdma_frwr_is_supported(&ibdev->attrs);
3015 			ib_device_put(ibdev);
3016 		}
3017 	}
3018 
3019 	ksmbd_debug(RDMA, "netdev(%s) rdma capable : %s\n",
3020 		    netdev->name, str_true_false(rdma_capable));
3021 
3022 	return rdma_capable;
3023 }
3024 
3025 bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
3026 {
3027 	struct net_device *lower_dev;
3028 	struct list_head *iter;
3029 
3030 	if (ksmbd_find_rdma_capable_netdev(netdev))
3031 		return true;
3032 
3033 	/* check if netdev is a bridge or VLAN */
3034 	if (netif_is_bridge_master(netdev) ||
3035 	    netdev->priv_flags & IFF_802_1Q_VLAN)
3036 		netdev_for_each_lower_dev(netdev, lower_dev, iter)
3037 			if (ksmbd_find_rdma_capable_netdev(lower_dev))
3038 				return true;
3039 
3040 	/* safely check if netdev is IPoIB, without a layering violation */
3041 	if (netdev->type == ARPHRD_INFINIBAND)
3042 		return true;
3043 
3044 	return false;
3045 }
3046 
3047 static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
3048 	.prepare	= smb_direct_prepare,
3049 	.disconnect	= smb_direct_disconnect,
3050 	.shutdown	= smb_direct_shutdown,
3051 	.writev		= smb_direct_writev,
3052 	.read		= smb_direct_read,
3053 	.rdma_read	= smb_direct_rdma_read,
3054 	.rdma_write	= smb_direct_rdma_write,
3055 	.free_transport = smb_direct_free_transport,
3056 };
3057