// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 * Copyright (c) 2025, Stefan Metzmacher
 */

#include "smbdirect_internal.h"
#include <net/sock.h>
#include "../../common/smb2status.h"

static int smbdirect_accept_rdma_event_handler(struct rdma_cm_id *id,
					       struct rdma_cm_event *event);
static int smbdirect_accept_init_params(struct smbdirect_socket *sc);
static void smbdirect_accept_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void smbdirect_accept_negotiate_send_done(struct ib_cq *cq, struct ib_wc *wc);

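/*
 * Rough sketch of the accept-side flow, pieced together from the
 * status transitions below:
 *
 * smbdirect_accept_connect_request() takes a socket in
 * SMBDIRECT_SOCKET_CREATED, posts a recv_io for the negotiate
 * request and calls rdma_accept(), moving via
 * SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED to
 * SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING.
 * RDMA_CM_EVENT_ESTABLISHED moves it to
 * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED, the negotiate request is
 * then processed in SMBDIRECT_SOCKET_NEGOTIATE_RUNNING, and
 * smbdirect_socket_accept() finally marks the socket
 * SMBDIRECT_SOCKET_CONNECTED before the negotiate response is
 * sent.
 */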
int smbdirect_accept_connect_request(struct smbdirect_socket *sc,
				     const struct rdma_conn_param *param)
{
	struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_recv_io *recv_io;
	u8 peer_initiator_depth;
	u8 peer_responder_resources;
	struct rdma_conn_param conn_param;
	__be32 ird_ord_hdr[2];
	int ret;

	if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_CREATED))
		return -EINVAL;

	/*
	 * First set what we as the server are able to support
	 */
	sp->initiator_depth = min_t(u8, sp->initiator_depth,
				    sc->ib.dev->attrs.max_qp_rd_atom);

	peer_initiator_depth = param->initiator_depth;
	peer_responder_resources = param->responder_resources;
	smbdirect_connection_negotiate_rdma_resources(sc,
						      peer_initiator_depth,
						      peer_responder_resources,
						      param);

	ret = smbdirect_accept_init_params(sc);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_accept_init_params() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto init_params_failed;
	}

	ret = smbdirect_connection_create_qp(sc);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_create_qp() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto create_qp_failed;
	}

	ret = smbdirect_connection_create_mem_pools(sc);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_create_mem_pools() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto create_mem_failed;
	}

	recv_io = smbdirect_connection_get_recv_io(sc);
	if (WARN_ON_ONCE(!recv_io)) {
		ret = -EINVAL;
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_get_recv_io() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto get_recv_io_failed;
	}
	recv_io->cqe.done = smbdirect_accept_negotiate_recv_done;

	/*
	 * Now post the recv_io buffer in order to get
	 * the negotiate request
	 */
	sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REQ;
	ret = smbdirect_connection_post_recv_io(recv_io);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_post_recv_io() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto post_recv_io_failed;
	}
	/*
	 * From here recv_io is known to the RDMA QP and needs
	 * ib_drain_qp() and smbdirect_accept_negotiate_recv_done()
	 * to clean up...
	 */
	recv_io = NULL;

	/* already checked with SMBDIRECT_CHECK_STATUS_WARN above */
	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED;

	/*
	 * We already negotiated sp->initiator_depth
	 * and sp->responder_resources above.
	 */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = sp->initiator_depth;
	conn_param.responder_resources = sp->responder_resources;

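	/*
	 * For legacy iWARP peers the IRD/ORD values are carried as two
	 * big-endian 32-bit words (responder_resources first,
	 * initiator_depth second) in the rdma_accept() private data,
	 * presumably because classic iWARP connection setup has no
	 * in-band IRD/ORD negotiation of its own.
	 */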
	if (sc->rdma.legacy_iwarp) {
		ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
		ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}
	conn_param.retry_count = SMBDIRECT_RDMA_CM_RETRY;
	conn_param.rnr_retry_count = SMBDIRECT_RDMA_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	/* explicitly set above */
	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED);
	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING;
	sc->rdma.expected_event = RDMA_CM_EVENT_ESTABLISHED;
	sc->rdma.cm_id->event_handler = smbdirect_accept_rdma_event_handler;
	ret = rdma_accept(sc->rdma.cm_id, &conn_param);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "rdma_accept() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		goto rdma_accept_failed;
	}

	/*
	 * start with the negotiate timeout and SMBDIRECT_KEEPALIVE_PENDING
	 * so that the timer will cause a disconnect.
	 */
	INIT_DELAYED_WORK(&sc->idle.timer_work, smbdirect_connection_idle_timer_work);
	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
	mod_delayed_work(sc->workqueues.idle, &sc->idle.timer_work,
			 msecs_to_jiffies(sp->negotiate_timeout_msec));

	return 0;

rdma_accept_failed:
	/*
	 * smbdirect_connection_destroy_qp() calls ib_drain_qp(),
	 * so that smbdirect_accept_negotiate_recv_done() will
	 * call smbdirect_connection_put_recv_io()
	 */
post_recv_io_failed:
	if (recv_io)
		smbdirect_connection_put_recv_io(recv_io);
get_recv_io_failed:
	smbdirect_connection_destroy_mem_pools(sc);
create_mem_failed:
	smbdirect_connection_destroy_qp(sc);
create_qp_failed:
init_params_failed:
	return ret;
}

static int smbdirect_accept_init_params(struct smbdirect_socket *sc)
{
	const struct smbdirect_socket_parameters *sp = &sc->parameters;
	int max_send_sges;
	unsigned int maxpages;

	/*
	 * Need 3 more SGEs, because an SMB_DIRECT header, an SMB2
	 * header and an SMB2 response could be mapped.
	 */
	max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
	if (max_send_sges > SMBDIRECT_SEND_IO_MAX_SGE) {
		pr_err("max_send_size %d is too large\n", sp->max_send_size);
		return -EINVAL;
	}
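
	/*
	 * Worked example (assuming PAGE_SIZE == 4096): with
	 * max_send_size == 8192 the payload needs 2 SGEs, plus the 3
	 * extra ones above, so SMBDIRECT_SEND_IO_MAX_SGE needs to be
	 * at least 5 for this check to pass.
	 */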

	/*
	 * There is only a single batch credit
	 */
	atomic_set(&sc->send_io.bcredits.count, 1);

	/*
	 * Initialize the local credits to post
	 * IB_WR_SEND[_WITH_INV].
	 */
	atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);

	if (sp->max_read_write_size) {
		maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
		sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev,
							  sc->rdma.cm_id->port_num,
							  maxpages);
		sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max);
		/* add one extra in order to handle unaligned pages */
		sc->rw_io.credits.max += 1;
	}
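
	/*
	 * Illustration with assumed numbers: for
	 * max_read_write_size == 1 MiB and PAGE_SIZE == 4096,
	 * maxpages is 256; if rdma_rw_mr_factor() reports 2 MRs for
	 * that payload, each credit covers DIV_ROUND_UP(256, 2) == 128
	 * pages and credits.max ends up as 2 + 1 == 3 including the
	 * extra credit for unaligned pages.
	 */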

	sc->recv_io.credits.target = 1;

	atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max);

	return 0;
}

static void smbdirect_accept_negotiate_recv_work(struct work_struct *work);

static void smbdirect_accept_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_recv_io *recv_io =
		container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
	struct smbdirect_socket *sc = recv_io->socket;
	unsigned long flags;

	if (unlikely(wc->status != IB_WC_SUCCESS || WARN_ON_ONCE(wc->opcode != IB_WC_RECV))) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			smbdirect_log_rdma_recv(sc, SMBDIRECT_LOG_ERR,
						"wc->status=%s (%d) wc->opcode=%d\n",
						ib_wc_status_msg(wc->status), wc->status, wc->opcode);
		goto error;
	}

	smbdirect_log_rdma_recv(sc, SMBDIRECT_LOG_INFO,
				"smbdirect_recv_io completed. status='%s (%d)', opcode=%d\n",
				ib_wc_status_msg(wc->status), wc->status, wc->opcode);

	/*
	 * This is an internal error!
	 */
	if (WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_NEGOTIATE_REQ))
		goto error;

	/*
	 * Don't reset the timer to the keepalive interval here;
	 * this will be done in smbdirect_accept_negotiate_recv_work().
	 */

	ib_dma_sync_single_for_cpu(sc->ib.dev,
				   recv_io->sge.addr,
				   recv_io->sge.length,
				   DMA_FROM_DEVICE);

	/*
	 * Only remember recv_io if it has enough bytes,
	 * this gives smbdirect_accept_negotiate_recv_work enough
	 * information in order to disconnect if it was not
	 * valid.
	 */
	sc->recv_io.reassembly.full_packet_received = true;
	if (wc->byte_len >= sizeof(struct smbdirect_negotiate_req))
		smbdirect_connection_reassembly_append_recv_io(sc, recv_io, 0);
	else
		smbdirect_connection_put_recv_io(recv_io);

	/*
	 * Some drivers (at least mlx5_ib and irdma) might post a
	 * recv completion before RDMA_CM_EVENT_ESTABLISHED,
	 * we need to adjust our expectation in that case.
	 *
	 * So we defer further processing of the negotiation
	 * to smbdirect_accept_negotiate_recv_work().
	 *
	 * If we are already in SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
	 * we queue the work directly, otherwise
	 * smbdirect_accept_rdma_event_handler() will do it when
	 * RDMA_CM_EVENT_ESTABLISHED arrives.
	 */
	spin_lock_irqsave(&sc->connect.lock, flags);
	if (!sc->first_error) {
		INIT_WORK(&sc->connect.work, smbdirect_accept_negotiate_recv_work);
		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)
			queue_work(sc->workqueues.accept, &sc->connect.work);
	}
	spin_unlock_irqrestore(&sc->connect.lock, flags);

	return;

error:
	/*
	 * recv_io.posted.refill_work is still disabled,
	 * so smbdirect_connection_put_recv_io() won't
	 * start it.
	 */
	smbdirect_connection_put_recv_io(recv_io);
	smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
}

static void smbdirect_accept_negotiate_recv_work(struct work_struct *work)
{
	struct smbdirect_socket *sc =
		container_of(work, struct smbdirect_socket, connect.work);
	struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_recv_io *recv_io;
	struct smbdirect_negotiate_req *nreq;
	unsigned long flags;
	u16 min_version;
	u16 max_version;
	u16 credits_requested;
	u32 preferred_send_size;
	u32 max_receive_size;
	u32 max_fragmented_size;
	u32 ntstatus;

	if (sc->first_error)
		return;

	/*
	 * make sure we won't start again...
	 */
	disable_work(work);

	/*
	 * Reset timer to the keepalive interval in
	 * order to trigger our next keepalive message.
	 */
	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
	mod_delayed_work(sc->workqueues.idle, &sc->idle.timer_work,
			 msecs_to_jiffies(sp->keepalive_interval_msec));

	/*
	 * If smbdirect_accept_negotiate_recv_done() detected an
	 * invalid request we want to disconnect.
	 */
	recv_io = smbdirect_connection_reassembly_first_recv_io(sc);
	if (!recv_io) {
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}
	spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
	sc->recv_io.reassembly.queue_length--;
	list_del(&recv_io->list);
	spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
	smbdirect_connection_put_recv_io(recv_io);

	if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED))
		return;
	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;

	/*
	 * Note recv_io is already part of the free list,
	 * as we just called smbdirect_connection_put_recv_io(),
	 * but it won't be reused before we call
	 * smbdirect_connection_recv_io_refill() below.
	 */

	nreq = (struct smbdirect_negotiate_req *)recv_io->packet;
	min_version = le16_to_cpu(nreq->min_version);
	max_version = le16_to_cpu(nreq->max_version);
	credits_requested = le16_to_cpu(nreq->credits_requested);
	preferred_send_size = le32_to_cpu(nreq->preferred_send_size);
	max_receive_size = le32_to_cpu(nreq->max_receive_size);
	max_fragmented_size = le32_to_cpu(nreq->max_fragmented_size);

	smbdirect_log_negotiate(sc, SMBDIRECT_LOG_INFO,
				"ReqIn: %s%x, %s%x, %s%u, %s%u, %s%u, %s%u\n",
				"MinVersion=0x",
				le16_to_cpu(nreq->min_version),
				"MaxVersion=0x",
				le16_to_cpu(nreq->max_version),
				"CreditsRequested=",
				le16_to_cpu(nreq->credits_requested),
				"PreferredSendSize=",
				le32_to_cpu(nreq->preferred_send_size),
				"MaxRecvSize=",
				le32_to_cpu(nreq->max_receive_size),
				"MaxFragmentedSize=",
				le32_to_cpu(nreq->max_fragmented_size));

	if (!(min_version <= SMBDIRECT_V1 && max_version >= SMBDIRECT_V1)) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "invalid: min_version=0x%x max_version=0x%x\n",
					 min_version, max_version);
		ntstatus = le32_to_cpu(STATUS_NOT_SUPPORTED);
		goto not_supported;
	}

	if (credits_requested == 0) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "invalid: credits_requested == 0\n");
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	if (max_receive_size < SMBDIRECT_MIN_RECEIVE_SIZE) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "invalid: max_receive_size=%u < %u\n",
					 max_receive_size,
					 SMBDIRECT_MIN_RECEIVE_SIZE);
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	if (max_fragmented_size < SMBDIRECT_MIN_FRAGMENTED_SIZE) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "invalid: max_fragmented_size=%u < %u\n",
					 max_fragmented_size,
					 SMBDIRECT_MIN_FRAGMENTED_SIZE);
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	/*
	 * At least the value of SMBDIRECT_MIN_RECEIVE_SIZE is used.
	 */
	sp->max_recv_size = min_t(u32, sp->max_recv_size, preferred_send_size);
	sp->max_recv_size = max_t(u32, sp->max_recv_size, SMBDIRECT_MIN_RECEIVE_SIZE);

	/*
	 * The maximum fragmented upper-layer payload receive size supported
	 *
	 * Assume max_payload_per_credit is
	 * smb_direct_max_receive_size - 24 = 1340
	 *
	 * The maximum number would be
	 * smb_direct_receive_credit_max * max_payload_per_credit
	 *
	 * 1340 * 255 = 341700 (0x536C4)
	 *
	 * The minimum value from the spec is 131072 (0x20000)
	 *
	 * For now we use the logic we used in ksmbd before:
	 * (1364 * 255) / 2 = 173910 (0x2A756)
	 *
	 * We need to adjust this here in case the peer
	 * lowered sp->max_recv_size.
	 *
	 * TODO: instead of adjusting max_fragmented_recv_size
	 * we should adjust the number of available buffers,
	 * but for now we keep the logic as it was used
	 * in ksmbd before.
	 */
	sp->max_fragmented_recv_size = (sp->recv_credit_max * sp->max_recv_size) / 2;

	/*
	 * We take the value from the peer, which is checked to be higher than 0,
	 * but we limit it to the max value we support in order to have
	 * the main logic simpler.
	 */
	sc->recv_io.credits.target = credits_requested;
	sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target,
					   sp->recv_credit_max);

	/*
	 * Note nreq->max_receive_size was already checked against
	 * SMBDIRECT_MIN_RECEIVE_SIZE above.
	 */
	sp->max_send_size = min_t(u32, sp->max_send_size, max_receive_size);

	/*
	 * Note nreq->max_fragmented_size was already checked against
	 * SMBDIRECT_MIN_FRAGMENTED_SIZE above.
	 */
	sp->max_fragmented_send_size = max_fragmented_size;

	if (sc->accept.listener) {
		struct smbdirect_socket *lsc = sc->accept.listener;
		unsigned long flags;

		spin_lock_irqsave(&lsc->listen.lock, flags);
		list_del(&sc->accept.list);
		list_add_tail(&sc->accept.list, &lsc->listen.ready);
		wake_up(&lsc->listen.wait_queue);
		spin_unlock_irqrestore(&lsc->listen.lock, flags);

		/*
		 * smbdirect_socket_accept() will call
		 * smbdirect_accept_negotiate_finish(nsc, 0);
		 *
		 * So that we don't send the negotiation
		 * response that grants credits to the peer
		 * before the socket is accepted by the
		 * application.
		 */
		return;
	}

	ntstatus = le32_to_cpu(STATUS_SUCCESS);

not_supported:
	smbdirect_accept_negotiate_finish(sc, ntstatus);
}

void smbdirect_accept_negotiate_finish(struct smbdirect_socket *sc, u32 ntstatus)
{
	const struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_recv_io *recv_io;
	struct smbdirect_send_io *send_io;
	struct smbdirect_negotiate_resp *nrep;
	int posted;
	u16 new_credits;
	int ret;

	if (ntstatus)
		goto not_supported;

	/*
	 * Prepare for receiving data_transfer messages
	 */
	sc->recv_io.reassembly.full_packet_received = true;
	sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
	list_for_each_entry(recv_io, &sc->recv_io.free.list, list)
		recv_io->cqe.done = smbdirect_connection_recv_io_done;
	recv_io = NULL;

	/*
	 * We should at least post 1 smbdirect_recv_io!
	 */
	posted = smbdirect_connection_recv_io_refill(sc);
	if (posted < 1) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_recv_io_refill() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(posted));
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	/*
	 * The response will grant credits for all posted
	 * smbdirect_recv_io messages.
	 */
	new_credits = smbdirect_connection_grant_recv_credits(sc);

not_supported:
	send_io = smbdirect_connection_alloc_send_io(sc);
	if (IS_ERR(send_io)) {
		ret = PTR_ERR(send_io);
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "smbdirect_connection_alloc_send_io() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}
	send_io->cqe.done = smbdirect_accept_negotiate_send_done;

	nrep = (struct smbdirect_negotiate_resp *)send_io->packet;
	nrep->min_version = cpu_to_le16(SMBDIRECT_V1);
	nrep->max_version = cpu_to_le16(SMBDIRECT_V1);
	if (ntstatus == 0) {
		nrep->negotiated_version = cpu_to_le16(SMBDIRECT_V1);
		nrep->reserved = 0;
		nrep->credits_requested = cpu_to_le16(sp->send_credit_target);
		nrep->credits_granted = cpu_to_le16(new_credits);
		nrep->status = cpu_to_le32(ntstatus);
		nrep->max_readwrite_size = cpu_to_le32(sp->max_read_write_size);
		nrep->preferred_send_size = cpu_to_le32(sp->max_send_size);
		nrep->max_receive_size = cpu_to_le32(sp->max_recv_size);
		nrep->max_fragmented_size = cpu_to_le32(sp->max_fragmented_recv_size);
	} else {
		nrep->negotiated_version = 0;
		nrep->reserved = 0;
		nrep->credits_requested = 0;
		nrep->credits_granted = 0;
		nrep->status = cpu_to_le32(ntstatus);
		nrep->max_readwrite_size = 0;
		nrep->preferred_send_size = 0;
		nrep->max_receive_size = 0;
		nrep->max_fragmented_size = 0;
	}

	smbdirect_log_negotiate(sc, SMBDIRECT_LOG_INFO,
				"RepOut: %s%x, %s%x, %s%x, %s%u, %s%u, %s%x, %s%u, %s%u, %s%u, %s%u\n",
				"MinVersion=0x",
				le16_to_cpu(nrep->min_version),
				"MaxVersion=0x",
				le16_to_cpu(nrep->max_version),
				"NegotiatedVersion=0x",
				le16_to_cpu(nrep->negotiated_version),
				"CreditsRequested=",
				le16_to_cpu(nrep->credits_requested),
				"CreditsGranted=",
				le16_to_cpu(nrep->credits_granted),
				"Status=0x",
				le32_to_cpu(nrep->status),
				"MaxReadWriteSize=",
				le32_to_cpu(nrep->max_readwrite_size),
				"PreferredSendSize=",
				le32_to_cpu(nrep->preferred_send_size),
				"MaxRecvSize=",
				le32_to_cpu(nrep->max_receive_size),
				"MaxFragmentedSize=",
				le32_to_cpu(nrep->max_fragmented_size));

	send_io->sge[0].addr = ib_dma_map_single(sc->ib.dev,
						 nrep,
						 sizeof(*nrep),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(sc->ib.dev, send_io->sge[0].addr);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "ib_dma_mapping_error() failed %1pe\n",
					 SMBDIRECT_DEBUG_ERR_PTR(ret));
		smbdirect_connection_free_send_io(send_io);
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}

	send_io->sge[0].length = sizeof(*nrep);
	send_io->sge[0].lkey = sc->ib.pd->local_dma_lkey;
	send_io->num_sge = 1;

	ib_dma_sync_single_for_device(sc->ib.dev,
				      send_io->sge[0].addr,
				      send_io->sge[0].length,
				      DMA_TO_DEVICE);

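	/*
	 * Build a single-SGE send WR for the response;
	 * IB_SEND_SIGNALED requests a completion for this WR, so
	 * smbdirect_accept_negotiate_send_done() is invoked and can
	 * release the send_io once the send completed.
	 */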
	send_io->wr.next = NULL;
	send_io->wr.wr_cqe = &send_io->cqe;
	send_io->wr.sg_list = send_io->sge;
	send_io->wr.num_sge = send_io->num_sge;
	send_io->wr.opcode = IB_WR_SEND;
	send_io->wr.send_flags = IB_SEND_SIGNALED;

	ret = smbdirect_connection_post_send_wr(sc, &send_io->wr);
	if (ret) {
		/* if we reach here, post send failed */
		smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_ERR,
					"smbdirect_connection_post_send_wr() failed %1pe\n",
					SMBDIRECT_DEBUG_ERR_PTR(ret));
		/*
		 * Note smbdirect_connection_free_send_io()
		 * does ib_dma_unmap_page()
		 */
		smbdirect_connection_free_send_io(send_io);
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}

	/*
	 * smbdirect_accept_negotiate_send_done
	 * will do all remaining work...
	 */
}

static void smbdirect_accept_negotiate_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_send_io *send_io =
		container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
	struct smbdirect_socket *sc = send_io->socket;
	struct smbdirect_negotiate_resp *nrep;
	u32 ntstatus;

	smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_INFO,
				"smbdirect_send_io completed. status='%s (%d)', opcode=%d\n",
				ib_wc_status_msg(wc->status), wc->status, wc->opcode);

	nrep = (struct smbdirect_negotiate_resp *)send_io->packet;
	ntstatus = le32_to_cpu(nrep->status);

	/* Note this frees wc->wr_cqe, but not wc */
	smbdirect_connection_free_send_io(send_io);
	atomic_dec(&sc->send_io.pending.count);

	if (unlikely(wc->status != IB_WC_SUCCESS || WARN_ON_ONCE(wc->opcode != IB_WC_SEND))) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_ERR,
						"wc->status=%s (%d) wc->opcode=%d\n",
						ib_wc_status_msg(wc->status), wc->status, wc->opcode);
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	/*
	 * If we sent a smbdirect_negotiate_resp without NT_STATUS_OK (0)
	 * we need to disconnect now.
	 *
	 * Otherwise smbdirect_connection_negotiation_done()
	 * will set up all required things and wake up
	 * the waiter.
	 */
	if (ntstatus)
		smbdirect_socket_schedule_cleanup(sc, -EOPNOTSUPP);
	else
		smbdirect_connection_negotiation_done(sc);
}

static int smbdirect_accept_rdma_event_handler(struct rdma_cm_id *id,
					       struct rdma_cm_event *event)
{
	struct smbdirect_socket *sc = id->context;
	unsigned long flags;

	/*
	 * cma_cm_event_handler() has
	 * lockdep_assert_held(&id_priv->handler_mutex);
	 *
	 * Mutexes are not allowed in interrupts,
	 * and we rely on not being in an interrupt here,
	 * as we might sleep.
	 *
	 * We didn't time out, so we cancel our idle timer;
	 * it will be scheduled again if needed.
	 */
	WARN_ON_ONCE(in_interrupt());

	if (event->status || event->event != sc->rdma.expected_event) {
		int ret = -ECONNABORTED;

		if (event->event == RDMA_CM_EVENT_REJECTED)
			ret = -ECONNREFUSED;
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ret = -ENETDOWN;
		if (IS_ERR(SMBDIRECT_DEBUG_ERR_PTR(event->status)))
			ret = event->status;

		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
					 "%s (first_error=%1pe, expected=%s) => event=%s status=%d => ret=%1pe\n",
					 smbdirect_socket_status_string(sc->status),
					 SMBDIRECT_DEBUG_ERR_PTR(sc->first_error),
					 rdma_event_msg(sc->rdma.expected_event),
					 rdma_event_msg(event->event),
					 event->status,
					 SMBDIRECT_DEBUG_ERR_PTR(ret));

		smbdirect_socket_schedule_cleanup(sc, ret);
		return 0;
	}

	smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_INFO,
				 "%s (first_error=%1pe) event=%s\n",
				 smbdirect_socket_status_string(sc->status),
				 SMBDIRECT_DEBUG_ERR_PTR(sc->first_error),
				 rdma_event_msg(event->event));

	if (sc->first_error)
		return 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		smbdirect_connection_rdma_established(sc);

		/*
		 * Some drivers (at least mlx5_ib and irdma) might post a
		 * recv completion before RDMA_CM_EVENT_ESTABLISHED,
		 * we need to adjust our expectation in that case.
		 *
		 * If smbdirect_accept_negotiate_recv_done() was called
		 * first, it only initialized sc->connect.work and left
		 * it for us to start, so that we move into
		 * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED before
		 * smbdirect_accept_negotiate_recv_work() runs.
		 *
		 * If smbdirect_accept_negotiate_recv_done() didn't happen
		 * yet, sc->connect.work is still disabled and
		 * queue_work() is a no-op.
		 */
		if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING))
			return 0;
		sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
		spin_lock_irqsave(&sc->connect.lock, flags);
		if (!sc->first_error)
			queue_work(sc->workqueues.accept, &sc->connect.work);
		spin_unlock_irqrestore(&sc->connect.lock, flags);

		/*
		 * wait for smbdirect_accept_negotiate_recv_done()
		 * to get the negotiate request.
		 */
		return 0;

	default:
		break;
	}

	/*
	 * This is an internal error
	 */
	WARN_ON_ONCE(sc->rdma.expected_event != RDMA_CM_EVENT_ESTABLISHED);
	smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
	return 0;
}

static long smbdirect_socket_wait_for_accept(struct smbdirect_socket *lsc, long timeo)
{
	long ret;

	ret = wait_event_interruptible_timeout(lsc->listen.wait_queue,
					       !list_empty_careful(&lsc->listen.ready) ||
					       lsc->status != SMBDIRECT_SOCKET_LISTENING ||
					       lsc->first_error,
					       timeo);
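	/*
	 * wait_event_interruptible_timeout() returns 0 if the timeout
	 * elapsed, -ERESTARTSYS if interrupted by a signal and
	 * otherwise the remaining jiffies (>= 1) when the condition
	 * became true; the checks below map this onto -ETIMEDOUT,
	 * the error or success.
	 */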
	if (lsc->status != SMBDIRECT_SOCKET_LISTENING)
		return -EINVAL;
	if (lsc->first_error)
		return lsc->first_error;
	if (!ret)
		ret = -ETIMEDOUT;
	if (ret < 0)
		return ret;

	return 0;
}

struct smbdirect_socket *smbdirect_socket_accept(struct smbdirect_socket *lsc,
						 long timeo,
						 struct proto_accept_arg *arg)
{
	struct smbdirect_socket *nsc;
	unsigned long flags;

	if (lsc->status != SMBDIRECT_SOCKET_LISTENING) {
		arg->err = -EINVAL;
		return NULL;
	}

	if (lsc->first_error) {
		arg->err = lsc->first_error;
		return NULL;
	}

	if (list_empty_careful(&lsc->listen.ready)) {
		int ret;

		if (timeo == 0) {
			arg->err = -EAGAIN;
			return NULL;
		}

		ret = smbdirect_socket_wait_for_accept(lsc, timeo);
		if (ret) {
			arg->err = ret;
			return NULL;
		}
	}

	spin_lock_irqsave(&lsc->listen.lock, flags);
	nsc = list_first_entry_or_null(&lsc->listen.ready,
				       struct smbdirect_socket,
				       accept.list);
	if (nsc) {
		nsc->accept.listener = NULL;
		list_del_init_careful(&nsc->accept.list);
		arg->is_empty = list_empty_careful(&lsc->listen.ready);
	}
	spin_unlock_irqrestore(&lsc->listen.lock, flags);
	if (!nsc) {
		arg->err = -EAGAIN;
		return NULL;
	}

	/*
	 * We did not send the negotiation response
	 * yet, so we did not grant any credits to the client,
	 * so it didn't grant any credits to us.
	 *
	 * The caller expects a connected socket
	 * now as there are no credits anyway.
	 *
	 * Then we send the negotiation response in
	 * order to grant credits to the peer.
	 */
	nsc->status = SMBDIRECT_SOCKET_CONNECTED;
	smbdirect_accept_negotiate_finish(nsc, 0);

	return nsc;
}
__SMBDIRECT_EXPORT_SYMBOL__(smbdirect_socket_accept);
