// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
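
/*
 * The completer runs from the QP's send task. It consumes response
 * packets queued on qp->resp_pkts, matches them against outstanding
 * send WQEs, retires completed WQEs to the send CQ and drives the
 * retry and RNR NAK back-off logic. The comp_state values below name
 * the steps of that state machine.
 */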
enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK] = "GET ACK",
	[COMPST_GET_WQE] = "GET WQE",
	[COMPST_COMP_WQE] = "COMP WQE",
	[COMPST_COMP_ACK] = "COMP ACK",
	[COMPST_CHECK_PSN] = "CHECK PSN",
	[COMPST_CHECK_ACK] = "CHECK ACK",
	[COMPST_READ] = "READ",
	[COMPST_ATOMIC] = "ATOMIC",
	[COMPST_WRITE_SEND] = "WRITE/SEND",
	[COMPST_UPDATE_COMP] = "UPDATE COMP",
	[COMPST_ERROR_RETRY] = "ERROR RETRY",
	[COMPST_RNR_RETRY] = "RNR RETRY",
	[COMPST_ERROR] = "ERROR",
	[COMPST_EXIT] = "EXIT",
	[COMPST_DONE] = "DONE",
};

static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};
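
/*
 * Convert the 5-bit RNR NAK timer code carried in the AETH syndrome
 * (indexing the rnrnak_usec table above) into jiffies, never less than
 * one jiffy so the timer always arms.
 */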
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}
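
/* map a send WR opcode onto the WC opcode reported in its completion;
 * opcodes with no completion mapping return 0xff
 */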
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE;
	case IB_WR_SEND: return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM: return IB_WC_SEND;
	case IB_WR_RDMA_READ: return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD;
	case IB_WR_LSO: return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV: return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR: return IB_WC_REG_MR;
	case IB_WR_BIND_MW: return IB_WC_BIND_MW;
	case IB_WR_ATOMIC_WRITE: return IB_WC_ATOMIC_WRITE;
	case IB_WR_FLUSH: return IB_WC_FLUSH;

	default:
		return 0xff;
	}
}
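
/* retransmit (ACK timeout) timer handler: mark the QP as timed out and
 * reschedule the send task so the completer can run its retry path
 */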
void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = timer_container_of(qp, t, retrans_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "retransmit timer fired\n");

	if (!rxe_get(qp))
		return;
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_sched_task(&qp->send_task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
	rxe_put(qp);
}
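
/* called when a response packet arrives: queue it for the completer
 * and kick the QP's send task
 */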
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
	skb_queue_tail(&qp->resp_pkts, skb);
	rxe_sched_task(&qp->send_task);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}

static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is, complete
	 * send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* response is most likely a retried packet if it matches an
		 * uncompleted WQE go complete it else ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE &&
			 (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST ||
			  qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE))
			return COMPST_CHECK_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}
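
/* check a response packet against the current WQE: first verify that
 * its opcode legally follows the previous response opcode, then branch
 * on the packet type (read response, atomic ack, ACK/RNR NAK/NAK)
 */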
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		/* Check NAK code to handle a remote error */
		if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE)
			break;

		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
			return COMPST_WRITE_SEND;

		fallthrough;
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an
		 * AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV &&
		    wqe->wr.opcode != IB_WR_FLUSH) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						qp->req.again = 1;
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				rxe_dbg_qp(qp, "unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		rxe_dbg_qp(qp, "unexpected opcode\n");
	}

	return COMPST_ERROR;
}
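
/* copy the payload of a read response packet into the local buffers
 * described by the WQE's sg list
 */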
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;

	return COMPST_UPDATE_COMP;
}
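
/* copy the 64-bit original value carried in an atomic ack into the
 * WQE's local buffer
 */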
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	return COMPST_COMP_ACK;
}
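
/* fill in a CQE for a completed send WQE, using the kernel or user
 * space work completion layout depending on who owns the QP
 */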
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	struct ib_wc *wc = &cqe->ibwc;
	struct ib_uverbs_wc *uwc = &cqe->uibwc;

	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->qp = &qp->ibqp;
	} else {
		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->qp_num = qp->ibqp.qp_num;
	}

	if (wqe->status == IB_WC_SUCCESS) {
		if (!qp->is_user) {
			wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
				wc->wc_flags = IB_WC_WITH_IMM;
			wc->byte_len = wqe->dma.length;
		} else {
			uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
				uwc->wc_flags = IB_WC_WITH_IMM;
			uwc->byte_len = wqe->dma.length;
		}
	} else {
		if (wqe->status != IB_WC_WR_FLUSH_ERR)
			rxe_err_qp(qp, "non-flush error status = %d\n",
				   wqe->status);
	}
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;
	bool post;

	/* do we need to post a completion */
	post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
		(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		wqe->status != IB_WC_SUCCESS);

	if (post)
		make_send_cqe(qp, wqe, &cqe);

	queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);

	if (post)
		rxe_cq_post(qp->scq, &cqe, 0);

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		qp->req.again = 1;
	}
}
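
/* if the QP is draining (SQD) and every posted PSN has now been
 * completed, clear sq_draining and deliver the IB_EVENT_SQ_DRAINED
 * event to the owner of the QP
 */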
static void comp_check_sq_drain_done(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
			return;
		}
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}
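
/* complete a WQE that required a response: return its rd/atomic
 * credit, handle SQ drain and post the completion
 */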
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			qp->req.again = 1;
		}
	}

	comp_check_sq_drain_done(qp);

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}
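
/* complete a WQE that does not need to wait for further responses;
 * if it was still pending, advance the completion PSN past its last PSN
 */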
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			qp->req.again = 1;
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}

/* drain incoming response packet queue */
static void drain_resp_pkts(struct rxe_qp *qp)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}
}

/* complete send wqe with flush error */
static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe = {};
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	int err;

	if (qp->is_user) {
		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = IB_WC_WR_FLUSH_ERR;
		uwc->qp_num = qp->ibqp.qp_num;
	} else {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
	}

	err = rxe_cq_post(qp->scq, &cqe, 0);
	if (err)
		rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);

	return err;
}

/* drain and optionally complete the send queue
 * if unable to complete a wqe, i.e. cq is full, stop
 * completing and flush the remaining wqes
 */
static void flush_send_queue(struct rxe_qp *qp, bool notify)
{
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;
	int err;

	/* send queue never got created. nothing to do. */
	if (!qp->sq.queue)
		return;

	while ((wqe = queue_head(q, q->type))) {
		if (notify) {
			err = flush_send_wqe(qp, wqe);
			if (err)
				notify = 0;
		}
		queue_advance_consumer(q, q->type);
	}
}

static void free_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_put(qp);
	ib_device_put(dev);
}

/* reset the retry timer if
 * - QP is type RC
 * - there is a packet sent by the requester that
 *   might be acked (we still might get spurious
 *   timeouts but try to keep them as few as possible)
 * - the timeout parameter is set
 * - the QP is alive
 */
static void reset_retry_timer(struct rxe_qp *qp)
{
	unsigned long flags;

	if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
		spin_lock_irqsave(&qp->state_lock, flags);
		if (qp_state(qp) >= IB_QPS_RTS &&
		    psn_compare(qp->req.psn, qp->comp.psn) > 0)
			mod_timer(&qp->retrans_timer,
				  jiffies + qp->qp_timeout_jiffies);
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}
}
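
/* completer entry point, called from the QP's send task: flushes the
 * response and send queues if the QP is in error or reset, otherwise
 * runs the completion state machine. Returns 0 to be called again and
 * -EAGAIN to stop until the task is rescheduled.
 */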
int rxe_completer(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;
	int ret;
	unsigned long flags;

	qp->req.again = 0;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
	    qp_state(qp) == IB_QPS_RESET) {
		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

		drain_resp_pkts(qp);
		flush_send_queue(qp, notify);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;

	while (1) {
		rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				qp->req.again = 1;
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			reset_retry_timer(qp);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted))
				goto exit;

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry)
				goto done;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					qp->req.again = 1;
				}
				goto done;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			/* we come here if we received an RNR NAK */
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				/* don't start a retry flow until the
				 * rnr timer has fired
				 */
				qp->req.wait_for_rnr_timer = 1;
				rxe_dbg_qp(qp, "set rnr nak timer\n");
				// TODO who protects from destroy_qp??
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);
			goto exit;
		}
	}

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the work item. A zero return
	 * will continue looping and return to rxe_completer
	 */
done:
	ret = 0;
	goto out;
exit:
	ret = (qp->req.again) ? 0 : -EAGAIN;
out:
	qp->req.again = 0;
	if (pkt)
		free_pkt(pkt);
	return ret;
}