xref: /linux/drivers/infiniband/sw/rxe/rxe_req.c (revision 311aa68319f6a3d64a1e6d940d885830c7acba4c)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

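/* Advance the first retried wqe's DMA state past the npsn packets
 * that the peer has already acked, so the resend resumes at the first
 * unacked PSN instead of replaying the whole message.
 */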
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

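/* Rewind the requester so that everything from the last acked PSN is
 * resent. Wqes that already completed are skipped; the rest have
 * their DMA state reset and are returned to the posted state so their
 * packets are regenerated.
 */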
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index	= cons;
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

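		/* The first incomplete wqe may be partially acked. For
		 * sends and writes, step the DMA state past the packets
		 * already acked; for reads, bump the iova past the data
		 * already received so the retried read only fetches the
		 * missing tail.
		 */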
		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = timer_container_of(qp, t, rnr_nak_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "nak timer fired\n");

	if (!rxe_get(qp))
		return;
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* request a send queue retry */
		qp->req.need_retry = 1;
		qp->req.wait_for_rnr_timer = 0;
		rxe_sched_task(&qp->send_task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
	rxe_put(qp);
}

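/* Called when the QP is in the SQD state to check whether the send
 * queue has fully drained; if so, clear sq_draining and deliver the
 * IB_EVENT_SQ_DRAINED event to the consumer.
 */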
static void req_check_sq_drain_done(struct rxe_qp *qp)
{
	struct rxe_queue *q;
	unsigned int index;
	unsigned int cons;
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp_state(qp) == IB_QPS_SQD) {
		q = qp->sq.queue;
		index = qp->req.wqe_index;
		cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		wqe = queue_addr_from_index(q, cons);

		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		do {
			if (!qp->attr.sq_draining)
				/* comp just finished */
				break;

			if (wqe && ((index != cons) ||
				(wqe->state != wqe_state_posted)))
				/* comp not done yet */
				break;

			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
			return;
		} while (0);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

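/* Return the wqe at the requester's current index, or NULL if the
 * requester has consumed everything posted to the send queue.
 */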
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod;

	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
	if (index == prod)
		return NULL;
	else
		return queue_addr_from_index(q, index);
}

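/* Fetch the next wqe to process. In the SQD state only a wqe that is
 * already partway through its packet stream (wqe_state_processing)
 * may continue; new work is held back until the drain completes.
 */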
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;

	req_check_sq_drain_done(qp);

	wqe = __req_next_wqe(qp);
	if (wqe == NULL)
		return NULL;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing))) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF), see IBA 10.6.5.1:
	 * requires that ALL previous operations on the send queue
	 * be complete. The rxe driver treats this as mandatory.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence, see IBA 10.8.3.3:
	 * requires that all previous read and atomic operations
	 * be complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

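/* Choose the wire opcode for the next RC packet from the work request
 * opcode, the opcode of the previous packet and whether the remaining
 * payload fits in one MTU. For example, a 3000 byte RDMA write with a
 * 1024 byte MTU goes out as WRITE_FIRST (1024), WRITE_MIDDLE (1024)
 * and WRITE_LAST (952); the same write within one MTU is WRITE_ONLY.
 */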
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_FLUSH:
		return IB_OPCODE_RC_FLUSH;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_ATOMIC_WRITE:
		return IB_OPCODE_RC_ATOMIC_WRITE;

	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

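/* Reserve one of the QP's max_rd_atomic credits for an outbound read
 * or atomic operation; returns -EAGAIN when all credits are already
 * in use by outstanding operations.
 */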
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

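/* For connected QPs the payload limit is the path MTU negotiated for
 * the connection; for UD/GSI QPs it is the port's MTU cap.
 */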
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
	struct sk_buff		*skb;
	struct rxe_send_wr	*ibwr = &wqe->wr;
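	/* BTH pad count: bytes needed to round the payload up to a
	 * 4-byte boundary
	 */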
	int			pad = (-payload) & 0x3;
	int			paylen;
	int			solicited;
	u32			qp_num;
	int			ack_req = 0;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
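	/* a solicited event may only be requested on the last packet
	 * of a send or of an RDMA write with immediate, i.e. the
	 * operations that generate a completion at the responder
	 */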
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
		ack_req = ((pkt->mask & RXE_END_MASK) ||
			   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		if (pkt->mask & RXE_FETH_MASK)
			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
		else
			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	/* Fill Flush Extension Transport Header */
	if (pkt->mask & RXE_FETH_MASK)
		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

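/* Finish the network headers and copy the payload into the packet.
 * Inline data is copied straight out of the wqe; otherwise the data
 * is gathered from the sge list through the MR machinery.
 */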
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	} else if (pkt->mask & RXE_FLUSH_MASK) {
		/* oA19-2: shall have no payload. */
		wqe->dma.resid = 0;
	}

	if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
		memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
		wqe->dma.resid -= payload;
	}

	return 0;
}

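/* On the last packet of an RC message the wqe must still wait for the
 * ack (wqe_state_pending); for UC/UD the wqe is complete once the
 * last packet has been sent (wqe_state_done).
 */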
static void update_wqe_state(struct rxe_qp *qp,
		struct rxe_send_wqe *wqe,
		struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
		else
			wqe->state = wqe_state_done;
	} else {
		wqe->state = wqe_state_processing;
	}
}

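/* Advance the requester PSN. A read request consumes one PSN per
 * expected response packet, so the PSN jumps past the whole response
 * range; everything else advances by one PSN per packet sent. Note
 * that finish_packet() has already subtracted this packet's payload
 * from wqe->dma.resid, so payload is added back when counting the
 * packets still to send.
 */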
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

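/* Record the opcode just sent, move on to the next wqe if this was
 * the last packet of the message, and arm the retransmit timer if it
 * is configured and not already running.
 */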
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

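/* Execute a local (non-wire) work request: memory registration,
 * invalidate or memory window bind. These complete immediately and
 * generate no packets.
 */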
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	return 0;
}

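/* Generate at most one request packet per call. Returns 0 so the
 * task loop calls us again immediately, or -EAGAIN to stop until the
 * task is rescheduled.
 */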
int rxe_requester(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(!qp->valid)) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}

	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = __req_next_wqe(qp);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		if (wqe) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			goto err;
		} else {
			goto exit;
		}
	}

	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);

	/* We come here if the retransmit timer or the rnr timer has
	 * fired. If the retransmit timer fires while we are processing
	 * an RNR NAK, wait until the rnr timer has fired before
	 * starting the retry flow.
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

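	/* For RC, don't let the send PSN run more than
	 * RXE_MAX_UNACKED_PSNS ahead of the last acked PSN; wait for
	 * acks to catch up before sending more.
	 */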
	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
			RXE_ATOMIC_WRITE_MASK))) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
			wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		rxe_dbg_qp(qp, "Failed to get address vector\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		rxe_dbg_qp(qp, "Failed allocating skb\n");
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		rxe_dbg_qp(qp, "Error during finish packet\n");
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the work item. A zero return
	 * will continue looping and return to rxe_requester
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	rxe_qp_error(qp);
exit:
	ret = -EAGAIN;
out:
	return ret;
}

int rxe_sender(struct rxe_qp *qp)
{
	int req_ret;
	int comp_ret;

	/* process the send queue */
	req_ret = rxe_requester(qp);

	/* process the response queue */
	comp_ret = rxe_completer(qp);

	/* exit the task loop if both the requester and completer
	 * have nothing more to do
	 */
	return (req_ret && comp_ret) ? -EAGAIN : 0;
}