// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

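/* Implementation of the GDMA hardware channel (HWC): the management
 * request/response path between the driver and the MANA device. Requests
 * go out on the HWC send queue and responses come back on the HWC
 * receive queue; see mana_hwc_send_request() below.
 */
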
#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

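/* Reserve a free message slot: the semaphore bounds the number of
 * in-flight messages, so once it is acquired the bitmap search below is
 * guaranteed to find a zero bit.
 */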
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

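/* Dispatch a received response: validate the message id against the
 * in-flight bitmap, copy the response into the caller's output buffer,
 * repost the RX work request, and wake up the waiting caller.
 */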
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 struct hwc_work_request *rx_req)
{
	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;

	/* Must post rx wqe before complete(), otherwise the next rx may
	 * hit no_wqe error.
	 */
	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);

	complete(&ctx->comp_event);
}

static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;

	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

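/* CQ callback for the HWC receive queue. The RX work request that owns
 * the completed buffer is recovered from the WQE's SGE address: the DMA
 * region is one contiguous block carved into max_req_msg_size slots, so
 * (sge->address - base) / max_req_msg_size is the request index.
 */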
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

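	/* The SGL follows the 8-byte fixed part of the WQE and the inline
	 * OOB data.
	 */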
	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);

	/* Can no longer use 'resp', because the buffer is posted to the HW
	 * in mana_hwc_handle_resp() above.
	 */
	resp = NULL;
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
	spec.eq.msix_index = 0;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

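/* Completion handler shared by the HWC SQ and RQ: drain the CQ, route
 * each completion to the TX or RX event handler based on is_sq, then
 * re-arm the CQ for the next interrupt.
 */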
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

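/* Create the single HWC completion queue and its parent event queue.
 * The queue sizes are rounded up to a power of two and clamped to the
 * minimum size the hardware accepts.
 */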
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MANA_MIN_QSIZE)
		eq_size = MANA_MIN_QSIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MANA_MIN_QSIZE)
		cq_size = MANA_MIN_QSIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

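/* Allocate one physically contiguous DMA region and slice it into
 * q_depth fixed-size message buffers; reqs[i] owns the i-th slot. For
 * the RQ, this slot arithmetic is what mana_hwc_rx_event_handler()
 * inverts to recover the request index.
 */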
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
			buf_size, err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

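/* Create an HWC work queue (SQ or RQ) and the DMA-backed message
 * buffers that its WQEs will point at.
 */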
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MANA_MIN_QSIZE)
		queue_size = MANA_MIN_QSIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);

	dev_err(hwc->dev, "Failed to create HWC queue size=%u type=%d err=%d\n",
		queue_size, q_type, err);
	return err;
}

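/* Build and post a send WQE: the hwc_tx_oob carries the routing ids
 * (destination virtual RQ/RCQ, our SQ/SCQ) inline in the WQE, and the
 * single SGE points at the request message in the TX DMA buffer.
 */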
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

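/* Hand the queue addresses to the device over the shared-memory channel
 * and wait for the GDMA_EQE_HWC_INIT_DONE event; the device reports the
 * negotiated queue depth and message size limits via the init EQEs
 * handled in mana_hwc_init_event_handler().
 */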
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both cq->id and gc->max_num_cqs were set in
	 * mana_hwc_init_event_handler().
	 */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* The CQ is shared by the SQ and the RQ, so the CQ's queue depth is
	 * the sum of the SQ depth and the RQ depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

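/* Bring up the HW channel: allocate the queues with bootstrap sizes,
 * perform the init handshake with the device, then post the RX WQEs and
 * verify the EQ works. On any failure, the partially created state is
 * torn down by mana_hwc_destroy_channel().
 */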
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

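/* Send a request over the HW channel and synchronously wait for the
 * response, which is copied into @resp.
 *
 * Illustrative call pattern (a sketch only; the request/response types
 * and mana_gd_init_req_hdr() are assumed from the rest of the driver,
 * not defined in this file):
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 */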
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %u > %u\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 msecs_to_jiffies(hwc->hwc_timeout))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}