// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <net/mana/mana.h>

struct dentry *mana_debugfs_root;

static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
	return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
	return readq(g->bar0_va + offset);
}

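/* Discover the PF register layout: the doorbell page size and base come
 * from fixed BAR0 registers, and the shared-memory channel base is located
 * relative to the SR-IOV configuration region.
 */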
static void mana_gd_init_pf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	void __iomem *sriov_base_va;
	u64 sriov_base_off;

	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);

	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);

	sriov_base_va = gc->bar0_va + sriov_base_off;
	gc->shm_base = sriov_base_va +
			mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
}

static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->phys_db_page_base = gc->bar0_pa +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

static void mana_gd_init_registers(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	if (gc->is_pf)
		mana_gd_init_pf_regs(pdev);
	else
		mana_gd_init_vf_regs(pdev);
}

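/* Query the hardware resource limits and derive the number of usable
 * queues: the minimum of the online CPU count, MANA_MAX_NUM_QUEUES, the
 * per-type hardware queue limits, and the usable MSI-X vectors minus the
 * one reserved for the Hardware Channel (HWC).
 */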
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	if (gc->num_msix_usable > resp.max_msix)
		gc->num_msix_usable = resp.max_msix;

	if (gc->num_msix_usable <= 1)
		return -ENOSPC;

	gc->max_num_queues = num_online_cpus();
	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
		gc->max_num_queues = MANA_MAX_NUM_QUEUES;

	if (gc->max_num_queues > resp.max_eq)
		gc->max_num_queues = resp.max_eq;

	if (gc->max_num_queues > resp.max_cq)
		gc->max_num_queues = resp.max_cq;

	if (gc->max_num_queues > resp.max_sq)
		gc->max_num_queues = resp.max_sq;

	if (gc->max_num_queues > resp.max_rq)
		gc->max_num_queues = resp.max_rq;

	/* The Hardware Channel (HWC) uses 1 MSI-X vector. */
	if (gc->max_num_queues > gc->num_msix_usable - 1)
		gc->max_num_queues = gc->num_msix_usable - 1;

	return 0;
}

static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_hwc_timeout_resp resp = {};
	struct gdma_query_hwc_timeout_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
			     sizeof(req), sizeof(resp));
	req.timeout_ms = *timeout_val;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*timeout_val = resp.timeout_ms;

	return 0;
}

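/* Enumerate the GDMA devices exposed by the hardware and record the dev IDs
 * of the MANA Ethernet device and the MANA RDMA (mana_ib) device. The HWC
 * is skipped because it was already detected in mana_hwc_create_channel().
 */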
static int mana_gd_detect_devices(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_list_devices_resp resp = {};
	struct gdma_general_req req = {};
	struct gdma_dev_id dev;
	int found_dev = 0;
	u16 dev_type;
	int err;
	u32 i;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
			     sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	for (i = 0; i < GDMA_DEV_LIST_SIZE &&
	     found_dev < resp.num_of_devs; i++) {
		dev = resp.devs[i];
		dev_type = dev.type;

		/* Skip empty devices */
		if (dev.as_uint32 == 0)
			continue;

		found_dev++;

		/* HWC is already detected in mana_hwc_create_channel(). */
		if (dev_type == GDMA_DEVICE_HWC)
			continue;

		if (dev_type == GDMA_DEVICE_MANA) {
			gc->mana.gdma_context = gc;
			gc->mana.dev_id = dev;
		} else if (dev_type == GDMA_DEVICE_MANA_IB) {
			gc->mana_ib.dev_id = dev;
			gc->mana_ib.gdma_context = gc;
		}
	}

	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");

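/* Allocate a DMA-coherent buffer for a GDMA queue. The length must be a
 * power of two and at least one MANA page, as required by the DMA region
 * creation interface.
 */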
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
{
	dma_addr_t dma_handle;
	void *buf;

	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	gmi->dev = gc->dev;
	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	gmi->dma_handle = dma_handle;
	gmi->virt_addr = buf;
	gmi->length = length;

	return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
			  gmi->dma_handle);
}

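/* Ask the hardware to create an EQ over the HWC. On success the hardware
 * takes ownership of the DMA region, so the handle is invalidated here to
 * avoid a double destroy, and disable_needed is set so the EQ gets disabled
 * on teardown.
 */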
static int mana_gd_create_hw_eq(struct gdma_context *gc,
				struct gdma_queue *queue)
{
	struct gdma_create_queue_resp resp = {};
	struct gdma_create_queue_req req = {};
	int err;

	if (queue->type != GDMA_EQ)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doolbell_id = queue->gdma_dev->doorbell;
	req.gdma_region = queue->mem_info.dma_region_handle;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	struct gdma_disable_queue_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	WARN_ON(queue->type != GDMA_EQ);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.queue_index = queue->id;
	req.alloc_res_id_on_creation = 1;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}

#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8

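/* Write a doorbell entry for the given queue type. Each doorbell page has a
 * fixed offset per queue type; the entry encodes the queue id, the new tail
 * position, and either an arm bit (EQ/CQ) or a WQE count (RQ).
 */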
static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
				  enum gdma_queue_type q_type, u32 qid,
				  u32 tail_ptr, u8 num_req)
{
	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
	union gdma_doorbell_entry e = {};

	switch (q_type) {
	case GDMA_EQ:
		e.eq.id = qid;
		e.eq.tail_ptr = tail_ptr;
		e.eq.arm = num_req;

		addr += DOORBELL_OFFSET_EQ;
		break;

	case GDMA_CQ:
		e.cq.id = qid;
		e.cq.tail_ptr = tail_ptr;
		e.cq.arm = num_req;

		addr += DOORBELL_OFFSET_CQ;
		break;

	case GDMA_RQ:
		e.rq.id = qid;
		e.rq.tail_ptr = tail_ptr;
		e.rq.wqe_cnt = num_req;

		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_SQ:
		e.sq.id = qid;
		e.sq.tail_ptr = tail_ptr;

		addr += DOORBELL_OFFSET_SQ;
		break;

	default:
		WARN_ON(1);
		return;
	}

	/* Ensure all writes are done before ringing the doorbell */
	wmb();

	writeq(e.as_uint64, addr);
}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
	/* The Hardware Spec specifies that the software client should set
	 * wqe_cnt to 0 for Receive Queues. The value is not used for Send
	 * Queues.
	 */
	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
	struct gdma_context *gc = cq->gdma_dev->gdma_context;
	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
			      head, arm_bit);
}
EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");

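/* Handle a single event queue entry: completion events are dispatched to
 * the completion queue's callback, test events complete the EQ self-test,
 * and HWC/RNIC events are forwarded to the EQ owner's callback.
 */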
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
	struct gdma_context *gc = eq->gdma_dev->gdma_context;
	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
	union gdma_eqe_info eqe_info;
	enum gdma_eqe_type type;
	struct gdma_event event;
	struct gdma_queue *cq;
	struct gdma_eqe *eqe;
	u32 cq_id;

	eqe = &eq_eqe_ptr[head];
	eqe_info.as_uint32 = eqe->eqe_info;
	type = eqe_info.type;

	switch (type) {
	case GDMA_EQE_COMPLETION:
		cq_id = eqe->details[0] & 0xFFFFFF;
		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
			break;

		cq = gc->cq_table[cq_id];
		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
			break;

		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);

		break;

	case GDMA_EQE_TEST_EVENT:
		gc->test_event_eq_id = eq->id;
		complete(&gc->eq_test_event);
		break;

	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
	case GDMA_EQE_HWC_INIT_DATA:
	case GDMA_EQE_HWC_INIT_DONE:
	case GDMA_EQE_RNIC_QP_FATAL:
		if (!eq->eq.callback)
			break;

		event.type = type;
		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
		eq->eq.callback(eq->eq.context, eq, &event);
		break;

	default:
		break;
	}
}

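/* Poll an event queue from IRQ context. Ownership of an EQE is tracked with
 * owner bits: the hardware advances them each time it wraps the queue, so
 * an entry whose owner bits match the previous lap still belongs to the
 * hardware, and an entry one lap ahead indicates an overflow.
 */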
static void mana_gd_process_eq_events(void *arg)
{
	u32 owner_bits, new_bits, old_bits;
	union gdma_eqe_info eqe_info;
	struct gdma_eqe *eq_eqe_ptr;
	struct gdma_queue *eq = arg;
	struct gdma_context *gc;
	struct gdma_eqe *eqe;
	u32 head, num_eqe;
	int i;

	gc = eq->gdma_dev->gdma_context;

	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
	eq_eqe_ptr = eq->queue_mem_ptr;

	/* Process up to 5 EQEs at a time, and update the HW head. */
	for (i = 0; i < 5; i++) {
		eqe = &eq_eqe_ptr[eq->head % num_eqe];
		eqe_info.as_uint32 = eqe->eqe_info;
		owner_bits = eqe_info.owner_bits;

		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
		/* No more entries */
		if (owner_bits == old_bits) {
			/* Return here without ringing the doorbell */
			if (i == 0)
				return;
			break;
		}

		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
		if (owner_bits != new_bits) {
			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
			break;
		}

		/* Per GDMA spec, rmb is necessary after checking owner_bits,
		 * before reading eqe.
		 */
		rmb();

		mana_gd_process_eqe(eq);

		eq->head++;
	}

	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
			      head, SET_ARM_BIT);
}

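/* Attach an EQ to the IRQ context selected by the spec's MSI-X index. The
 * EQ is added to an RCU-protected list so the interrupt handler can walk it
 * locklessly.
 */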
static int mana_gd_register_irq(struct gdma_queue *queue,
				const struct gdma_queue_spec *spec)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	unsigned int msi_index;
	unsigned long flags;
	struct device *dev;
	int err = 0;

	gc = gd->gdma_context;
	dev = gc->dev;
	msi_index = spec->eq.msix_index;

	if (msi_index >= gc->num_msix_usable) {
		err = -ENOSPC;
		dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
			err, msi_index, gc->num_msix_usable);

		return err;
	}

	queue->eq.msix_index = msi_index;
	gic = &gc->irq_contexts[msi_index];

	spin_lock_irqsave(&gic->lock, flags);
	list_add_rcu(&queue->entry, &gic->eq_list);
	spin_unlock_irqrestore(&gic->lock, flags);

	return 0;
}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	unsigned int msix_index;
	unsigned long flags;
	struct gdma_queue *eq;

	gc = gd->gdma_context;

	/* At most num_online_cpus() + 1 interrupts are used. */
	msix_index = queue->eq.msix_index;
	if (WARN_ON(msix_index >= gc->num_msix_usable))
		return;

	gic = &gc->irq_contexts[msix_index];
	spin_lock_irqsave(&gic->lock, flags);
	list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
		if (queue == eq) {
			list_del_rcu(&eq->entry);
			break;
		}
	}
	spin_unlock_irqrestore(&gic->lock, flags);

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
	synchronize_rcu();
}

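/* Verify that an EQ can deliver interrupts: ask the device to generate a
 * test EQE on the queue and wait up to 30 seconds for the corresponding
 * event, checking that it arrived on the expected queue.
 */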
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};
	struct device *dev = gc->dev;
	int err;

	mutex_lock(&gc->eq_test_event_mutex);

	init_completion(&gc->eq_test_event);
	gc->test_event_eq_id = INVALID_QUEUE_ID;

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = eq->gdma_dev->dev_id;
	req.queue_index = eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		dev_err(dev, "test_eq failed: %d\n", err);
		goto out;
	}

	err = -EPROTO;

	if (resp.hdr.status) {
		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
		goto out;
	}

	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
		goto out;
	}

	if (eq->id != gc->test_event_eq_id) {
		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
			gc->test_event_eq_id, eq->id);
		goto out;
	}

	err = 0;
out:
	mutex_unlock(&gc->eq_test_event_mutex);
	return err;
}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
			       struct gdma_queue *queue)
{
	int err;

	if (flush_events) {
		err = mana_gd_test_eq(gc, queue);
		if (err)
			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
	}

	mana_gd_deregister_irq(queue);

	if (queue->eq.disable_needed)
		mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     bool create_hwq, struct gdma_queue *queue)
{
	struct gdma_context *gc = gd->gdma_context;
	struct device *dev = gc->dev;
	u32 log2_num_entries;
	int err;

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
	queue->id = INVALID_QUEUE_ID;

	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

	if (spec->eq.log2_throttle_limit > log2_num_entries) {
		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
			spec->eq.log2_throttle_limit, log2_num_entries);
		return -EINVAL;
	}

	err = mana_gd_register_irq(queue, spec);
	if (err) {
		dev_err(dev, "Failed to register irq: %d\n", err);
		return err;
	}

	queue->eq.callback = spec->eq.callback;
	queue->eq.context = spec->eq.context;
	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

	if (create_hwq) {
		err = mana_gd_create_hw_eq(gc, queue);
		if (err)
			goto out;

		err = mana_gd_test_eq(gc, queue);
		if (err)
			goto out;
	}

	return 0;
out:
	dev_err(dev, "Failed to create EQ: %d\n", err);
	mana_gd_destroy_eq(gc, false, queue);
	return err;
}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
			      struct gdma_queue *queue)
{
	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->cq.parent = spec->cq.parent_eq;
	queue->cq.context = spec->cq.context;
	queue->cq.callback = spec->cq.callback;
}

static void mana_gd_destroy_cq(struct gdma_context *gc,
			       struct gdma_queue *queue)
{
	u32 id = queue->id;

	if (id >= gc->max_num_cqs)
		return;

	if (!gc->cq_table[id])
		return;

	gc->cq_table[id] = NULL;
}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_EQ)
		err = mana_gd_create_eq(gd, spec, false, queue);
	else if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
			     sizeof(resp));
	req.dma_region_handle = dma_region_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
			err, resp.hdr.status);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");

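/* Register a queue's backing memory with the hardware as a DMA region. The
 * request carries the full page address list, so its size is computed with
 * struct_size() and must fit within the HWC's maximum request message size.
 */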
static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
{
	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
	struct gdma_create_dma_region_req *req = NULL;
	struct gdma_create_dma_region_resp resp = {};
	struct gdma_context *gc = gd->gdma_context;
	struct hw_channel_context *hwc;
	u32 length = gmi->length;
	size_t req_msg_size;
	int err;
	int i;

	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
		return -EINVAL;

	hwc = gc->hwc.driver_data;
	req_msg_size = struct_size(req, page_addr_list, num_page);
	if (req_msg_size > hwc->max_req_msg_size)
		return -EINVAL;

	req = kzalloc(req_msg_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
			     req_msg_size, sizeof(resp));
	req->length = length;
	req->offset_in_page = 0;
	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
	req->page_count = num_page;
	req->page_addr_list_len = num_page;

	for (i = 0; i < num_page; i++)
		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;

	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
	if (err)
		goto out;

	if (resp.hdr.status ||
	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
			resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	gmi->dma_region_handle = resp.dma_region_handle;
	dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
		gmi->dma_region_handle);
out:
	if (err)
		dev_dbg(gc->dev,
			"Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
			length, req->gdma_page_type, resp.hdr.status, err);
	kfree(req);
	return err;
}

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_EQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	err = mana_gd_create_eq(gd, spec, true, queue);
	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}
EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
	    spec->type != GDMA_RQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}
EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
	struct gdma_mem_info *gmi = &queue->mem_info;

	switch (queue->type) {
	case GDMA_EQ:
		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
		break;

	case GDMA_CQ:
		mana_gd_destroy_cq(gc, queue);
		break;

	case GDMA_RQ:
		break;

	case GDMA_SQ:
		break;

	default:
		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
			queue->type);
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
	mana_gd_free_memory(gmi);
	kfree(queue);
}
EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_verify_ver_resp resp = {};
	struct gdma_verify_ver_req req = {};
	struct hw_channel_context *hwc;
	int err;

	hwc = gc->hwc.driver_data;
	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(req), sizeof(resp));

	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req.protocol_ver_max = GDMA_PROTOCOL_LAST;

	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;

	req.drv_ver = 0;	/* Unused */
	req.os_type = 0x10;	/* Linux */
	req.os_ver_major = LINUX_VERSION_MAJOR;
	req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
	req.os_ver_build = LINUX_VERSION_SUBLEVEL;
	strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
	strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
	strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}
	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
		if (err) {
			dev_err(gc->dev, "Failed to set the hwc timeout %d\n",
				err);
			return err;
		}
		dev_dbg(gc->dev, "set the hwc timeout to %u\n",
			hwc->hwc_timeout);
	}
	return 0;
}

int mana_gd_register_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_register_device_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	gd->pdid = resp.pdid;
	gd->gpa_mkey = resp.gpa_mkey;
	gd->doorbell = resp.db_id;

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");

int mana_gd_deregister_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_general_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	if (gd->pdid == INVALID_PDID)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
			err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	return err;
}
EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");

u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
	u32 wq_size = wq->queue_size;

	WARN_ON_ONCE(used_space > wq_size);

	return wq_size - used_space;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{
	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);

	return wq->queue_mem_ptr + offset;
}

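/* Write the WQE header and inline client OOB data at wqe_ptr. The header
 * plus the client OOB fits within one 32-byte Basic Unit, so this write can
 * never cross the end of the queue buffer. Returns the number of bytes
 * written, which the caller uses to locate the SGL.
 */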
static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
				    enum gdma_queue_type q_type,
				    u32 client_oob_size, u32 sgl_data_size,
				    u8 *wqe_ptr)
{
	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
	u8 *ptr;

	memset(header, 0, sizeof(struct gdma_wqe));
	header->num_sge = wqe_req->num_sge;
	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);

	if (oob_in_sgl) {
		WARN_ON_ONCE(wqe_req->num_sge < 2);

		header->client_oob_in_sgl = 1;

		if (pad_data)
			header->last_vbytes = wqe_req->sgl[0].size;
	}

	if (q_type == GDMA_SQ)
		header->client_data_unit = wqe_req->client_data_unit;

	/* The size of gdma_wqe + client_oob_size must be less than or equal
	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
	 * the queue memory buffer boundary.
	 */
	ptr = wqe_ptr + sizeof(*header);

	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

		if (client_oob_size > wqe_req->inline_oob_size)
			memset(ptr + wqe_req->inline_oob_size, 0,
			       client_oob_size - wqe_req->inline_oob_size);
	}

	return sizeof(*header) + client_oob_size;
}

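/* Copy the request's scatter-gather list into the work queue buffer,
 * splitting the copy in two when the SGL wraps past the end of the ring
 * buffer.
 */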
static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
			      const struct gdma_wqe_request *wqe_req)
{
	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	const u8 *address = (u8 *)wqe_req->sgl;
	u8 *base_ptr, *end_ptr;
	u32 size_to_end;

	base_ptr = wq->queue_mem_ptr;
	end_ptr = base_ptr + wq->queue_size;
	size_to_end = (u32)(end_ptr - wqe_ptr);

	if (size_to_end < sgl_size) {
		memcpy(wqe_ptr, address, size_to_end);

		wqe_ptr = base_ptr;
		address += size_to_end;
		sgl_size -= size_to_end;
	}

	memcpy(wqe_ptr, address, sgl_size);
}

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info)
{
	u32 client_oob_size = wqe_req->inline_oob_size;
	struct gdma_context *gc;
	u32 sgl_data_size;
	u32 max_wqe_size;
	u32 wqe_size;
	u8 *wqe_ptr;

	if (wqe_req->num_sge == 0)
		return -EINVAL;

	if (wq->type == GDMA_RQ) {
		if (client_oob_size != 0)
			return -EINVAL;

		client_oob_size = INLINE_OOB_SMALL_SIZE;

		max_wqe_size = GDMA_MAX_RQE_SIZE;
	} else {
		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
		    client_oob_size != INLINE_OOB_LARGE_SIZE)
			return -EINVAL;

		max_wqe_size = GDMA_MAX_SQE_SIZE;
	}

	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
			 sgl_data_size, GDMA_WQE_BU_SIZE);
	if (wqe_size > max_wqe_size)
		return -EINVAL;

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
		gc = wq->gdma_dev->gdma_context;
		dev_err(gc->dev, "unsuccessful flow control!\n");
		return -ENOSPC;
	}

	if (wqe_info)
		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
					    sgl_data_size, wqe_ptr);
	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
		wqe_ptr -= wq->queue_size;

	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

	wq->head += wqe_size / GDMA_WQE_BU_SIZE;

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe_req,
			  struct gdma_posted_wqe_info *wqe_info)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	int err;

	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
	if (err) {
		dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
			queue->type, queue->queue_size, err);
		return err;
	}

	mana_gd_wq_ring_doorbell(gc, queue);

	return 0;
}

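/* Read one CQE at the current head, using the same owner-bit scheme as the
 * EQ path. Returns 1 if a new entry was copied into @comp, 0 if the queue
 * is empty, and -1 if an overflow was detected.
 */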
static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
	u32 owner_bits, new_bits, old_bits;
	struct gdma_cqe *cqe;

	cqe = &cq_cqe[cq->head % num_cqe];
	owner_bits = cqe->cqe_info.owner_bits;

	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
	/* Return 0 if no more entries. */
	if (owner_bits == old_bits)
		return 0;

	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
	/* Return -1 if overflow detected. */
	if (WARN_ON_ONCE(owner_bits != new_bits))
		return -1;

	/* Per GDMA spec, rmb is necessary after checking owner_bits, before
	 * reading completion info
	 */
	rmb();

	comp->wq_num = cqe->cqe_info.wq_num;
	comp->is_sq = cqe->cqe_info.is_sq;
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

	return 1;
}

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
	int cqe_idx;
	int ret;

	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

		if (ret < 0) {
			cq->head -= cqe_idx;
			return ret;
		}

		if (ret == 0)
			break;

		cq->head++;
	}

	return cqe_idx;
}
EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");

static irqreturn_t mana_gd_intr(int irq, void *arg)
{
	struct gdma_irq_context *gic = arg;
	struct list_head *eq_list = &gic->eq_list;
	struct gdma_queue *eq;

	rcu_read_lock();
	list_for_each_entry_rcu(eq, eq_list, entry) {
		gic->handler(eq);
	}
	rcu_read_unlock();

	return IRQ_HANDLED;
}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{
	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
	if (!r->map)
		return -ENOMEM;

	r->size = res_avail;
	spin_lock_init(&r->lock);

	return 0;
}

void mana_gd_free_res_map(struct gdma_resource *r)
{
	bitmap_free(r->map);
	r->map = NULL;
	r->size = 0;
}

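/* Spread IRQ affinity across CPUs in increasing NUMA distance from @node.
 * Within each NUMA hop, an IRQ is pinned to a core's sibling mask and that
 * core is then excluded from further assignments, so IRQs land on distinct
 * physical cores before hyperthread siblings are reused.
 */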
static int irq_setup(unsigned int *irqs, unsigned int len, int node)
{
	const struct cpumask *next, *prev = cpu_none_mask;
	cpumask_var_t cpus __free(free_cpumask_var);
	int cpu, weight;

	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	for_each_numa_hop_mask(next, node) {
		weight = cpumask_weight_andnot(next, prev);
		while (weight > 0) {
			cpumask_andnot(cpus, next, prev);
			for_each_cpu(cpu, cpus) {
				if (len-- == 0)
					goto done;
				irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
				cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
				--weight;
			}
		}
		prev = next;
	}
done:
	rcu_read_unlock();
	return 0;
}

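/* Allocate MSI-X vectors and request one IRQ per vector: vector 0 serves
 * the Hardware Channel and the remaining vectors serve the EQs. The EQ IRQs
 * are spread NUMA-aware across CPUs by irq_setup(); the HWC IRQ either
 * joins that spread or, when one more vector than online CPUs was granted,
 * is pinned separately via cpumask_local_spread().
 */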
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	unsigned int max_queues_per_port;
	struct gdma_irq_context *gic;
	unsigned int max_irqs, cpu;
	int start_irq_index = 1;
	int nvec, *irqs, irq;
	int err, i = 0, j;

	cpus_read_lock();
	max_queues_per_port = num_online_cpus();
	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
		max_queues_per_port = MANA_MAX_NUM_QUEUES;

	/* Need 1 interrupt for the Hardware Communication Channel (HWC) */
	max_irqs = max_queues_per_port + 1;

	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0) {
		cpus_read_unlock();
		return nvec;
	}
	if (nvec <= num_online_cpus())
		start_irq_index = 0;

	irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
	if (!irqs) {
		err = -ENOMEM;
		goto free_irq_vector;
	}

	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
				   GFP_KERNEL);
	if (!gc->irq_contexts) {
		err = -ENOMEM;
		goto free_irq_array;
	}

	for (i = 0; i < nvec; i++) {
		gic = &gc->irq_contexts[i];
		gic->handler = mana_gd_process_eq_events;
		INIT_LIST_HEAD(&gic->eq_list);
		spin_lock_init(&gic->lock);

		if (!i)
			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
				 pci_name(pdev));
		else
			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
				 i - 1, pci_name(pdev));

		irq = pci_irq_vector(pdev, i);
		if (irq < 0) {
			err = irq;
			goto free_irq;
		}

		if (!i) {
			err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
			if (err)
				goto free_irq;

			/* If the number of IRQs is one more than the number of
			 * online CPUs, assign IRQ0 (the HWC IRQ) and IRQ1 to
			 * the same CPU; otherwise use different CPUs for IRQ0
			 * and IRQ1. cpumask_local_spread() is used instead of
			 * cpumask_first() for the node because the node can be
			 * memory-only.
			 */
			if (start_irq_index) {
				cpu = cpumask_local_spread(i, gc->numa_node);
				irq_set_affinity_and_hint(irq, cpumask_of(cpu));
			} else {
				irqs[start_irq_index] = irq;
			}
		} else {
			irqs[i - start_irq_index] = irq;
			err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
					  gic->name, gic);
			if (err)
				goto free_irq;
		}
	}

	err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
	if (err)
		goto free_irq;

	gc->max_num_msix = nvec;
	gc->num_msix_usable = nvec;
	cpus_read_unlock();
	kfree(irqs);
	return 0;

free_irq:
	for (j = i - 1; j >= 0; j--) {
		irq = pci_irq_vector(pdev, j);
		gic = &gc->irq_contexts[j];

		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, gic);
	}

	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
free_irq_array:
	kfree(irqs);
free_irq_vector:
	cpus_read_unlock();
	pci_free_irq_vectors(pdev);
	return err;
}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	int irq, i;

	if (gc->max_num_msix < 1)
		return;

	for (i = 0; i < gc->max_num_msix; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0)
			continue;

		gic = &gc->irq_contexts[i];

		/* Need to clear the hint before free_irq */
		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, gic);
	}

	pci_free_irq_vectors(pdev);

	gc->max_num_msix = 0;
	gc->num_msix_usable = 0;
	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
}

static int mana_gd_setup(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	mana_gd_init_registers(pdev);
	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

	err = mana_gd_setup_irqs(pdev);
	if (err) {
		dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
		return err;
	}

	err = mana_hwc_create_channel(gc);
	if (err)
		goto remove_irq;

	err = mana_gd_verify_vf_version(pdev);
	if (err)
		goto destroy_hwc;

	err = mana_gd_query_max_resources(pdev);
	if (err)
		goto destroy_hwc;

	err = mana_gd_detect_devices(pdev);
	if (err)
		goto destroy_hwc;

	dev_dbg(&pdev->dev, "mana gdma setup successful\n");
	return 0;

destroy_hwc:
	mana_hwc_destroy_channel(gc);
remove_irq:
	mana_gd_remove_irqs(pdev);
	dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
	return err;
}

static void mana_gd_cleanup(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_hwc_destroy_channel(gc);

	mana_gd_remove_irqs(pdev);
	dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}

static bool mana_is_pf(unsigned short dev_id)
{
	return dev_id == MANA_PF_DEVICE_ID;
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct gdma_context *gc;
	void __iomem *bar0_va;
	int bar = 0;
	int err;

	/* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
		return -ENXIO;
	}

	pci_set_master(pdev);

	err = pci_request_regions(pdev, "mana");
	if (err)
		goto disable_dev;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
		goto release_region;
	}
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	err = -ENOMEM;
	gc = vzalloc(sizeof(*gc));
	if (!gc)
		goto release_region;

	mutex_init(&gc->eq_test_event_mutex);
	pci_set_drvdata(pdev, gc);
	gc->bar0_pa = pci_resource_start(pdev, 0);

	bar0_va = pci_iomap(pdev, bar, 0);
	if (!bar0_va)
		goto free_gc;

	gc->numa_node = dev_to_node(&pdev->dev);
	gc->is_pf = mana_is_pf(pdev->device);
	gc->bar0_va = bar0_va;
	gc->dev = &pdev->dev;

	if (gc->is_pf)
		gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
	else
		gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
							  mana_debugfs_root);

	err = mana_gd_setup(pdev);
	if (err)
		goto unmap_bar;

	err = mana_probe(&gc->mana, false);
	if (err)
		goto cleanup_gd;

	return 0;

cleanup_gd:
	mana_gd_cleanup(pdev);
unmap_bar:
	/*
	 * At this point the other debugfs child dirs/files are either not yet
	 * created or already cleaned up, so removing the PCI debugfs folder
	 * here only cleans up the adapter-MTU file and the mana_pci_debugfs
	 * folder itself.
	 */
	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;
	pci_iounmap(pdev, bar0_va);
free_gc:
	pci_set_drvdata(pdev, NULL);
	vfree(gc);
release_region:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
	return err;
}

static void mana_gd_remove(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_remove(&gc->mana, false);

	mana_gd_cleanup(pdev);

	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;

	pci_iounmap(pdev, gc->bar0_va);

	vfree(gc);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}

/* The 'state' parameter is not used. */
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_remove(&gc->mana, true);

	mana_gd_cleanup(pdev);

	return 0;
}

/* In case the NIC hardware stops working, the suspend and resume callbacks
 * will fail -- if this happens, it's safer to just report an error than try
 * to undo what has been done.
 */
static int mana_gd_resume(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	err = mana_gd_setup(pdev);
	if (err)
		return err;

	err = mana_probe(&gc->mana, true);
	if (err)
		return err;

	return 0;
}

/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
static void mana_gd_shutdown(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "Shutdown was called\n");

	mana_remove(&gc->mana, true);

	mana_gd_cleanup(pdev);

	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;

	pci_disable_device(pdev);
}

static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
	{ }
};

static struct pci_driver mana_driver = {
	.name		= "mana",
	.id_table	= mana_id_table,
	.probe		= mana_gd_probe,
	.remove		= mana_gd_remove,
	.suspend	= mana_gd_suspend,
	.resume		= mana_gd_resume,
	.shutdown	= mana_gd_shutdown,
};

static int __init mana_driver_init(void)
{
	int err;

	mana_debugfs_root = debugfs_create_dir("mana", NULL);

	err = pci_register_driver(&mana_driver);
	if (err) {
		debugfs_remove(mana_debugfs_root);
		mana_debugfs_root = NULL;
	}

	return err;
}

static void __exit mana_driver_exit(void)
{
	pci_unregister_driver(&mana_driver);

	debugfs_remove(mana_debugfs_root);
	mana_debugfs_root = NULL;
}

module_init(mana_driver_init);
module_exit(mana_driver_exit);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");