xref: /linux/drivers/infiniband/core/cq.c (revision 7ce4de1cdaf11c39b507008dfb5a4e59079d4e8a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>
/* Max size for shared CQ, may require tuning */
#define IB_MAX_SHARED_CQ_SZ		4096U

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16
#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

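/*
 * Each entry below is {usec, pkts, comps, cq_period_mode} of struct
 * dim_cq_moder; only the usec and comps fields are consumed by
 * ib_cq_rdma_dim_work().
 */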
static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
	{1,   0, 1,  0},
	{1,   0, 4,  0},
	{2,   0, 4,  0},
	{2,   0, 8,  0},
	{4,   0, 8,  0},
	{16,  0, 8,  0},
	{16,  0, 16, 0},
	{32,  0, 16, 0},
	{32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct ib_cq *cq = dim->priv;

	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
	u16 comps = rdma_dim_prof[dim->profile_ix].comps;

	dim->state = DIM_START_MEASURE;

	trace_cq_modify(cq, comps, usec);
	cq->device->ops.modify_cq(cq, comps, usec);
}

static void rdma_dim_init(struct ib_cq *cq)
{
	struct dim *dim;

	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
	    cq->poll_ctx == IB_POLL_DIRECT)
		return;

	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
	if (!dim)
		return;

	dim->state = DIM_START_MEASURE;
	dim->tune_state = DIM_GOING_RIGHT;
	dim->profile_ix = RDMA_DIM_START_PROFILE;
	dim->priv = cq;
	cq->dim = dim;

	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static void rdma_dim_destroy(struct ib_cq *cq)
{
	if (!cq->dim)
		return;

	cancel_work_sync(&cq->dim->work);
	kfree(cq->dim);
}

static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	int rc;

	rc = ib_poll_cq(cq, num_entries, wc);
	trace_cq_poll(cq, num_entries, rc);
	return rc;
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{
	int i, n, completed = 0;

	trace_cq_process(cq);

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need unsigned
	 * minimum here.
	 */
	while ((n = __poll_cq(cq, min_t(u32, batch,
					budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq:		CQ to process
 * @budget:	number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ that was not allocated with
 * IB_POLL_DIRECT may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
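
/*
 * Example (not part of the original file): a minimal sketch of how a ULP
 * might consume an IB_POLL_DIRECT CQ through ib_process_cq_direct().  The
 * names example_req, example_done(), example_req_init() and
 * example_poll_once() are hypothetical; struct ib_cqe, struct ib_wc and
 * the CQ API calls are the real interfaces used above.
 */
#if 0	/* usage sketch only, not compiled */
struct example_req {
	struct ib_cqe	cqe;	/* wr->wr_cqe must point at this member */
	void		*buf;
};

/* Completion callback; __ib_process_cq() invokes it via wc->wr_cqe->done */
static void example_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_req *req = container_of(wc->wr_cqe,
					       struct example_req, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("request %p failed with status %d\n", req, wc->status);
}

static void example_req_init(struct example_req *req)
{
	req->cqe.done = example_done;	/* wired up before posting the WR */
}

/* Called from the ULP's own polling loop (e.g. a blk-mq poll handler) */
static int example_poll_once(struct ib_cq *cq)
{
	/* Reap at most 4 completions; never pass -1 unless the count is small */
	return ib_process_cq_direct(cq, 4);
}
#endif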

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	struct dim *dim = cq->dim;
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
			trace_cq_reschedule(cq);
			irq_poll_sched(&cq->iop);
		}
	}

	if (dim)
		rdma_dim(dim, completed);

	return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(cq->comp_wq, &cq->work);
	else if (cq->dim)
		rdma_dim(cq->dim, completed);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	trace_cq_schedule(cq);
	queue_work(cq->comp_wq, &cq->work);
}

/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @comp_vector:	HCA completion vectors for this CQ
 * @poll_ctx:		context to poll the CQ from.
 * @caller:		module owner name.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = rdma_zalloc_drv_obj(dev, ib_cq);
	if (!cq)
		return ERR_PTR(ret);

	cq->device = dev;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);
	cq->comp_vector = comp_vector;

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_free_cq;

	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
	rdma_restrack_set_name(&cq->res, caller);

	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret)
		goto out_free_wc;

	rdma_dim_init(cq);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
				ib_comp_wq : ib_comp_unbound_wq;
		break;
	default:
		ret = -EINVAL;
		goto out_destroy_cq;
	}

	rdma_restrack_add(&cq->res);
	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
	return cq;

out_destroy_cq:
	rdma_dim_destroy(cq);
	cq->device->ops.destroy_cq(cq, NULL);
out_free_wc:
	rdma_restrack_put(&cq->res);
	kfree(cq->wc);
out_free_cq:
	kfree(cq);
	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
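
/*
 * Example (not part of the original file): a minimal sketch of allocating a
 * kernel CQ through the ib_alloc_cq() wrapper, which expands to
 * __ib_alloc_cq() with KBUILD_MODNAME as @caller.  The function name
 * example_create_cq() and the CQE count are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static struct ib_cq *example_create_cq(struct ib_device *dev, void *priv)
{
	struct ib_cq *cq;

	/* 128 CQEs, completion vector 0, polled from softirq context */
	cq = ib_alloc_cq(dev, priv, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return cq;	/* returns ERR_PTR(), never NULL */

	/*
	 * Work requests posted against this CQ must set wr->wr_cqe; the
	 * matching ->done() callbacks run from the softirq poll loop.
	 * Release with ib_free_cq() once no QP references the CQ.
	 */
	return cq;
}
#endif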

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @poll_ctx:		context to poll the CQ from
 * @caller:		module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller)
{
	static atomic_t counter;
	int comp_vector = 0;

	if (dev->num_comp_vectors > 1)
		comp_vector =
			atomic_inc_return(&counter) %
			min_t(int, dev->num_comp_vectors, num_online_cpus());

	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     caller);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);
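
/*
 * Example (not part of the original file): the ib_alloc_cq_any() wrapper
 * lets the core pick the completion vector, spreading CQs across vectors as
 * described above.  example_setup() and the CQE count are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_setup(struct ib_device *dev)
{
	struct ib_cq *cq;

	cq = ib_alloc_cq_any(dev, NULL, 256, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... post work requests, reap completions via wr_cqe->done ... */

	ib_free_cq(cq);
	return 0;
}
#endif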

/**
 * ib_free_cq - free a completion queue
 * @cq:		completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret = 0;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;
	if (WARN_ON_ONCE(cq->cqe_used))
		return;

	if (cq->device->ops.pre_destroy_cq) {
		ret = cq->device->ops.pre_destroy_cq(cq);
		WARN_ONCE(ret, "Disable of kernel CQ shouldn't fail");
	}

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
	case IB_POLL_UNBOUND_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	rdma_dim_destroy(cq);
	trace_cq_free(cq);
	if (cq->device->ops.post_destroy_cq)
		cq->device->ops.post_destroy_cq(cq);
	else
		ret = cq->device->ops.destroy_cq(cq, NULL);
	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
	rdma_restrack_del(&cq->res);
	kfree(cq->wc);
	kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq);

void ib_cq_pool_cleanup(struct ib_device *dev)
{
	struct ib_cq *cq, *n;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
		list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
					 pool_entry) {
			WARN_ON(cq->cqe_used);
			list_del(&cq->pool_entry);
			cq->shared = false;
			ib_free_cq(cq);
		}
	}
}

static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
			enum ib_poll_context poll_ctx)
{
	LIST_HEAD(tmp_list);
	unsigned int nr_cqs, i;
	struct ib_cq *cq, *n;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return -EINVAL;
	}

	/*
	 * Allocate at least as many CQEs as requested, and otherwise
	 * a reasonable batch size so that we can share CQs between
	 * multiple users instead of allocating a larger number of CQs.
	 */
	nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
			max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
	nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	for (i = 0; i < nr_cqs; i++) {
		cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
		if (IS_ERR(cq)) {
			ret = PTR_ERR(cq);
			goto out_free_cqs;
		}
		cq->shared = true;
		list_add_tail(&cq->pool_entry, &tmp_list);
	}

	spin_lock_irq(&dev->cq_pools_lock);
	list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
	spin_unlock_irq(&dev->cq_pools_lock);

	return 0;

out_free_cqs:
	list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
		cq->shared = false;
		ib_free_cq(cq);
	}
	return ret;
}

/**
 * ib_cq_pool_get() - Find the least used completion queue that matches
 *   a given cpu hint (or least used for wild card affinity) and fits
 *   nr_cqe.
 * @dev: rdma device
 * @nr_cqe: number of needed cqe entries
 * @comp_vector_hint: completion vector hint, or -1 to let the driver assign
 *   a comp vector based on an internal counter
 * @poll_ctx: cq polling context
 *
 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 * claims entries in it for us.  If no suitable cq is available, allocate
 * a new cq with the requirements and add it to the device pool.
 * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
 * for @poll_ctx.
 */
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx)
{
	static unsigned int default_comp_vector;
	unsigned int vector, num_comp_vectors;
	struct ib_cq *cq, *found = NULL;
	int ret;

	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
		return ERR_PTR(-EINVAL);
	}

	num_comp_vectors =
		min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
	/* Project the affinity to the device completion vector range */
	if (comp_vector_hint < 0) {
		comp_vector_hint =
			(READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
		WRITE_ONCE(default_comp_vector, comp_vector_hint);
	}
	vector = comp_vector_hint % num_comp_vectors;

	/*
	 * Find the least used CQ with correct affinity and
	 * enough free CQ entries
	 */
	while (!found) {
		spin_lock_irq(&dev->cq_pools_lock);
		list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
				    pool_entry) {
			/*
			 * Check to see if we have found a CQ with the
			 * correct completion vector
			 */
			if (vector != cq->comp_vector)
				continue;
			if (cq->cqe_used + nr_cqe > cq->cqe)
				continue;
			found = cq;
			break;
		}

		if (found) {
			found->cqe_used += nr_cqe;
			spin_unlock_irq(&dev->cq_pools_lock);

			return found;
		}
		spin_unlock_irq(&dev->cq_pools_lock);

		/*
		 * Didn't find a match or ran out of CQs in the device
		 * pool, allocate a new array of CQs.
		 */
		ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
		if (ret)
			return ERR_PTR(ret);
	}

	return found;
}
EXPORT_SYMBOL(ib_cq_pool_get);

/**
 * ib_cq_pool_put - Return a CQ taken from a shared pool.
 * @cq: The CQ to return.
 * @nr_cqe: The max number of cqes that the user had requested.
 */
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
{
	if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
		return;

	spin_lock_irq(&cq->device->cq_pools_lock);
	cq->cqe_used -= nr_cqe;
	spin_unlock_irq(&cq->device->cq_pools_lock);
}
EXPORT_SYMBOL(ib_cq_pool_put);
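
/*
 * Example (not part of the original file): a minimal sketch of borrowing CQ
 * space from the shared pool and returning it when the user is done.  The
 * names example_attach()/example_detach() and the depth value are
 * hypothetical; ib_cq_pool_get()/ib_cq_pool_put() are the APIs above.
 */
#if 0	/* usage sketch only, not compiled */
static struct ib_cq *example_attach(struct ib_device *dev, unsigned int depth)
{
	/* -1 lets the core round-robin the completion vector hint */
	return ib_cq_pool_get(dev, depth, -1, IB_POLL_WORKQUEUE);
}

static void example_detach(struct ib_cq *cq, unsigned int depth)
{
	/* Must pass the same number of CQEs that ib_cq_pool_get() claimed */
	ib_cq_pool_put(cq, depth);
}
#endif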