/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"

static struct kmem_cache *cq_cache;

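/*
 * Associate a QP with this CQ: hash the QP into the CQ's QP table,
 * keyed by the low bits of its real QP number, so that events
 * arriving on the CQ can be routed back to the owning QP.
 */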
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}

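/*
 * Remove the QP with the given real QP number from the CQ's hash
 * table. Returns -EINVAL if no such QP is linked to this CQ.
 */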
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq. cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}

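/*
 * Look up the QP with the given real QP number in the CQ's hash
 * table. No lock is taken here; callers are expected to serialize
 * against concurrent updates of the table.
 */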
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
	struct hlist_node *iter;
	struct ehca_qp *qp;

	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}
	return ret;
}

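/*
 * Create a completion queue: reserve an idr token (used by the event
 * path to find the CQ again), allocate the CQ in firmware, register
 * all queue pages with the hypervisor and, for userspace CQs, hand
 * the queue description back through udata.
 */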
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int ipz_rc, ret, i;
	unsigned long flags;

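	/*
	 * Reject a cqe count that would overflow the 4GB-64 hardware
	 * limit once the error-CQE headroom is added below.
	 */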
	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
		ehca_err(device, "Unable to create CQ, max number of %i "
			"CQs reached.", shca->max_num_cqs);
		ehca_err(device, "To increase the maximum number of CQs "
			"use the number_of_cqs module parameter.");
		return ERR_PTR(-ENOSPC);
	}

	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		atomic_dec(&shca->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	atomic_set(&my_cq->nr_events, 0);
	init_waitqueue_head(&my_cq->wait_completion);

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

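	/*
	 * Reserve an idr token for this CQ; the event handlers use the
	 * token to look the CQ up again. idr_get_new() returns -EAGAIN
	 * when it needs another idr_pre_get() round, hence the loop.
	 */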
	do {
		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
			cq = ERR_PTR(-ENOMEM);
			ehca_err(device, "Can't reserve idr nr. device=%p",
				 device);
			goto create_cq_exit1;
		}

		write_lock_irqsave(&ehca_cq_idr_lock, flags);
		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}

	if (my_cq->token > 0x1FFFFFF) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Invalid cq number. device=%p", device);
		goto create_cq_exit2;
	}

	/*
	 * The CQ's maximum depth is 4GB-64, but we need an additional
	 * 20 entries as a buffer for error CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%lli device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

	ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
	if (!ipz_rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
			 ipz_rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}

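	/*
	 * Register every queue page with the hypervisor. All but the
	 * last registration must return H_PAGE_REGISTERED; the final
	 * one must return H_SUCCESS, marking the queue complete.
	 */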
	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%lli", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%lli "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
		 my_cq, my_cq->cq_number, cqx_fec);

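	/* the depth visible to consumers excludes the error-CQE headroom */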
	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

	INIT_LIST_HEAD(&my_cq->sqp_err_list);
	INIT_LIST_HEAD(&my_cq->rqp_err_list);

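	/* for a userspace CQ, describe the queue layout through udata */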
	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		resp.fw_handle_ofs = (u32)
			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			/* must not return the about-to-be-freed cq pointer */
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit4;
		}
	}

	return cq;

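/* error unwind: each label below undoes one step and falls through */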
create_cq_exit4:
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return cq;
}

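/*
 * Destroy a CQ: unhook it from the idr so no new events can reach it,
 * wait for events already in flight to drain, then release the
 * firmware resource and the queue memory.
 */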
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
	}

	/*
	 * remove the CQ from the idr first to make sure
	 * no more interrupt tasklets will touch this CQ
	 */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* now wait until all pending events have completed */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));

	/* nobody's using our CQ any longer -- we can destroy it */
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);
	return 0;
}

int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");

	return -EFAULT;
}

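/* called once at module load to set up the CQ slab cache */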
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!cq_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}