1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *	copyright notice, this list of conditions and the following
17  *	disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *	copyright notice, this list of conditions and the following
21  *	disclaimer in the documentation and/or other materials
22  *	provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/completion.h>
35 #include <linux/init.h>
36 #include <linux/fs.h>
37 #include <linux/module.h>
38 #include <linux/device.h>
39 #include <linux/err.h>
40 #include <linux/poll.h>
41 #include <linux/sched.h>
42 #include <linux/file.h>
43 #include <linux/mount.h>
44 #include <linux/cdev.h>
45 #include <linux/idr.h>
46 #include <linux/mutex.h>
47 #include <linux/slab.h>
48 
49 #include <asm/uaccess.h>
50 
51 #include <rdma/ib_cm.h>
52 #include <rdma/ib_user_cm.h>
53 #include <rdma/ib_marshall.h>
54 
55 MODULE_AUTHOR("Libor Michalek");
56 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
57 MODULE_LICENSE("Dual BSD/GPL");
58 
59 struct ib_ucm_device {
60 	int			devnum;
61 	struct cdev		cdev;
62 	struct device		dev;
63 	struct ib_device	*ib_dev;
64 };
65 
66 struct ib_ucm_file {
67 	struct mutex file_mutex;
68 	struct file *filp;
69 	struct ib_ucm_device *device;
70 
71 	struct list_head  ctxs;
72 	struct list_head  events;
73 	wait_queue_head_t poll_wait;
74 };
75 
76 struct ib_ucm_context {
77 	int                 id;
78 	struct completion   comp;
79 	atomic_t            ref;
80 	int		    events_reported;
81 
82 	struct ib_ucm_file *file;
83 	struct ib_cm_id    *cm_id;
84 	__u64		   uid;
85 
86 	struct list_head    events;    /* list of pending events. */
87 	struct list_head    file_list; /* member in file ctx list */
88 };
89 
90 struct ib_ucm_event {
91 	struct ib_ucm_context *ctx;
92 	struct list_head file_list; /* member in file event list */
93 	struct list_head ctx_list;  /* member in ctx event list */
94 
95 	struct ib_cm_id *cm_id;
96 	struct ib_ucm_event_resp resp;
97 	void *data;
98 	void *info;
99 	int data_len;
100 	int info_len;
101 };
102 
103 enum {
104 	IB_UCM_MAJOR = 231,
105 	IB_UCM_BASE_MINOR = 224,
106 	IB_UCM_MAX_DEVICES = 32
107 };
108 
109 #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
110 
111 static void ib_ucm_add_one(struct ib_device *device);
112 static void ib_ucm_remove_one(struct ib_device *device);
113 
114 static struct ib_client ucm_client = {
115 	.name   = "ucm",
116 	.add    = ib_ucm_add_one,
117 	.remove = ib_ucm_remove_one
118 };
119 
120 static DEFINE_MUTEX(ctx_id_mutex);
121 static DEFINE_IDR(ctx_id_table);
122 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
123 
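/*
 * Look up a context by IDR id and take a reference on it.  The context
 * must belong to the calling file; ib_ucm_ctx_put() drops the reference
 * and completes ctx->comp when the last one goes away, so that
 * ib_ucm_destroy_id() can wait for outstanding users.
 */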
static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
125 {
126 	struct ib_ucm_context *ctx;
127 
128 	mutex_lock(&ctx_id_mutex);
129 	ctx = idr_find(&ctx_id_table, id);
130 	if (!ctx)
131 		ctx = ERR_PTR(-ENOENT);
132 	else if (ctx->file != file)
133 		ctx = ERR_PTR(-EINVAL);
134 	else
135 		atomic_inc(&ctx->ref);
136 	mutex_unlock(&ctx_id_mutex);
137 
138 	return ctx;
139 }
140 
static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
142 {
143 	if (atomic_dec_and_test(&ctx->ref))
144 		complete(&ctx->comp);
145 }
146 
static inline int ib_ucm_new_cm_id(int event)
148 {
149 	return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
150 }
151 
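/*
 * Free any events still queued on a context that were never reported to
 * userspace.  Incoming connection events carry a new cm_id that userspace
 * never saw, so destroy that id here as well.
 */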
static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
153 {
154 	struct ib_ucm_event *uevent;
155 
156 	mutex_lock(&ctx->file->file_mutex);
157 	list_del(&ctx->file_list);
158 	while (!list_empty(&ctx->events)) {
159 
160 		uevent = list_entry(ctx->events.next,
161 				    struct ib_ucm_event, ctx_list);
162 		list_del(&uevent->file_list);
163 		list_del(&uevent->ctx_list);
164 		mutex_unlock(&ctx->file->file_mutex);
165 
166 		/* clear incoming connections. */
167 		if (ib_ucm_new_cm_id(uevent->resp.event))
168 			ib_destroy_cm_id(uevent->cm_id);
169 
170 		kfree(uevent);
171 		mutex_lock(&ctx->file->file_mutex);
172 	}
173 	mutex_unlock(&ctx->file->file_mutex);
174 }
175 
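/*
 * Allocate a new context, assign it an IDR id and link it onto the file's
 * context list.  Called with file->file_mutex held.
 */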
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
177 {
178 	struct ib_ucm_context *ctx;
179 	int result;
180 
181 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
182 	if (!ctx)
183 		return NULL;
184 
185 	atomic_set(&ctx->ref, 1);
186 	init_completion(&ctx->comp);
187 	ctx->file = file;
188 	INIT_LIST_HEAD(&ctx->events);
189 
190 	do {
191 		result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
192 		if (!result)
193 			goto error;
194 
195 		mutex_lock(&ctx_id_mutex);
196 		result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
197 		mutex_unlock(&ctx_id_mutex);
198 	} while (result == -EAGAIN);
199 
200 	if (result)
201 		goto error;
202 
203 	list_add_tail(&ctx->file_list, &file->ctxs);
204 	return ctx;
205 
206 error:
207 	kfree(ctx);
208 	return NULL;
209 }
210 
static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
212 				 struct ib_cm_req_event_param *kreq)
213 {
214 	ureq->remote_ca_guid             = kreq->remote_ca_guid;
215 	ureq->remote_qkey                = kreq->remote_qkey;
216 	ureq->remote_qpn                 = kreq->remote_qpn;
217 	ureq->qp_type                    = kreq->qp_type;
218 	ureq->starting_psn               = kreq->starting_psn;
219 	ureq->responder_resources        = kreq->responder_resources;
220 	ureq->initiator_depth            = kreq->initiator_depth;
221 	ureq->local_cm_response_timeout  = kreq->local_cm_response_timeout;
222 	ureq->flow_control               = kreq->flow_control;
223 	ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
224 	ureq->retry_count                = kreq->retry_count;
225 	ureq->rnr_retry_count            = kreq->rnr_retry_count;
226 	ureq->srq                        = kreq->srq;
227 	ureq->port			 = kreq->port;
228 
229 	ib_copy_path_rec_to_user(&ureq->primary_path, kreq->primary_path);
230 	if (kreq->alternate_path)
231 		ib_copy_path_rec_to_user(&ureq->alternate_path,
232 					 kreq->alternate_path);
233 }
234 
static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
236 				 struct ib_cm_rep_event_param *krep)
237 {
238 	urep->remote_ca_guid      = krep->remote_ca_guid;
239 	urep->remote_qkey         = krep->remote_qkey;
240 	urep->remote_qpn          = krep->remote_qpn;
241 	urep->starting_psn        = krep->starting_psn;
242 	urep->responder_resources = krep->responder_resources;
243 	urep->initiator_depth     = krep->initiator_depth;
244 	urep->target_ack_delay    = krep->target_ack_delay;
245 	urep->failover_accepted   = krep->failover_accepted;
246 	urep->flow_control        = krep->flow_control;
247 	urep->rnr_retry_count     = krep->rnr_retry_count;
248 	urep->srq                 = krep->srq;
249 }
250 
static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
252 				      struct ib_cm_sidr_rep_event_param *krep)
253 {
254 	urep->status = krep->status;
255 	urep->qkey   = krep->qkey;
256 	urep->qpn    = krep->qpn;
}
258 
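/*
 * Translate a kernel ib_cm_event into the userspace-visible event,
 * duplicating any private data and ARI/SIDR info so it can be copied
 * out later from ib_ucm_event().
 */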
static int ib_ucm_event_process(struct ib_cm_event *evt,
260 				struct ib_ucm_event *uvt)
261 {
262 	void *info = NULL;
263 
264 	switch (evt->event) {
265 	case IB_CM_REQ_RECEIVED:
266 		ib_ucm_event_req_get(&uvt->resp.u.req_resp,
267 				     &evt->param.req_rcvd);
268 		uvt->data_len      = IB_CM_REQ_PRIVATE_DATA_SIZE;
269 		uvt->resp.present  = IB_UCM_PRES_PRIMARY;
270 		uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
271 				      IB_UCM_PRES_ALTERNATE : 0);
272 		break;
273 	case IB_CM_REP_RECEIVED:
274 		ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
275 				     &evt->param.rep_rcvd);
276 		uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
277 		break;
278 	case IB_CM_RTU_RECEIVED:
279 		uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
280 		uvt->resp.u.send_status = evt->param.send_status;
281 		break;
282 	case IB_CM_DREQ_RECEIVED:
283 		uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
284 		uvt->resp.u.send_status = evt->param.send_status;
285 		break;
286 	case IB_CM_DREP_RECEIVED:
287 		uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
288 		uvt->resp.u.send_status = evt->param.send_status;
289 		break;
290 	case IB_CM_MRA_RECEIVED:
291 		uvt->resp.u.mra_resp.timeout =
292 					evt->param.mra_rcvd.service_timeout;
293 		uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
294 		break;
295 	case IB_CM_REJ_RECEIVED:
296 		uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
297 		uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
298 		uvt->info_len = evt->param.rej_rcvd.ari_length;
299 		info	      = evt->param.rej_rcvd.ari;
300 		break;
301 	case IB_CM_LAP_RECEIVED:
302 		ib_copy_path_rec_to_user(&uvt->resp.u.lap_resp.path,
303 					 evt->param.lap_rcvd.alternate_path);
304 		uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
305 		uvt->resp.present = IB_UCM_PRES_ALTERNATE;
306 		break;
307 	case IB_CM_APR_RECEIVED:
308 		uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
309 		uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
310 		uvt->info_len = evt->param.apr_rcvd.info_len;
311 		info	      = evt->param.apr_rcvd.apr_info;
312 		break;
313 	case IB_CM_SIDR_REQ_RECEIVED:
314 		uvt->resp.u.sidr_req_resp.pkey =
315 					evt->param.sidr_req_rcvd.pkey;
316 		uvt->resp.u.sidr_req_resp.port =
317 					evt->param.sidr_req_rcvd.port;
318 		uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
319 		break;
320 	case IB_CM_SIDR_REP_RECEIVED:
321 		ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
322 					  &evt->param.sidr_rep_rcvd);
323 		uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
324 		uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
325 		info	      = evt->param.sidr_rep_rcvd.info;
326 		break;
327 	default:
328 		uvt->resp.u.send_status = evt->param.send_status;
329 		break;
330 	}
331 
332 	if (uvt->data_len) {
333 		uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
334 		if (!uvt->data)
335 			goto err1;
336 
337 		uvt->resp.present |= IB_UCM_PRES_DATA;
338 	}
339 
340 	if (uvt->info_len) {
341 		uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
342 		if (!uvt->info)
343 			goto err2;
344 
345 		uvt->resp.present |= IB_UCM_PRES_INFO;
346 	}
347 	return 0;
348 
349 err2:
350 	kfree(uvt->data);
351 err1:
352 	return -ENOMEM;
353 }
354 
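/*
 * ib_cm callback: package the event, queue it on the owning file and wake
 * up pollers.  On allocation failure, returning nonzero for events that
 * carry a brand new cm_id tells the CM core to destroy that id.
 */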
static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
356 				struct ib_cm_event *event)
357 {
358 	struct ib_ucm_event *uevent;
359 	struct ib_ucm_context *ctx;
360 	int result = 0;
361 
362 	ctx = cm_id->context;
363 
364 	uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
365 	if (!uevent)
366 		goto err1;
367 
368 	uevent->ctx = ctx;
369 	uevent->cm_id = cm_id;
370 	uevent->resp.uid = ctx->uid;
371 	uevent->resp.id = ctx->id;
372 	uevent->resp.event = event->event;
373 
374 	result = ib_ucm_event_process(event, uevent);
375 	if (result)
376 		goto err2;
377 
378 	mutex_lock(&ctx->file->file_mutex);
379 	list_add_tail(&uevent->file_list, &ctx->file->events);
380 	list_add_tail(&uevent->ctx_list, &ctx->events);
381 	wake_up_interruptible(&ctx->file->poll_wait);
382 	mutex_unlock(&ctx->file->file_mutex);
383 	return 0;
384 
385 err2:
386 	kfree(uevent);
387 err1:
388 	/* Destroy new cm_id's */
389 	return ib_ucm_new_cm_id(event->event);
390 }
391 
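/*
 * IB_USER_CM_CMD_EVENT: wait for the next queued event (unless O_NONBLOCK),
 * allocate a fresh context for incoming REQ/SIDR_REQ events, and copy the
 * event, its private data and any info buffer out to userspace.
 */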
static ssize_t ib_ucm_event(struct ib_ucm_file *file,
393 			    const char __user *inbuf,
394 			    int in_len, int out_len)
395 {
396 	struct ib_ucm_context *ctx;
397 	struct ib_ucm_event_get cmd;
398 	struct ib_ucm_event *uevent;
399 	int result = 0;
400 	DEFINE_WAIT(wait);
401 
402 	if (out_len < sizeof(struct ib_ucm_event_resp))
403 		return -ENOSPC;
404 
405 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
406 		return -EFAULT;
407 
408 	mutex_lock(&file->file_mutex);
409 	while (list_empty(&file->events)) {
410 		mutex_unlock(&file->file_mutex);
411 
412 		if (file->filp->f_flags & O_NONBLOCK)
413 			return -EAGAIN;
414 
415 		if (wait_event_interruptible(file->poll_wait,
416 					     !list_empty(&file->events)))
417 			return -ERESTARTSYS;
418 
419 		mutex_lock(&file->file_mutex);
420 	}
421 
422 	uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
423 
424 	if (ib_ucm_new_cm_id(uevent->resp.event)) {
425 		ctx = ib_ucm_ctx_alloc(file);
426 		if (!ctx) {
427 			result = -ENOMEM;
428 			goto done;
429 		}
430 
431 		ctx->cm_id = uevent->cm_id;
432 		ctx->cm_id->context = ctx;
433 		uevent->resp.id = ctx->id;
434 	}
435 
436 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
437 			 &uevent->resp, sizeof(uevent->resp))) {
438 		result = -EFAULT;
439 		goto done;
440 	}
441 
442 	if (uevent->data) {
443 		if (cmd.data_len < uevent->data_len) {
444 			result = -ENOMEM;
445 			goto done;
446 		}
447 		if (copy_to_user((void __user *)(unsigned long)cmd.data,
448 				 uevent->data, uevent->data_len)) {
449 			result = -EFAULT;
450 			goto done;
451 		}
452 	}
453 
454 	if (uevent->info) {
455 		if (cmd.info_len < uevent->info_len) {
456 			result = -ENOMEM;
457 			goto done;
458 		}
459 		if (copy_to_user((void __user *)(unsigned long)cmd.info,
460 				 uevent->info, uevent->info_len)) {
461 			result = -EFAULT;
462 			goto done;
463 		}
464 	}
465 
466 	list_del(&uevent->file_list);
467 	list_del(&uevent->ctx_list);
468 	uevent->ctx->events_reported++;
469 
470 	kfree(uevent->data);
471 	kfree(uevent->info);
472 	kfree(uevent);
473 done:
474 	mutex_unlock(&file->file_mutex);
475 	return result;
476 }
477 
static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
479 				const char __user *inbuf,
480 				int in_len, int out_len)
481 {
482 	struct ib_ucm_create_id cmd;
483 	struct ib_ucm_create_id_resp resp;
484 	struct ib_ucm_context *ctx;
485 	int result;
486 
487 	if (out_len < sizeof(resp))
488 		return -ENOSPC;
489 
490 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
491 		return -EFAULT;
492 
493 	mutex_lock(&file->file_mutex);
494 	ctx = ib_ucm_ctx_alloc(file);
495 	mutex_unlock(&file->file_mutex);
496 	if (!ctx)
497 		return -ENOMEM;
498 
499 	ctx->uid = cmd.uid;
500 	ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
501 				     ib_ucm_event_handler, ctx);
502 	if (IS_ERR(ctx->cm_id)) {
503 		result = PTR_ERR(ctx->cm_id);
504 		goto err1;
505 	}
506 
507 	resp.id = ctx->id;
508 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
509 			 &resp, sizeof(resp))) {
510 		result = -EFAULT;
511 		goto err2;
512 	}
513 	return 0;
514 
515 err2:
516 	ib_destroy_cm_id(ctx->cm_id);
517 err1:
518 	mutex_lock(&ctx_id_mutex);
519 	idr_remove(&ctx_id_table, ctx->id);
520 	mutex_unlock(&ctx_id_mutex);
521 	kfree(ctx);
522 	return result;
523 }
524 
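/*
 * Remove the context from the IDR so no new references can be taken, wait
 * for existing references to drop, then destroy the cm_id and free any
 * undelivered events.  The count of events already reported is returned so
 * userspace can account for them.
 */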
static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
526 				 const char __user *inbuf,
527 				 int in_len, int out_len)
528 {
529 	struct ib_ucm_destroy_id cmd;
530 	struct ib_ucm_destroy_id_resp resp;
531 	struct ib_ucm_context *ctx;
532 	int result = 0;
533 
534 	if (out_len < sizeof(resp))
535 		return -ENOSPC;
536 
537 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
538 		return -EFAULT;
539 
540 	mutex_lock(&ctx_id_mutex);
541 	ctx = idr_find(&ctx_id_table, cmd.id);
542 	if (!ctx)
543 		ctx = ERR_PTR(-ENOENT);
544 	else if (ctx->file != file)
545 		ctx = ERR_PTR(-EINVAL);
546 	else
547 		idr_remove(&ctx_id_table, ctx->id);
548 	mutex_unlock(&ctx_id_mutex);
549 
550 	if (IS_ERR(ctx))
551 		return PTR_ERR(ctx);
552 
553 	ib_ucm_ctx_put(ctx);
554 	wait_for_completion(&ctx->comp);
555 
556 	/* No new events will be generated after destroying the cm_id. */
557 	ib_destroy_cm_id(ctx->cm_id);
558 	/* Cleanup events not yet reported to the user. */
559 	ib_ucm_cleanup_events(ctx);
560 
561 	resp.events_reported = ctx->events_reported;
562 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
563 			 &resp, sizeof(resp)))
564 		result = -EFAULT;
565 
566 	kfree(ctx);
567 	return result;
568 }
569 
static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
571 			      const char __user *inbuf,
572 			      int in_len, int out_len)
573 {
574 	struct ib_ucm_attr_id_resp resp;
575 	struct ib_ucm_attr_id cmd;
576 	struct ib_ucm_context *ctx;
577 	int result = 0;
578 
579 	if (out_len < sizeof(resp))
580 		return -ENOSPC;
581 
582 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
583 		return -EFAULT;
584 
585 	ctx = ib_ucm_ctx_get(file, cmd.id);
586 	if (IS_ERR(ctx))
587 		return PTR_ERR(ctx);
588 
589 	resp.service_id   = ctx->cm_id->service_id;
590 	resp.service_mask = ctx->cm_id->service_mask;
591 	resp.local_id     = ctx->cm_id->local_id;
592 	resp.remote_id    = ctx->cm_id->remote_id;
593 
594 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
595 			 &resp, sizeof(resp)))
596 		result = -EFAULT;
597 
598 	ib_ucm_ctx_put(ctx);
599 	return result;
600 }
601 
static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
603 				   const char __user *inbuf,
604 				   int in_len, int out_len)
605 {
606 	struct ib_uverbs_qp_attr resp;
607 	struct ib_ucm_init_qp_attr cmd;
608 	struct ib_ucm_context *ctx;
609 	struct ib_qp_attr qp_attr;
610 	int result = 0;
611 
612 	if (out_len < sizeof(resp))
613 		return -ENOSPC;
614 
615 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
616 		return -EFAULT;
617 
618 	ctx = ib_ucm_ctx_get(file, cmd.id);
619 	if (IS_ERR(ctx))
620 		return PTR_ERR(ctx);
621 
622 	resp.qp_attr_mask = 0;
623 	memset(&qp_attr, 0, sizeof qp_attr);
624 	qp_attr.qp_state = cmd.qp_state;
625 	result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
626 	if (result)
627 		goto out;
628 
629 	ib_copy_qp_attr_to_user(&resp, &qp_attr);
630 
631 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
632 			 &resp, sizeof(resp)))
633 		result = -EFAULT;
634 
635 out:
636 	ib_ucm_ctx_put(ctx);
637 	return result;
638 }
639 
static int ucm_validate_listen(__be64 service_id, __be64 service_mask)
641 {
642 	service_id &= service_mask;
643 
644 	if (((service_id & IB_CMA_SERVICE_ID_MASK) == IB_CMA_SERVICE_ID) ||
645 	    ((service_id & IB_SDP_SERVICE_ID_MASK) == IB_SDP_SERVICE_ID))
646 		return -EINVAL;
647 
648 	return 0;
649 }
650 
static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
652 			     const char __user *inbuf,
653 			     int in_len, int out_len)
654 {
655 	struct ib_ucm_listen cmd;
656 	struct ib_ucm_context *ctx;
657 	int result;
658 
659 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
660 		return -EFAULT;
661 
662 	ctx = ib_ucm_ctx_get(file, cmd.id);
663 	if (IS_ERR(ctx))
664 		return PTR_ERR(ctx);
665 
666 	result = ucm_validate_listen(cmd.service_id, cmd.service_mask);
667 	if (result)
668 		goto out;
669 
670 	result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask,
671 			      NULL);
672 out:
673 	ib_ucm_ctx_put(ctx);
674 	return result;
675 }
676 
static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
678 			     const char __user *inbuf,
679 			     int in_len, int out_len)
680 {
681 	struct ib_ucm_notify cmd;
682 	struct ib_ucm_context *ctx;
683 	int result;
684 
685 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
686 		return -EFAULT;
687 
688 	ctx = ib_ucm_ctx_get(file, cmd.id);
689 	if (IS_ERR(ctx))
690 		return PTR_ERR(ctx);
691 
692 	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
693 	ib_ucm_ctx_put(ctx);
694 	return result;
695 }
696 
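/*
 * Helpers that pull optional userspace buffers (private data, path records)
 * into kernel allocations.  A zero length or null source leaves the
 * destination pointer NULL.
 */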
static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
698 {
699 	void *data;
700 
701 	*dest = NULL;
702 
703 	if (!len)
704 		return 0;
705 
706 	data = memdup_user((void __user *)(unsigned long)src, len);
707 	if (IS_ERR(data))
708 		return PTR_ERR(data);
709 
710 	*dest = data;
711 	return 0;
712 }
713 
static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
715 {
716 	struct ib_user_path_rec upath;
717 	struct ib_sa_path_rec  *sa_path;
718 
719 	*path = NULL;
720 
721 	if (!src)
722 		return 0;
723 
724 	sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
725 	if (!sa_path)
726 		return -ENOMEM;
727 
728 	if (copy_from_user(&upath, (void __user *)(unsigned long)src,
729 			   sizeof(upath))) {
730 
731 		kfree(sa_path);
732 		return -EFAULT;
733 	}
734 
735 	ib_copy_path_rec_from_user(sa_path, &upath);
736 	*path = sa_path;
737 	return 0;
738 }
739 
static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
741 			       const char __user *inbuf,
742 			       int in_len, int out_len)
743 {
744 	struct ib_cm_req_param param;
745 	struct ib_ucm_context *ctx;
746 	struct ib_ucm_req cmd;
747 	int result;
748 
749 	param.private_data   = NULL;
750 	param.primary_path   = NULL;
751 	param.alternate_path = NULL;
752 
753 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
754 		return -EFAULT;
755 
756 	result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
757 	if (result)
758 		goto done;
759 
760 	result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
761 	if (result)
762 		goto done;
763 
764 	result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
765 	if (result)
766 		goto done;
767 
768 	param.private_data_len           = cmd.len;
769 	param.service_id                 = cmd.sid;
770 	param.qp_num                     = cmd.qpn;
771 	param.qp_type                    = cmd.qp_type;
772 	param.starting_psn               = cmd.psn;
773 	param.peer_to_peer               = cmd.peer_to_peer;
774 	param.responder_resources        = cmd.responder_resources;
775 	param.initiator_depth            = cmd.initiator_depth;
776 	param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
777 	param.flow_control               = cmd.flow_control;
778 	param.local_cm_response_timeout  = cmd.local_cm_response_timeout;
779 	param.retry_count                = cmd.retry_count;
780 	param.rnr_retry_count            = cmd.rnr_retry_count;
781 	param.max_cm_retries             = cmd.max_cm_retries;
782 	param.srq                        = cmd.srq;
783 
784 	ctx = ib_ucm_ctx_get(file, cmd.id);
785 	if (!IS_ERR(ctx)) {
786 		result = ib_send_cm_req(ctx->cm_id, &param);
787 		ib_ucm_ctx_put(ctx);
788 	} else
789 		result = PTR_ERR(ctx);
790 
791 done:
792 	kfree(param.private_data);
793 	kfree(param.primary_path);
794 	kfree(param.alternate_path);
795 	return result;
796 }
797 
static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
799 			       const char __user *inbuf,
800 			       int in_len, int out_len)
801 {
802 	struct ib_cm_rep_param param;
803 	struct ib_ucm_context *ctx;
804 	struct ib_ucm_rep cmd;
805 	int result;
806 
807 	param.private_data = NULL;
808 
809 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
810 		return -EFAULT;
811 
812 	result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
813 	if (result)
814 		return result;
815 
816 	param.qp_num              = cmd.qpn;
817 	param.starting_psn        = cmd.psn;
818 	param.private_data_len    = cmd.len;
819 	param.responder_resources = cmd.responder_resources;
820 	param.initiator_depth     = cmd.initiator_depth;
821 	param.failover_accepted   = cmd.failover_accepted;
822 	param.flow_control        = cmd.flow_control;
823 	param.rnr_retry_count     = cmd.rnr_retry_count;
824 	param.srq                 = cmd.srq;
825 
826 	ctx = ib_ucm_ctx_get(file, cmd.id);
827 	if (!IS_ERR(ctx)) {
828 		ctx->uid = cmd.uid;
829 		result = ib_send_cm_rep(ctx->cm_id, &param);
830 		ib_ucm_ctx_put(ctx);
831 	} else
832 		result = PTR_ERR(ctx);
833 
834 	kfree(param.private_data);
835 	return result;
836 }
837 
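/*
 * Shared implementation for RTU, DREQ and DREP, which differ only in the
 * ib_cm call used to send the private data.
 */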
static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
839 					const char __user *inbuf, int in_len,
840 					int (*func)(struct ib_cm_id *cm_id,
841 						    const void *private_data,
842 						    u8 private_data_len))
843 {
844 	struct ib_ucm_private_data cmd;
845 	struct ib_ucm_context *ctx;
846 	const void *private_data = NULL;
847 	int result;
848 
849 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
850 		return -EFAULT;
851 
852 	result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
853 	if (result)
854 		return result;
855 
856 	ctx = ib_ucm_ctx_get(file, cmd.id);
857 	if (!IS_ERR(ctx)) {
858 		result = func(ctx->cm_id, private_data, cmd.len);
859 		ib_ucm_ctx_put(ctx);
860 	} else
861 		result = PTR_ERR(ctx);
862 
863 	kfree(private_data);
864 	return result;
865 }
866 
static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
868 			       const char __user *inbuf,
869 			       int in_len, int out_len)
870 {
871 	return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
872 }
873 
static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
875 				const char __user *inbuf,
876 				int in_len, int out_len)
877 {
878 	return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
879 }
880 
static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
882 				const char __user *inbuf,
883 				int in_len, int out_len)
884 {
885 	return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
886 }
887 
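/*
 * Shared implementation for REJ and APR, which send a status plus optional
 * info and private data buffers.  The callers cast the ib_cm functions to
 * this common prototype.
 */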
static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
889 				const char __user *inbuf, int in_len,
890 				int (*func)(struct ib_cm_id *cm_id,
891 					    int status,
892 					    const void *info,
893 					    u8 info_len,
894 					    const void *data,
895 					    u8 data_len))
896 {
897 	struct ib_ucm_context *ctx;
898 	struct ib_ucm_info cmd;
899 	const void *data = NULL;
900 	const void *info = NULL;
901 	int result;
902 
903 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
904 		return -EFAULT;
905 
906 	result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
907 	if (result)
908 		goto done;
909 
910 	result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
911 	if (result)
912 		goto done;
913 
914 	ctx = ib_ucm_ctx_get(file, cmd.id);
915 	if (!IS_ERR(ctx)) {
916 		result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
917 			      data, cmd.data_len);
918 		ib_ucm_ctx_put(ctx);
919 	} else
920 		result = PTR_ERR(ctx);
921 
922 done:
923 	kfree(data);
924 	kfree(info);
925 	return result;
926 }
927 
static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
929 			       const char __user *inbuf,
930 			       int in_len, int out_len)
931 {
932 	return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
933 }
934 
static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
936 			       const char __user *inbuf,
937 			       int in_len, int out_len)
938 {
939 	return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
940 }
941 
static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
943 			       const char __user *inbuf,
944 			       int in_len, int out_len)
945 {
946 	struct ib_ucm_context *ctx;
947 	struct ib_ucm_mra cmd;
948 	const void *data = NULL;
949 	int result;
950 
951 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
952 		return -EFAULT;
953 
954 	result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
955 	if (result)
956 		return result;
957 
958 	ctx = ib_ucm_ctx_get(file, cmd.id);
959 	if (!IS_ERR(ctx)) {
960 		result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
961 		ib_ucm_ctx_put(ctx);
962 	} else
963 		result = PTR_ERR(ctx);
964 
965 	kfree(data);
966 	return result;
967 }
968 
static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
970 			       const char __user *inbuf,
971 			       int in_len, int out_len)
972 {
973 	struct ib_ucm_context *ctx;
974 	struct ib_sa_path_rec *path = NULL;
975 	struct ib_ucm_lap cmd;
976 	const void *data = NULL;
977 	int result;
978 
979 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
980 		return -EFAULT;
981 
982 	result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
983 	if (result)
984 		goto done;
985 
986 	result = ib_ucm_path_get(&path, cmd.path);
987 	if (result)
988 		goto done;
989 
990 	ctx = ib_ucm_ctx_get(file, cmd.id);
991 	if (!IS_ERR(ctx)) {
992 		result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
993 		ib_ucm_ctx_put(ctx);
994 	} else
995 		result = PTR_ERR(ctx);
996 
997 done:
998 	kfree(data);
999 	kfree(path);
1000 	return result;
1001 }
1002 
static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1004 				    const char __user *inbuf,
1005 				    int in_len, int out_len)
1006 {
1007 	struct ib_cm_sidr_req_param param;
1008 	struct ib_ucm_context *ctx;
1009 	struct ib_ucm_sidr_req cmd;
1010 	int result;
1011 
1012 	param.private_data = NULL;
1013 	param.path = NULL;
1014 
1015 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1016 		return -EFAULT;
1017 
1018 	result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
1019 	if (result)
1020 		goto done;
1021 
1022 	result = ib_ucm_path_get(&param.path, cmd.path);
1023 	if (result)
1024 		goto done;
1025 
1026 	param.private_data_len = cmd.len;
1027 	param.service_id       = cmd.sid;
1028 	param.timeout_ms       = cmd.timeout;
1029 	param.max_cm_retries   = cmd.max_cm_retries;
1030 
1031 	ctx = ib_ucm_ctx_get(file, cmd.id);
1032 	if (!IS_ERR(ctx)) {
1033 		result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1034 		ib_ucm_ctx_put(ctx);
1035 	} else
1036 		result = PTR_ERR(ctx);
1037 
1038 done:
1039 	kfree(param.private_data);
1040 	kfree(param.path);
1041 	return result;
1042 }
1043 
static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1045 				    const char __user *inbuf,
1046 				    int in_len, int out_len)
1047 {
1048 	struct ib_cm_sidr_rep_param param;
1049 	struct ib_ucm_sidr_rep cmd;
1050 	struct ib_ucm_context *ctx;
1051 	int result;
1052 
1053 	param.info = NULL;
1054 
1055 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1056 		return -EFAULT;
1057 
1058 	result = ib_ucm_alloc_data(&param.private_data,
1059 				   cmd.data, cmd.data_len);
1060 	if (result)
1061 		goto done;
1062 
1063 	result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
1064 	if (result)
1065 		goto done;
1066 
1067 	param.qp_num		= cmd.qpn;
1068 	param.qkey		= cmd.qkey;
1069 	param.status		= cmd.status;
1070 	param.info_length	= cmd.info_len;
1071 	param.private_data_len	= cmd.data_len;
1072 
1073 	ctx = ib_ucm_ctx_get(file, cmd.id);
1074 	if (!IS_ERR(ctx)) {
1075 		result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1076 		ib_ucm_ctx_put(ctx);
1077 	} else
1078 		result = PTR_ERR(ctx);
1079 
1080 done:
1081 	kfree(param.private_data);
1082 	kfree(param.info);
1083 	return result;
1084 }
1085 
1086 static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
1087 				  const char __user *inbuf,
1088 				  int in_len, int out_len) = {
1089 	[IB_USER_CM_CMD_CREATE_ID]     = ib_ucm_create_id,
1090 	[IB_USER_CM_CMD_DESTROY_ID]    = ib_ucm_destroy_id,
1091 	[IB_USER_CM_CMD_ATTR_ID]       = ib_ucm_attr_id,
1092 	[IB_USER_CM_CMD_LISTEN]        = ib_ucm_listen,
1093 	[IB_USER_CM_CMD_NOTIFY]        = ib_ucm_notify,
1094 	[IB_USER_CM_CMD_SEND_REQ]      = ib_ucm_send_req,
1095 	[IB_USER_CM_CMD_SEND_REP]      = ib_ucm_send_rep,
1096 	[IB_USER_CM_CMD_SEND_RTU]      = ib_ucm_send_rtu,
1097 	[IB_USER_CM_CMD_SEND_DREQ]     = ib_ucm_send_dreq,
1098 	[IB_USER_CM_CMD_SEND_DREP]     = ib_ucm_send_drep,
1099 	[IB_USER_CM_CMD_SEND_REJ]      = ib_ucm_send_rej,
1100 	[IB_USER_CM_CMD_SEND_MRA]      = ib_ucm_send_mra,
1101 	[IB_USER_CM_CMD_SEND_LAP]      = ib_ucm_send_lap,
1102 	[IB_USER_CM_CMD_SEND_APR]      = ib_ucm_send_apr,
1103 	[IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1104 	[IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1105 	[IB_USER_CM_CMD_EVENT]	       = ib_ucm_event,
1106 	[IB_USER_CM_CMD_INIT_QP_ATTR]  = ib_ucm_init_qp_attr,
1107 };
1108 
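/*
 * write() is the command channel: each request is a struct ib_ucm_cmd_hdr
 * (cmd, in, out) followed immediately by the command-specific structure,
 * and hdr.cmd indexes ucm_cmd_table above.  A rough userspace sketch
 * (illustrative only; ucm_fd, buf, my_uid and resp are placeholders):
 *
 *	struct ib_ucm_cmd_hdr *hdr = (void *) buf;
 *	struct ib_ucm_create_id *cmd = (void *) (buf + sizeof(*hdr));
 *	hdr->cmd = IB_USER_CM_CMD_CREATE_ID;
 *	hdr->in  = sizeof(*cmd);
 *	hdr->out = sizeof(struct ib_ucm_create_id_resp);
 *	cmd->uid = my_uid;
 *	cmd->response = (unsigned long) &resp;
 *	write(ucm_fd, buf, sizeof(*hdr) + sizeof(*cmd));
 */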
static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1110 			    size_t len, loff_t *pos)
1111 {
1112 	struct ib_ucm_file *file = filp->private_data;
1113 	struct ib_ucm_cmd_hdr hdr;
1114 	ssize_t result;
1115 
1116 	if (len < sizeof(hdr))
1117 		return -EINVAL;
1118 
1119 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
1120 		return -EFAULT;
1121 
1122 	if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
1123 		return -EINVAL;
1124 
1125 	if (hdr.in + sizeof(hdr) > len)
1126 		return -EINVAL;
1127 
1128 	result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
1129 					hdr.in, hdr.out);
1130 	if (!result)
1131 		result = len;
1132 
1133 	return result;
1134 }
1135 
static unsigned int ib_ucm_poll(struct file *filp,
1137 				struct poll_table_struct *wait)
1138 {
1139 	struct ib_ucm_file *file = filp->private_data;
1140 	unsigned int mask = 0;
1141 
1142 	poll_wait(filp, &file->poll_wait, wait);
1143 
1144 	if (!list_empty(&file->events))
1145 		mask = POLLIN | POLLRDNORM;
1146 
1147 	return mask;
1148 }
1149 
1150 /*
1151  * ib_ucm_open() does not need the BKL:
1152  *
1153  *  - no global state is referred to;
1154  *  - there is no ioctl method to race against;
1155  *  - no further module initialization is required for open to work
1156  *    after the device is registered.
1157  */
static int ib_ucm_open(struct inode *inode, struct file *filp)
1159 {
1160 	struct ib_ucm_file *file;
1161 
1162 	file = kmalloc(sizeof(*file), GFP_KERNEL);
1163 	if (!file)
1164 		return -ENOMEM;
1165 
1166 	INIT_LIST_HEAD(&file->events);
1167 	INIT_LIST_HEAD(&file->ctxs);
1168 	init_waitqueue_head(&file->poll_wait);
1169 
1170 	mutex_init(&file->file_mutex);
1171 
1172 	filp->private_data = file;
1173 	file->filp = filp;
1174 	file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
1175 
1176 	return nonseekable_open(inode, filp);
1177 }
1178 
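/*
 * Release a file: tear down every context it still owns by pulling the id
 * out of the IDR, destroying the cm_id and freeing undelivered events.
 */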
static int ib_ucm_close(struct inode *inode, struct file *filp)
1180 {
1181 	struct ib_ucm_file *file = filp->private_data;
1182 	struct ib_ucm_context *ctx;
1183 
1184 	mutex_lock(&file->file_mutex);
1185 	while (!list_empty(&file->ctxs)) {
1186 		ctx = list_entry(file->ctxs.next,
1187 				 struct ib_ucm_context, file_list);
1188 		mutex_unlock(&file->file_mutex);
1189 
1190 		mutex_lock(&ctx_id_mutex);
1191 		idr_remove(&ctx_id_table, ctx->id);
1192 		mutex_unlock(&ctx_id_mutex);
1193 
1194 		ib_destroy_cm_id(ctx->cm_id);
1195 		ib_ucm_cleanup_events(ctx);
1196 		kfree(ctx);
1197 
1198 		mutex_lock(&file->file_mutex);
1199 	}
1200 	mutex_unlock(&file->file_mutex);
1201 	kfree(file);
1202 	return 0;
1203 }
1204 
static void ib_ucm_release_dev(struct device *dev)
1206 {
1207 	struct ib_ucm_device *ucm_dev;
1208 
1209 	ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1210 	cdev_del(&ucm_dev->cdev);
1211 	if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1212 		clear_bit(ucm_dev->devnum, dev_map);
1213 	else
1214 		clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
1215 	kfree(ucm_dev);
1216 }
1217 
1218 static const struct file_operations ucm_fops = {
1219 	.owner	 = THIS_MODULE,
1220 	.open	 = ib_ucm_open,
1221 	.release = ib_ucm_close,
1222 	.write	 = ib_ucm_write,
1223 	.poll    = ib_ucm_poll,
1224 	.llseek	 = no_llseek,
1225 };
1226 
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1228 			  char *buf)
1229 {
1230 	struct ib_ucm_device *ucm_dev;
1231 
1232 	ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1233 	return sprintf(buf, "%s\n", ucm_dev->ib_dev->name);
1234 }
1235 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1236 
1237 static dev_t overflow_maj;
1238 static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
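/*
 * Pick a free minor in the overflow region, registering the dynamically
 * allocated overflow char-device region on first use.
 */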
static int find_overflow_devnum(void)
1240 {
1241 	int ret;
1242 
1243 	if (!overflow_maj) {
1244 		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
1245 					  "infiniband_cm");
1246 		if (ret) {
1247 			printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
1248 			return ret;
1249 		}
1250 	}
1251 
1252 	ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
1253 	if (ret >= IB_UCM_MAX_DEVICES)
1254 		return -1;
1255 
1256 	return ret;
1257 }
1258 
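/*
 * Per-device client hook: allocate an ib_ucm_device, grab a minor number
 * (falling back to the overflow region once the 32 fixed minors are
 * exhausted), and register the character and class devices for ucm%d.
 */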
static void ib_ucm_add_one(struct ib_device *device)
1260 {
1261 	int devnum;
1262 	dev_t base;
1263 	struct ib_ucm_device *ucm_dev;
1264 
1265 	if (!device->alloc_ucontext ||
1266 	    rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1267 		return;
1268 
1269 	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
1270 	if (!ucm_dev)
1271 		return;
1272 
1273 	ucm_dev->ib_dev = device;
1274 
1275 	devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
1276 	if (devnum >= IB_UCM_MAX_DEVICES) {
1277 		devnum = find_overflow_devnum();
1278 		if (devnum < 0)
1279 			goto err;
1280 
1281 		ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
1282 		base = devnum + overflow_maj;
1283 		set_bit(devnum, overflow_map);
1284 	} else {
1285 		ucm_dev->devnum = devnum;
1286 		base = devnum + IB_UCM_BASE_DEV;
1287 		set_bit(devnum, dev_map);
1288 	}
1289 
1290 	cdev_init(&ucm_dev->cdev, &ucm_fops);
1291 	ucm_dev->cdev.owner = THIS_MODULE;
1292 	kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
1293 	if (cdev_add(&ucm_dev->cdev, base, 1))
1294 		goto err;
1295 
1296 	ucm_dev->dev.class = &cm_class;
1297 	ucm_dev->dev.parent = device->dma_device;
1298 	ucm_dev->dev.devt = ucm_dev->cdev.dev;
1299 	ucm_dev->dev.release = ib_ucm_release_dev;
1300 	dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
1301 	if (device_register(&ucm_dev->dev))
1302 		goto err_cdev;
1303 
1304 	if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
1305 		goto err_dev;
1306 
1307 	ib_set_client_data(device, &ucm_client, ucm_dev);
1308 	return;
1309 
1310 err_dev:
1311 	device_unregister(&ucm_dev->dev);
1312 err_cdev:
1313 	cdev_del(&ucm_dev->cdev);
1314 	if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1315 		clear_bit(devnum, dev_map);
1316 	else
1317 		clear_bit(devnum, overflow_map);
1318 err:
1319 	kfree(ucm_dev);
1320 	return;
1321 }
1322 
static void ib_ucm_remove_one(struct ib_device *device)
1324 {
1325 	struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
1326 
1327 	if (!ucm_dev)
1328 		return;
1329 
1330 	device_unregister(&ucm_dev->dev);
1331 }
1332 
1333 static CLASS_ATTR_STRING(abi_version, S_IRUGO,
1334 			 __stringify(IB_USER_CM_ABI_VERSION));
1335 
static int __init ib_ucm_init(void)
1337 {
1338 	int ret;
1339 
1340 	ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
1341 				     "infiniband_cm");
1342 	if (ret) {
1343 		printk(KERN_ERR "ucm: couldn't register device number\n");
1344 		goto error1;
1345 	}
1346 
1347 	ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
1348 	if (ret) {
1349 		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
1350 		goto error2;
1351 	}
1352 
1353 	ret = ib_register_client(&ucm_client);
1354 	if (ret) {
1355 		printk(KERN_ERR "ucm: couldn't register client\n");
1356 		goto error3;
1357 	}
1358 	return 0;
1359 
1360 error3:
1361 	class_remove_file(&cm_class, &class_attr_abi_version.attr);
1362 error2:
1363 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
1364 error1:
1365 	return ret;
1366 }
1367 
static void __exit ib_ucm_cleanup(void)
1369 {
1370 	ib_unregister_client(&ucm_client);
1371 	class_remove_file(&cm_class, &class_attr_abi_version.attr);
1372 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
1373 	if (overflow_maj)
1374 		unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
1375 	idr_destroy(&ctx_id_table);
1376 }
1377 
1378 module_init(ib_ucm_init);
1379 module_exit(ib_ucm_cleanup);
1380