1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * RapidIO mport character device
4  *
5  * Copyright 2014-2015 Integrated Device Technology, Inc.
6  *    Alexandre Bounine <alexandre.bounine@idt.com>
7  * Copyright 2014-2015 Prodrive Technologies
8  *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
9  *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
10  * Copyright (C) 2014 Texas Instruments Incorporated
11  *    Aurelien Jacquiot <a-jacquiot@ti.com>
12  */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/cdev.h>
16 #include <linux/ioctl.h>
17 #include <linux/uaccess.h>
18 #include <linux/list.h>
19 #include <linux/fs.h>
20 #include <linux/err.h>
21 #include <linux/net.h>
22 #include <linux/poll.h>
23 #include <linux/spinlock.h>
24 #include <linux/sched.h>
25 #include <linux/kfifo.h>
26 
27 #include <linux/mm.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/mman.h>
31 
32 #include <linux/dma-mapping.h>
33 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
34 #include <linux/dmaengine.h>
35 #endif
36 
37 #include <linux/rio.h>
38 #include <linux/rio_ids.h>
39 #include <linux/rio_drv.h>
40 #include <linux/rio_mport_cdev.h>
41 
42 #include "../rio.h"
43 
44 #define DRV_NAME	"rio_mport"
45 #define DRV_PREFIX	DRV_NAME ": "
46 #define DEV_NAME	"rio_mport"
47 #define DRV_VERSION     "1.0.0"
48 
49 /* Debug output filtering masks */
50 enum {
51 	DBG_NONE	= 0,
52 	DBG_INIT	= BIT(0), /* driver init */
53 	DBG_EXIT	= BIT(1), /* driver exit */
54 	DBG_MPORT	= BIT(2), /* mport add/remove */
55 	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
56 	DBG_DMA		= BIT(4), /* DMA transfer messages */
57 	DBG_MMAP	= BIT(5), /* mapping messages */
58 	DBG_IBW		= BIT(6), /* inbound window */
59 	DBG_EVENT	= BIT(7), /* event handling messages */
60 	DBG_OBW		= BIT(8), /* outbound window messages */
61 	DBG_DBELL	= BIT(9), /* doorbell messages */
62 	DBG_ALL		= ~0,
63 };
64 
65 #ifdef DEBUG
66 #define rmcd_debug(level, fmt, arg...)		\
67 	do {					\
68 		if (DBG_##level & dbg_level)	\
69 			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
70 	} while (0)
71 #else
72 #define rmcd_debug(level, fmt, arg...) \
73 		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
74 #endif
75 
76 #define rmcd_warn(fmt, arg...) \
77 	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)
78 
79 #define rmcd_error(fmt, arg...) \
80 	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
81 
82 MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
83 MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
84 MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
85 MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
86 MODULE_DESCRIPTION("RapidIO mport character device driver");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(DRV_VERSION);
89 
90 static int dma_timeout = 3000; /* DMA transfer timeout in msec */
91 module_param(dma_timeout, int, S_IRUGO);
92 MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");
93 
94 #ifdef DEBUG
95 static u32 dbg_level = DBG_NONE;
96 module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
97 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
98 #endif
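
/*
 * Illustrative note (DEBUG builds only): dbg_level is a bitmask built from
 * the DBG_* values above, so related message classes can be combined, e.g.
 * DBG_DMA | DBG_MMAP = 0x30 enables DMA and mapping messages. Assuming the
 * usual module parameter path, it can also be changed at runtime:
 *
 *	echo 0x30 > /sys/module/rio_mport_cdev/parameters/dbg_level
 */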
99 
100 /*
101  * Internal memory mapping structure
102  */
103 enum rio_mport_map_dir {
104 	MAP_INBOUND,
105 	MAP_OUTBOUND,
106 	MAP_DMA,
107 };
108 
109 struct rio_mport_mapping {
110 	struct list_head node;
111 	struct mport_dev *md;
112 	enum rio_mport_map_dir dir;
113 	u16 rioid;
114 	u64 rio_addr;
115 	dma_addr_t phys_addr; /* for mmap */
116 	void *virt_addr; /* kernel address, for dma_free_coherent */
117 	u64 size;
118 	struct kref ref; /* refcount of vmas sharing the mapping */
119 	struct file *filp;
120 };
121 
122 #define MPORT_EVENT_DEPTH	10
123 
124 /*
125  * mport_dev - driver-specific structure that represents an mport device
126  * @active     mport device status flag
127  * @node       list node to maintain list of registered mports
128  * @cdev       character device
129  * @dev        associated device object
130  * @mport      associated subsystem's master port device object
131  * @buf_mutex  lock for buffer handling
132  * @file_mutex lock for open files list
133  * @file_list  list of open files on given mport
134  * @properties properties of this mport
135  * @portwrites list of inbound portwrite filters
136  * @pw_lock    lock for portwrite filter list
137  * @mappings   list of memory mappings
138  * @dma_chan   default DMA channel associated with this device
139  * @dma_ref    refcount of users of the default DMA channel
140  * @comp       completion associated with the default DMA channel
141  */
142 struct mport_dev {
143 	atomic_t		active;
144 	struct list_head	node;
145 	struct cdev		cdev;
146 	struct device		dev;
147 	struct rio_mport	*mport;
148 	struct mutex		buf_mutex;
149 	struct mutex		file_mutex;
150 	struct list_head	file_list;
151 	struct rio_mport_properties	properties;
152 	struct list_head		doorbells;
153 	spinlock_t			db_lock;
154 	struct list_head		portwrites;
155 	spinlock_t			pw_lock;
156 	struct list_head	mappings;
157 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
158 	struct dma_chan *dma_chan;
159 	struct kref	dma_ref;
160 	struct completion comp;
161 #endif
162 };
163 
164 /*
165  * mport_cdev_priv - data structure specific to individual file object
166  *                   associated with an open device
167  * @md            master port character device object
168  * @async_queue   asynchronous notification queue
169  * @list          node in the list of open file objects
170  * @db_filters    inbound doorbell filters for this descriptor
171  * @pw_filters    portwrite filters for this descriptor
172  * @event_fifo    event fifo for this descriptor
173  * @event_rx_wait wait queue for this descriptor
174  * @fifo_lock     lock for event_fifo
175  * @event_mask    event mask for this descriptor
176  * @dmach         DMA engine channel allocated for this file object
177  */
178 struct mport_cdev_priv {
179 	struct mport_dev	*md;
180 	struct fasync_struct	*async_queue;
181 	struct list_head	list;
182 	struct list_head	db_filters;
183 	struct list_head        pw_filters;
184 	struct kfifo            event_fifo;
185 	wait_queue_head_t       event_rx_wait;
186 	spinlock_t              fifo_lock;
187 	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
188 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
189 	struct dma_chan		*dmach;
190 	struct list_head	async_list;
191 	spinlock_t              req_lock;
192 	struct mutex		dma_lock;
193 	struct kref		dma_ref;
194 	struct completion	comp;
195 #endif
196 };
197 
198 /*
199  * rio_mport_pw_filter - structure to describe a portwrite filter
200  * @md_node   node in mport device's list
201  * @priv_node node in private file object's list
202  * @priv      reference to private data
203  * @filter    actual portwrite filter
204  */
205 struct rio_mport_pw_filter {
206 	struct list_head md_node;
207 	struct list_head priv_node;
208 	struct mport_cdev_priv *priv;
209 	struct rio_pw_filter filter;
210 };
211 
212 /*
213  * rio_mport_db_filter - structure to describe a doorbell filter
214  * @data_node node in mport device's doorbell filter list
215  * @priv_node node in private file object's list
216  * @priv      reference to private data
217  * @filter    actual doorbell filter
218  */
219 struct rio_mport_db_filter {
220 	struct list_head data_node;
221 	struct list_head priv_node;
222 	struct mport_cdev_priv *priv;
223 	struct rio_doorbell_filter filter;
224 };
225 
226 static LIST_HEAD(mport_devs);
227 static DEFINE_MUTEX(mport_devs_lock);
228 
229 #if (0) /* used by commented-out portion of poll function: FIXME */
230 static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
231 #endif
232 
233 static const struct class dev_class = {
234 	.name = DRV_NAME,
235 };
236 static dev_t dev_number;
237 
238 static void mport_release_mapping(struct kref *ref);
239 
240 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
241 			      int local)
242 {
243 	struct rio_mport *mport = priv->md->mport;
244 	struct rio_mport_maint_io maint_io;
245 	u32 *buffer;
246 	u32 offset;
247 	size_t length;
248 	int ret, i;
249 
250 	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
251 		return -EFAULT;
252 
253 	if ((maint_io.offset % 4) ||
254 	    (maint_io.length == 0) || (maint_io.length % 4) ||
255 	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
256 		return -EINVAL;
257 
258 	buffer = vmalloc(maint_io.length);
259 	if (buffer == NULL)
260 		return -ENOMEM;
261 	length = maint_io.length/sizeof(u32);
262 	offset = maint_io.offset;
263 
264 	for (i = 0; i < length; i++) {
265 		if (local)
266 			ret = __rio_local_read_config_32(mport,
267 				offset, &buffer[i]);
268 		else
269 			ret = rio_mport_read_config_32(mport, maint_io.rioid,
270 				maint_io.hopcount, offset, &buffer[i]);
271 		if (ret)
272 			goto out;
273 
274 		offset += 4;
275 	}
276 
277 	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
278 				   buffer, maint_io.length)))
279 		ret = -EFAULT;
280 out:
281 	vfree(buffer);
282 	return ret;
283 }
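
/*
 * Illustrative user-space sketch of a remote maintenance read through this
 * interface (not part of the driver). The device node name and the
 * RIO_MPORT_MAINT_READ_REMOTE ioctl name are assumptions based on the uapi
 * header <linux/rio_mport_cdev.h>; the rio_mport_maint_io fields follow the
 * code above.
 *
 *	uint32_t value;
 *	struct rio_mport_maint_io io = {
 *		.rioid    = destid,
 *		.hopcount = hopcount,
 *		.offset   = 0x0,		// e.g. Device Identity CAR
 *		.length   = sizeof(value),
 *		.buffer   = (uintptr_t)&value,
 *	};
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) == 0)
 *		printf("CAR @0x0 = 0x%08x\n", value);
 */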
284 
285 static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
286 			      int local)
287 {
288 	struct rio_mport *mport = priv->md->mport;
289 	struct rio_mport_maint_io maint_io;
290 	u32 *buffer;
291 	u32 offset;
292 	size_t length;
293 	int ret = -EINVAL, i;
294 
295 	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
296 		return -EFAULT;
297 
298 	if ((maint_io.offset % 4) ||
299 	    (maint_io.length == 0) || (maint_io.length % 4) ||
300 	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
301 		return -EINVAL;
302 
303 	buffer = vmalloc(maint_io.length);
304 	if (buffer == NULL)
305 		return -ENOMEM;
306 	length = maint_io.length;
307 
308 	if (unlikely(copy_from_user(buffer,
309 			(void __user *)(uintptr_t)maint_io.buffer, length))) {
310 		ret = -EFAULT;
311 		goto out;
312 	}
313 
314 	offset = maint_io.offset;
315 	length /= sizeof(u32);
316 
317 	for (i = 0; i < length; i++) {
318 		if (local)
319 			ret = __rio_local_write_config_32(mport,
320 							  offset, buffer[i]);
321 		else
322 			ret = rio_mport_write_config_32(mport, maint_io.rioid,
323 							maint_io.hopcount,
324 							offset, buffer[i]);
325 		if (ret)
326 			goto out;
327 
328 		offset += 4;
329 	}
330 
331 out:
332 	vfree(buffer);
333 	return ret;
334 }
335 
336 
337 /*
338  * Outbound memory mapping functions
339  */
340 static int
341 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
342 				  u16 rioid, u64 raddr, u32 size,
343 				  dma_addr_t *paddr)
344 {
345 	struct rio_mport *mport = md->mport;
346 	struct rio_mport_mapping *map;
347 	int ret;
348 
349 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
350 
351 	map = kzalloc(sizeof(*map), GFP_KERNEL);
352 	if (map == NULL)
353 		return -ENOMEM;
354 
355 	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
356 	if (ret < 0)
357 		goto err_map_outb;
358 
359 	map->dir = MAP_OUTBOUND;
360 	map->rioid = rioid;
361 	map->rio_addr = raddr;
362 	map->size = size;
363 	map->phys_addr = *paddr;
364 	map->filp = filp;
365 	map->md = md;
366 	kref_init(&map->ref);
367 	list_add_tail(&map->node, &md->mappings);
368 	return 0;
369 err_map_outb:
370 	kfree(map);
371 	return ret;
372 }
373 
374 static int
375 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
376 			       u16 rioid, u64 raddr, u32 size,
377 			       dma_addr_t *paddr)
378 {
379 	struct rio_mport_mapping *map;
380 	int err = -ENOMEM;
381 
382 	mutex_lock(&md->buf_mutex);
383 	list_for_each_entry(map, &md->mappings, node) {
384 		if (map->dir != MAP_OUTBOUND)
385 			continue;
386 		if (rioid == map->rioid &&
387 		    raddr == map->rio_addr && size == map->size) {
388 			*paddr = map->phys_addr;
389 			err = 0;
390 			break;
391 		} else if (rioid == map->rioid &&
392 			   raddr < (map->rio_addr + map->size - 1) &&
393 			   (raddr + size) > map->rio_addr) {
394 			err = -EBUSY;
395 			break;
396 		}
397 	}
398 
399 	/* If not found, create new */
400 	if (err == -ENOMEM)
401 		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
402 						size, paddr);
403 	mutex_unlock(&md->buf_mutex);
404 	return err;
405 }
406 
407 static int rio_mport_obw_map(struct file *filp, void __user *arg)
408 {
409 	struct mport_cdev_priv *priv = filp->private_data;
410 	struct mport_dev *data = priv->md;
411 	struct rio_mmap map;
412 	dma_addr_t paddr;
413 	int ret;
414 
415 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
416 		return -EFAULT;
417 
418 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
419 		   map.rioid, map.rio_addr, map.length);
420 
421 	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
422 					     map.rio_addr, map.length, &paddr);
423 	if (ret < 0) {
424 		rmcd_error("Failed to set OBW err= %d", ret);
425 		return ret;
426 	}
427 
428 	map.handle = paddr;
429 
430 	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
431 		return -EFAULT;
432 	return 0;
433 }
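
/*
 * Illustrative user-space sketch of outbound window usage (not part of the
 * driver). The RIO_MAP_OUTBOUND ioctl name is an assumption based on the
 * uapi header; the rio_mmap fields follow the code above. The returned
 * map.handle is the physical address of the window, which this driver's
 * mmap() handler is expected to take as the file offset.
 *
 *	struct rio_mmap map = {
 *		.rioid    = destid,
 *		.rio_addr = 0x10000000,		// example target RIO address
 *		.length   = 0x10000,
 *	};
 *
 *	if (ioctl(fd, RIO_MAP_OUTBOUND, &map) == 0) {
 *		void *obw = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, map.handle);
 *		// CPU accesses to obw are translated into RapidIO requests
 *		// targeting map.rioid at map.rio_addr
 *	}
 */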
434 
435 /*
436  * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
437  *
438  * @filp: file pointer associated with the call
439  * @arg:  mapping handle returned by the mapping routine
440  */
441 static int rio_mport_obw_free(struct file *filp, void __user *arg)
442 {
443 	struct mport_cdev_priv *priv = filp->private_data;
444 	struct mport_dev *md = priv->md;
445 	u64 handle;
446 	struct rio_mport_mapping *map, *_map;
447 
448 	if (!md->mport->ops->unmap_outb)
449 		return -EPROTONOSUPPORT;
450 
451 	if (copy_from_user(&handle, arg, sizeof(handle)))
452 		return -EFAULT;
453 
454 	rmcd_debug(OBW, "h=0x%llx", handle);
455 
456 	mutex_lock(&md->buf_mutex);
457 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
458 		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
459 			if (map->filp == filp) {
460 				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
461 				map->filp = NULL;
462 				kref_put(&map->ref, mport_release_mapping);
463 			}
464 			break;
465 		}
466 	}
467 	mutex_unlock(&md->buf_mutex);
468 
469 	return 0;
470 }
471 
472 /*
473  * maint_hdid_set() - Set the host Device ID
474  * @priv: driver private data
475  * @arg:	Device Id
476  */
477 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
478 {
479 	struct mport_dev *md = priv->md;
480 	u16 hdid;
481 
482 	if (copy_from_user(&hdid, arg, sizeof(hdid)))
483 		return -EFAULT;
484 
485 	md->mport->host_deviceid = hdid;
486 	md->properties.hdid = hdid;
487 	rio_local_set_device_id(md->mport, hdid);
488 
489 	rmcd_debug(MPORT, "Set host device Id to %d", hdid);
490 
491 	return 0;
492 }
493 
494 /*
495  * maint_comptag_set() - Set the host Component Tag
496  * @priv: driver private data
497  * @arg:	Component Tag
498  */
499 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
500 {
501 	struct mport_dev *md = priv->md;
502 	u32 comptag;
503 
504 	if (copy_from_user(&comptag, arg, sizeof(comptag)))
505 		return -EFAULT;
506 
507 	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
508 
509 	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);
510 
511 	return 0;
512 }
513 
514 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
515 
516 struct mport_dma_req {
517 	struct kref refcount;
518 	struct list_head node;
519 	struct file *filp;
520 	struct mport_cdev_priv *priv;
521 	enum rio_transfer_sync sync;
522 	struct sg_table sgt;
523 	struct page **page_list;
524 	unsigned int nr_pages;
525 	struct rio_mport_mapping *map;
526 	struct dma_chan *dmach;
527 	enum dma_data_direction dir;
528 	dma_cookie_t cookie;
529 	enum dma_status	status;
530 	struct completion req_comp;
531 };
532 
533 static void mport_release_def_dma(struct kref *dma_ref)
534 {
535 	struct mport_dev *md =
536 			container_of(dma_ref, struct mport_dev, dma_ref);
537 
538 	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
539 	rio_release_dma(md->dma_chan);
540 	md->dma_chan = NULL;
541 }
542 
543 static void mport_release_dma(struct kref *dma_ref)
544 {
545 	struct mport_cdev_priv *priv =
546 			container_of(dma_ref, struct mport_cdev_priv, dma_ref);
547 
548 	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
549 	complete(&priv->comp);
550 }
551 
552 static void dma_req_free(struct kref *ref)
553 {
554 	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
555 			refcount);
556 	struct mport_cdev_priv *priv = req->priv;
557 
558 	dma_unmap_sg(req->dmach->device->dev,
559 		     req->sgt.sgl, req->sgt.nents, req->dir);
560 	sg_free_table(&req->sgt);
561 	if (req->page_list) {
562 		unpin_user_pages(req->page_list, req->nr_pages);
563 		kfree(req->page_list);
564 	}
565 
566 	if (req->map) {
567 		mutex_lock(&req->map->md->buf_mutex);
568 		kref_put(&req->map->ref, mport_release_mapping);
569 		mutex_unlock(&req->map->md->buf_mutex);
570 	}
571 
572 	kref_put(&priv->dma_ref, mport_release_dma);
573 
574 	kfree(req);
575 }
576 
577 static void dma_xfer_callback(void *param)
578 {
579 	struct mport_dma_req *req = (struct mport_dma_req *)param;
580 	struct mport_cdev_priv *priv = req->priv;
581 
582 	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
583 					       NULL, NULL);
584 	complete(&req->req_comp);
585 	kref_put(&req->refcount, dma_req_free);
586 }
587 
588 /*
589  * prep_dma_xfer() - Configure and send a request to the DMA engine driver
590  *                   to prepare a DMA transfer descriptor.
591  * Returns a pointer to the DMA transaction descriptor allocated by the DMA
592  * driver on success, or NULL/ERR_PTR() on failure. The caller must check a
593  * non-NULL return value with the IS_ERR() macro.
594  */
595 static struct dma_async_tx_descriptor
596 *prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
597 	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
598 	enum dma_ctrl_flags flags)
599 {
600 	struct rio_dma_data tx_data;
601 
602 	tx_data.sg = sgt->sgl;
603 	tx_data.sg_len = nents;
604 	tx_data.rio_addr_u = 0;
605 	tx_data.rio_addr = transfer->rio_addr;
606 	if (dir == DMA_MEM_TO_DEV) {
607 		switch (transfer->method) {
608 		case RIO_EXCHANGE_NWRITE:
609 			tx_data.wr_type = RDW_ALL_NWRITE;
610 			break;
611 		case RIO_EXCHANGE_NWRITE_R_ALL:
612 			tx_data.wr_type = RDW_ALL_NWRITE_R;
613 			break;
614 		case RIO_EXCHANGE_NWRITE_R:
615 			tx_data.wr_type = RDW_LAST_NWRITE_R;
616 			break;
617 		case RIO_EXCHANGE_DEFAULT:
618 			tx_data.wr_type = RDW_DEFAULT;
619 			break;
620 		default:
621 			return ERR_PTR(-EINVAL);
622 		}
623 	}
624 
625 	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
626 }
627 
628 /* Request a DMA channel associated with this mport device.
629  * Try to request a dedicated DMA channel for every new process that opens
630  * the given mport. If a dedicated channel is not available, fall back to
631  * the default channel, which is the first DMA channel opened on the mport.
632  */
633 static int get_dma_channel(struct mport_cdev_priv *priv)
634 {
635 	mutex_lock(&priv->dma_lock);
636 	if (!priv->dmach) {
637 		priv->dmach = rio_request_mport_dma(priv->md->mport);
638 		if (!priv->dmach) {
639 			/* Use default DMA channel if available */
640 			if (priv->md->dma_chan) {
641 				priv->dmach = priv->md->dma_chan;
642 				kref_get(&priv->md->dma_ref);
643 			} else {
644 				rmcd_error("Failed to get DMA channel");
645 				mutex_unlock(&priv->dma_lock);
646 				return -ENODEV;
647 			}
648 		} else if (!priv->md->dma_chan) {
649 			/* Register default DMA channel if we do not have one */
650 			priv->md->dma_chan = priv->dmach;
651 			kref_init(&priv->md->dma_ref);
652 			rmcd_debug(DMA, "Register DMA_chan %d as default",
653 				   priv->dmach->chan_id);
654 		}
655 
656 		kref_init(&priv->dma_ref);
657 		init_completion(&priv->comp);
658 	}
659 
660 	kref_get(&priv->dma_ref);
661 	mutex_unlock(&priv->dma_lock);
662 	return 0;
663 }
664 
665 static void put_dma_channel(struct mport_cdev_priv *priv)
666 {
667 	kref_put(&priv->dma_ref, mport_release_dma);
668 }
669 
670 /*
671  * DMA transfer functions
672  */
673 static int do_dma_request(struct mport_dma_req *req,
674 			  struct rio_transfer_io *xfer,
675 			  enum rio_transfer_sync sync, int nents)
676 {
677 	struct mport_cdev_priv *priv;
678 	struct sg_table *sgt;
679 	struct dma_chan *chan;
680 	struct dma_async_tx_descriptor *tx;
681 	dma_cookie_t cookie;
682 	unsigned long tmo = msecs_to_jiffies(dma_timeout);
683 	enum dma_transfer_direction dir;
684 	long wret;
685 	int ret = 0;
686 
687 	priv = req->priv;
688 	sgt = &req->sgt;
689 
690 	chan = priv->dmach;
691 	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
692 
693 	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
694 		   current->comm, task_pid_nr(current),
695 		   dev_name(&chan->dev->device),
696 		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
697 
698 	/* Initialize DMA transaction request */
699 	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
700 			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
701 
702 	if (!tx) {
703 		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
704 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
705 			xfer->rio_addr, xfer->length);
706 		ret = -EIO;
707 		goto err_out;
708 	} else if (IS_ERR(tx)) {
709 		ret = PTR_ERR(tx);
710 		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
711 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
712 			xfer->rio_addr, xfer->length);
713 		goto err_out;
714 	}
715 
716 	tx->callback = dma_xfer_callback;
717 	tx->callback_param = req;
718 
719 	req->status = DMA_IN_PROGRESS;
720 	kref_get(&req->refcount);
721 
722 	cookie = dmaengine_submit(tx);
723 	req->cookie = cookie;
724 
725 	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
726 		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
727 
728 	if (dma_submit_error(cookie)) {
729 		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
730 			   cookie, xfer->rio_addr, xfer->length);
731 		kref_put(&req->refcount, dma_req_free);
732 		ret = -EIO;
733 		goto err_out;
734 	}
735 
736 	dma_async_issue_pending(chan);
737 
738 	if (sync == RIO_TRANSFER_ASYNC) {
739 		spin_lock(&priv->req_lock);
740 		list_add_tail(&req->node, &priv->async_list);
741 		spin_unlock(&priv->req_lock);
742 		return cookie;
743 	} else if (sync == RIO_TRANSFER_FAF)
744 		return 0;
745 
746 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
747 
748 	if (wret == 0) {
749 		/* Timeout on wait occurred */
750 		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
751 		       current->comm, task_pid_nr(current),
752 		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
753 		return -ETIMEDOUT;
754 	} else if (wret == -ERESTARTSYS) {
755 		/* wait_for_completion was interrupted by a signal, but the DMA
756 		 * transfer may still be in progress
757 		 */
758 		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
759 			current->comm, task_pid_nr(current),
760 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
761 		return -EINTR;
762 	}
763 
764 	if (req->status != DMA_COMPLETE) {
765 		/* DMA transaction completion was signaled with error */
766 		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
767 			current->comm, task_pid_nr(current),
768 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
769 			cookie, req->status, ret);
770 		ret = -EIO;
771 	}
772 
773 err_out:
774 	return ret;
775 }
776 
777 /*
778  * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
779  *                      the remote RapidIO device
780  * @filp: file pointer associated with the call
781  * @transfer_mode: DMA transfer mode
782  * @sync: synchronization mode
783  * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
784  *                               DMA_DEV_TO_MEM = read)
785  * @xfer: data transfer descriptor structure
786  */
787 static int
788 rio_dma_transfer(struct file *filp, u32 transfer_mode,
789 		 enum rio_transfer_sync sync, enum dma_data_direction dir,
790 		 struct rio_transfer_io *xfer)
791 {
792 	struct mport_cdev_priv *priv = filp->private_data;
793 	unsigned long nr_pages = 0;
794 	struct page **page_list = NULL;
795 	struct mport_dma_req *req;
796 	struct mport_dev *md = priv->md;
797 	struct dma_chan *chan;
798 	int ret;
799 	int nents;
800 
801 	if (xfer->length == 0)
802 		return -EINVAL;
803 	req = kzalloc(sizeof(*req), GFP_KERNEL);
804 	if (!req)
805 		return -ENOMEM;
806 
807 	ret = get_dma_channel(priv);
808 	if (ret) {
809 		kfree(req);
810 		return ret;
811 	}
812 	chan = priv->dmach;
813 
814 	kref_init(&req->refcount);
815 	init_completion(&req->req_comp);
816 	req->dir = dir;
817 	req->filp = filp;
818 	req->priv = priv;
819 	req->dmach = chan;
820 	req->sync = sync;
821 
822 	/*
823 	 * If parameter loc_addr != NULL, we are transferring data from/to
824 	 * data buffer allocated in user-space: lock in memory user-space
825 	 * buffer pages and build an SG table for DMA transfer request
826 	 *
827 	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
828 	 * used for DMA data transfers: build single entry SG table using
829 	 * offset within the internal buffer specified by handle parameter.
830 	 */
831 	if (xfer->loc_addr) {
832 		unsigned int offset;
833 		long pinned;
834 
835 		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
836 		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
837 
838 		page_list = kmalloc_array(nr_pages,
839 					  sizeof(*page_list), GFP_KERNEL);
840 		if (page_list == NULL) {
841 			ret = -ENOMEM;
842 			goto err_req;
843 		}
844 
845 		pinned = pin_user_pages_fast(
846 				(unsigned long)xfer->loc_addr & PAGE_MASK,
847 				nr_pages,
848 				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
849 				page_list);
850 
851 		if (pinned != nr_pages) {
852 			if (pinned < 0) {
853 				rmcd_error("pin_user_pages_fast err=%ld",
854 					   pinned);
855 				nr_pages = 0;
856 			} else {
857 				rmcd_error("pinned %ld out of %ld pages",
858 					   pinned, nr_pages);
859 				/*
860 				 * Set nr_pages to the number of pages that
861 				 * must be unpinned in the error handler.
862 				 */
863 				nr_pages = pinned;
864 			}
865 			ret = -EFAULT;
866 			goto err_pg;
867 		}
868 
869 		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
870 					offset, xfer->length, GFP_KERNEL);
871 		if (ret) {
872 			rmcd_error("sg_alloc_table failed with err=%d", ret);
873 			goto err_pg;
874 		}
875 
876 		req->page_list = page_list;
877 		req->nr_pages = nr_pages;
878 	} else {
879 		dma_addr_t baddr;
880 		struct rio_mport_mapping *map;
881 
882 		baddr = (dma_addr_t)xfer->handle;
883 
884 		mutex_lock(&md->buf_mutex);
885 		list_for_each_entry(map, &md->mappings, node) {
886 			if (baddr >= map->phys_addr &&
887 			    baddr < (map->phys_addr + map->size)) {
888 				kref_get(&map->ref);
889 				req->map = map;
890 				break;
891 			}
892 		}
893 		mutex_unlock(&md->buf_mutex);
894 
895 		if (req->map == NULL) {
896 			ret = -ENOMEM;
897 			goto err_req;
898 		}
899 
900 		if (xfer->length + xfer->offset > req->map->size) {
901 			ret = -EINVAL;
902 			goto err_req;
903 		}
904 
905 		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
906 		if (unlikely(ret)) {
907 			rmcd_error("sg_alloc_table failed for internal buf");
908 			goto err_req;
909 		}
910 
911 		sg_set_buf(req->sgt.sgl,
912 			   req->map->virt_addr + (baddr - req->map->phys_addr) +
913 				xfer->offset, xfer->length);
914 	}
915 
916 	nents = dma_map_sg(chan->device->dev,
917 			   req->sgt.sgl, req->sgt.nents, dir);
918 	if (nents == 0) {
919 		rmcd_error("Failed to map SG list");
920 		ret = -EFAULT;
921 		goto err_pg;
922 	}
923 
924 	ret = do_dma_request(req, xfer, sync, nents);
925 
926 	if (ret >= 0) {
927 		if (sync == RIO_TRANSFER_ASYNC)
928 			return ret; /* return ASYNC cookie */
929 	} else {
930 		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
931 	}
932 
933 err_pg:
934 	if (!req->page_list) {
935 		unpin_user_pages(page_list, nr_pages);
936 		kfree(page_list);
937 	}
938 err_req:
939 	kref_put(&req->refcount, dma_req_free);
940 	return ret;
941 }
942 
943 static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
944 {
945 	struct mport_cdev_priv *priv = filp->private_data;
946 	struct rio_transaction transaction;
947 	struct rio_transfer_io *transfer;
948 	enum dma_data_direction dir;
949 	int i, ret = 0;
950 	size_t size;
951 
952 	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
953 		return -EFAULT;
954 
955 	if (transaction.count != 1) /* only single transfer for now */
956 		return -EINVAL;
957 
958 	if ((transaction.transfer_mode &
959 	     priv->md->properties.transfer_mode) == 0)
960 		return -ENODEV;
961 
962 	size = array_size(sizeof(*transfer), transaction.count);
963 	transfer = vmalloc(size);
964 	if (!transfer)
965 		return -ENOMEM;
966 
967 	if (unlikely(copy_from_user(transfer,
968 				    (void __user *)(uintptr_t)transaction.block,
969 				    size))) {
970 		ret = -EFAULT;
971 		goto out_free;
972 	}
973 
974 	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
975 					DMA_FROM_DEVICE : DMA_TO_DEVICE;
976 	for (i = 0; i < transaction.count && ret == 0; i++)
977 		ret = rio_dma_transfer(filp, transaction.transfer_mode,
978 			transaction.sync, dir, &transfer[i]);
979 
980 	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
981 				  transfer, size)))
982 		ret = -EFAULT;
983 
984 out_free:
985 	vfree(transfer);
986 
987 	return ret;
988 }
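
/*
 * Illustrative user-space sketch of a single synchronous DMA write using
 * the transfer ioctl (not part of the driver). The RIO_TRANSFER ioctl name
 * and the RIO_TRANSFER_SYNC/RIO_TRANSFER_DIR_WRITE constants are assumptions
 * based on the uapi header; the rio_transaction and rio_transfer_io fields
 * follow the code above.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid    = destid,
 *		.rio_addr = 0x10000000,		// example remote address
 *		.loc_addr = (uintptr_t)user_buf, // pinned by the driver
 *		.length   = buf_len,
 *		.method   = RIO_EXCHANGE_DEFAULT,
 *	};
 *	struct rio_transaction tx = {
 *		.block = (uintptr_t)&xfer,
 *		.count = 1,			// only a single transfer is accepted
 *		.transfer_mode = transfer_mode,	// must match mport properties
 *		.sync  = RIO_TRANSFER_SYNC,
 *		.dir   = RIO_TRANSFER_DIR_WRITE,
 *	};
 *
 *	ret = ioctl(fd, RIO_TRANSFER, &tx);
 */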
989 
990 static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
991 {
992 	struct mport_cdev_priv *priv;
993 	struct rio_async_tx_wait w_param;
994 	struct mport_dma_req *req;
995 	dma_cookie_t cookie;
996 	unsigned long tmo;
997 	long wret;
998 	int found = 0;
999 	int ret;
1000 
1001 	priv = (struct mport_cdev_priv *)filp->private_data;
1002 
1003 	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
1004 		return -EFAULT;
1005 
1006 	cookie = w_param.token;
1007 	if (w_param.timeout)
1008 		tmo = msecs_to_jiffies(w_param.timeout);
1009 	else /* Use default DMA timeout */
1010 		tmo = msecs_to_jiffies(dma_timeout);
1011 
1012 	spin_lock(&priv->req_lock);
1013 	list_for_each_entry(req, &priv->async_list, node) {
1014 		if (req->cookie == cookie) {
1015 			list_del(&req->node);
1016 			found = 1;
1017 			break;
1018 		}
1019 	}
1020 	spin_unlock(&priv->req_lock);
1021 
1022 	if (!found)
1023 		return -EAGAIN;
1024 
1025 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
1026 
1027 	if (wret == 0) {
1028 		/* Timeout on wait occurred */
1029 		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
1030 		       current->comm, task_pid_nr(current),
1031 		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1032 		ret = -ETIMEDOUT;
1033 		goto err_tmo;
1034 	} else if (wret == -ERESTARTSYS) {
1035 		/* wait_for_completion was interrupted by a signal, but the DMA
1036 		 * transfer may still be in progress
1037 		 */
1038 		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
1039 			current->comm, task_pid_nr(current),
1040 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1041 		ret = -EINTR;
1042 		goto err_tmo;
1043 	}
1044 
1045 	if (req->status != DMA_COMPLETE) {
1046 		/* DMA transaction completion signaled with transfer error */
1047 		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
1048 			current->comm, task_pid_nr(current),
1049 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
1050 			req->status);
1051 		ret = -EIO;
1052 	} else
1053 		ret = 0;
1054 
1055 	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
1056 		kref_put(&req->refcount, dma_req_free);
1057 
1058 	return ret;
1059 
1060 err_tmo:
1061 	/* Return request back into async queue */
1062 	spin_lock(&priv->req_lock);
1063 	list_add_tail(&req->node, &priv->async_list);
1064 	spin_unlock(&priv->req_lock);
1065 	return ret;
1066 }
1067 
1068 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
1069 			u64 size, struct rio_mport_mapping **mapping)
1070 {
1071 	struct rio_mport_mapping *map;
1072 
1073 	map = kzalloc(sizeof(*map), GFP_KERNEL);
1074 	if (map == NULL)
1075 		return -ENOMEM;
1076 
1077 	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
1078 					    &map->phys_addr, GFP_KERNEL);
1079 	if (map->virt_addr == NULL) {
1080 		kfree(map);
1081 		return -ENOMEM;
1082 	}
1083 
1084 	map->dir = MAP_DMA;
1085 	map->size = size;
1086 	map->filp = filp;
1087 	map->md = md;
1088 	kref_init(&map->ref);
1089 	mutex_lock(&md->buf_mutex);
1090 	list_add_tail(&map->node, &md->mappings);
1091 	mutex_unlock(&md->buf_mutex);
1092 	*mapping = map;
1093 
1094 	return 0;
1095 }
1096 
1097 static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1098 {
1099 	struct mport_cdev_priv *priv = filp->private_data;
1100 	struct mport_dev *md = priv->md;
1101 	struct rio_dma_mem map;
1102 	struct rio_mport_mapping *mapping = NULL;
1103 	int ret;
1104 
1105 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1106 		return -EFAULT;
1107 
1108 	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
1109 	if (ret)
1110 		return ret;
1111 
1112 	map.dma_handle = mapping->phys_addr;
1113 
1114 	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1115 		mutex_lock(&md->buf_mutex);
1116 		kref_put(&mapping->ref, mport_release_mapping);
1117 		mutex_unlock(&md->buf_mutex);
1118 		return -EFAULT;
1119 	}
1120 
1121 	return 0;
1122 }
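
/*
 * Illustrative user-space sketch of allocating and mapping a kernel-side DMA
 * buffer (not part of the driver). The RIO_ALLOC_DMA/RIO_FREE_DMA ioctl
 * names are assumptions based on the uapi header; the rio_dma_mem fields
 * follow the code above. The returned dma_handle identifies the buffer and,
 * in this driver, is also used as the mmap() offset and as the xfer.handle
 * value for DMA transfers from/to the internal buffer.
 *
 *	struct rio_dma_mem dbuf = { .length = 2 * 1024 * 1024 };
 *
 *	if (ioctl(fd, RIO_ALLOC_DMA, &dbuf) == 0) {
 *		void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, dbuf.dma_handle);
 *		// ... use the buffer, e.g. via RIO_TRANSFER with
 *		// xfer.loc_addr = 0 and xfer.handle = dbuf.dma_handle ...
 *		ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 *	}
 */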
1123 
1124 static int rio_mport_free_dma(struct file *filp, void __user *arg)
1125 {
1126 	struct mport_cdev_priv *priv = filp->private_data;
1127 	struct mport_dev *md = priv->md;
1128 	u64 handle;
1129 	int ret = -EFAULT;
1130 	struct rio_mport_mapping *map, *_map;
1131 
1132 	if (copy_from_user(&handle, arg, sizeof(handle)))
1133 		return -EFAULT;
1134 	rmcd_debug(EXIT, "filp=%p", filp);
1135 
1136 	mutex_lock(&md->buf_mutex);
1137 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
1138 		if (map->dir == MAP_DMA && map->phys_addr == handle &&
1139 		    map->filp == filp) {
1140 			kref_put(&map->ref, mport_release_mapping);
1141 			ret = 0;
1142 			break;
1143 		}
1144 	}
1145 	mutex_unlock(&md->buf_mutex);
1146 
1147 	if (ret == -EFAULT) {
1148 		rmcd_debug(DMA, "ERR no matching mapping");
1149 		return ret;
1150 	}
1151 
1152 	return 0;
1153 }
1154 #else
1155 static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
1156 {
1157 	return -ENODEV;
1158 }
1159 
1160 static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
1161 {
1162 	return -ENODEV;
1163 }
1164 
1165 static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1166 {
1167 	return -ENODEV;
1168 }
1169 
1170 static int rio_mport_free_dma(struct file *filp, void __user *arg)
1171 {
1172 	return -ENODEV;
1173 }
1174 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1175 
1176 /*
1177  * Inbound memory mapping functions
1178  */
1179 
1180 static int
1181 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
1182 				u64 raddr, u64 size,
1183 				struct rio_mport_mapping **mapping)
1184 {
1185 	struct rio_mport *mport = md->mport;
1186 	struct rio_mport_mapping *map;
1187 	int ret;
1188 
1189 	/* rio_map_inb_region() accepts u32 size */
1190 	if (size > 0xffffffff)
1191 		return -EINVAL;
1192 
1193 	map = kzalloc(sizeof(*map), GFP_KERNEL);
1194 	if (map == NULL)
1195 		return -ENOMEM;
1196 
1197 	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
1198 					    &map->phys_addr, GFP_KERNEL);
1199 	if (map->virt_addr == NULL) {
1200 		ret = -ENOMEM;
1201 		goto err_dma_alloc;
1202 	}
1203 
1204 	if (raddr == RIO_MAP_ANY_ADDR)
1205 		raddr = map->phys_addr;
1206 	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
1207 	if (ret < 0)
1208 		goto err_map_inb;
1209 
1210 	map->dir = MAP_INBOUND;
1211 	map->rio_addr = raddr;
1212 	map->size = size;
1213 	map->filp = filp;
1214 	map->md = md;
1215 	kref_init(&map->ref);
1216 	mutex_lock(&md->buf_mutex);
1217 	list_add_tail(&map->node, &md->mappings);
1218 	mutex_unlock(&md->buf_mutex);
1219 	*mapping = map;
1220 	return 0;
1221 
1222 err_map_inb:
1223 	dma_free_coherent(mport->dev.parent, size,
1224 			  map->virt_addr, map->phys_addr);
1225 err_dma_alloc:
1226 	kfree(map);
1227 	return ret;
1228 }
1229 
1230 static int
1231 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
1232 			      u64 raddr, u64 size,
1233 			      struct rio_mport_mapping **mapping)
1234 {
1235 	struct rio_mport_mapping *map;
1236 	int err = -ENOMEM;
1237 
1238 	if (raddr == RIO_MAP_ANY_ADDR)
1239 		goto get_new;
1240 
1241 	mutex_lock(&md->buf_mutex);
1242 	list_for_each_entry(map, &md->mappings, node) {
1243 		if (map->dir != MAP_INBOUND)
1244 			continue;
1245 		if (raddr == map->rio_addr && size == map->size) {
1246 			/* allow exact match only */
1247 			*mapping = map;
1248 			err = 0;
1249 			break;
1250 		} else if (raddr < (map->rio_addr + map->size - 1) &&
1251 			   (raddr + size) > map->rio_addr) {
1252 			err = -EBUSY;
1253 			break;
1254 		}
1255 	}
1256 	mutex_unlock(&md->buf_mutex);
1257 
1258 	if (err != -ENOMEM)
1259 		return err;
1260 get_new:
1261 	/* not found, create new */
1262 	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
1263 }
1264 
1265 static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1266 {
1267 	struct mport_cdev_priv *priv = filp->private_data;
1268 	struct mport_dev *md = priv->md;
1269 	struct rio_mmap map;
1270 	struct rio_mport_mapping *mapping = NULL;
1271 	int ret;
1272 
1273 	if (!md->mport->ops->map_inb)
1274 		return -EPROTONOSUPPORT;
1275 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1276 		return -EFAULT;
1277 
1278 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1279 
1280 	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
1281 					    map.length, &mapping);
1282 	if (ret)
1283 		return ret;
1284 
1285 	map.handle = mapping->phys_addr;
1286 	map.rio_addr = mapping->rio_addr;
1287 
1288 	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1289 		/* Delete mapping if it was created by this request */
1290 		if (ret == 0 && mapping->filp == filp) {
1291 			mutex_lock(&md->buf_mutex);
1292 			kref_put(&mapping->ref, mport_release_mapping);
1293 			mutex_unlock(&md->buf_mutex);
1294 		}
1295 		return -EFAULT;
1296 	}
1297 
1298 	return 0;
1299 }
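
/*
 * Illustrative user-space sketch of inbound window usage (not part of the
 * driver). The RIO_MAP_INBOUND ioctl name is an assumption based on the
 * uapi header; the rio_mmap fields follow the code above. RIO_MAP_ANY_ADDR
 * lets the driver pick the RapidIO address; the chosen address and the
 * mapping handle are returned in the same structure.
 *
 *	struct rio_mmap ibw = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,
 *		.length   = 0x10000,
 *	};
 *
 *	if (ioctl(fd, RIO_MAP_INBOUND, &ibw) == 0) {
 *		void *buf = mmap(NULL, ibw.length, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, ibw.handle);
 *		// remote writes to ibw.rio_addr land in this buffer
 *	}
 */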
1300 
1301 /*
1302  * rio_mport_inbound_free() - unmap from RapidIO address space and free
1303  *                    previously allocated inbound DMA coherent buffer
1304  * @filp: file pointer associated with the call
1305  * @arg:  buffer handle returned by the allocation routine
1306  */
1307 static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1308 {
1309 	struct mport_cdev_priv *priv = filp->private_data;
1310 	struct mport_dev *md = priv->md;
1311 	u64 handle;
1312 	struct rio_mport_mapping *map, *_map;
1313 
1314 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1315 
1316 	if (!md->mport->ops->unmap_inb)
1317 		return -EPROTONOSUPPORT;
1318 
1319 	if (copy_from_user(&handle, arg, sizeof(handle)))
1320 		return -EFAULT;
1321 
1322 	mutex_lock(&md->buf_mutex);
1323 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
1324 		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
1325 			if (map->filp == filp) {
1326 				map->filp = NULL;
1327 				kref_put(&map->ref, mport_release_mapping);
1328 			}
1329 			break;
1330 		}
1331 	}
1332 	mutex_unlock(&md->buf_mutex);
1333 
1334 	return 0;
1335 }
1336 
1337 /*
1338  * maint_port_idx_get() - Get the port index of the mport instance
1339  * @priv: driver private data
1340  * @arg:  port index
1341  */
1342 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
1343 {
1344 	struct mport_dev *md = priv->md;
1345 	u32 port_idx = md->mport->index;
1346 
1347 	rmcd_debug(MPORT, "port_index=%d", port_idx);
1348 
1349 	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
1350 		return -EFAULT;
1351 
1352 	return 0;
1353 }
1354 
1355 static int rio_mport_add_event(struct mport_cdev_priv *priv,
1356 			       struct rio_event *event)
1357 {
1358 	int overflow;
1359 
1360 	if (!(priv->event_mask & event->header))
1361 		return -EACCES;
1362 
1363 	spin_lock(&priv->fifo_lock);
1364 	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
1365 		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
1366 			sizeof(*event)) != sizeof(*event);
1367 	spin_unlock(&priv->fifo_lock);
1368 
1369 	wake_up_interruptible(&priv->event_rx_wait);
1370 
1371 	if (overflow) {
1372 		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
1373 		return -EBUSY;
1374 	}
1375 
1376 	return 0;
1377 }
1378 
1379 static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
1380 				       u16 src, u16 dst, u16 info)
1381 {
1382 	struct mport_dev *data = dev_id;
1383 	struct mport_cdev_priv *priv;
1384 	struct rio_mport_db_filter *db_filter;
1385 	struct rio_event event;
1386 	int handled;
1387 
1388 	event.header = RIO_DOORBELL;
1389 	event.u.doorbell.rioid = src;
1390 	event.u.doorbell.payload = info;
1391 
1392 	handled = 0;
1393 	spin_lock(&data->db_lock);
1394 	list_for_each_entry(db_filter, &data->doorbells, data_node) {
1395 		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
1396 		      db_filter->filter.rioid == src)) &&
1397 		      info >= db_filter->filter.low &&
1398 		      info <= db_filter->filter.high) {
1399 			priv = db_filter->priv;
1400 			rio_mport_add_event(priv, &event);
1401 			handled = 1;
1402 		}
1403 	}
1404 	spin_unlock(&data->db_lock);
1405 
1406 	if (!handled)
1407 		dev_warn(&data->dev,
1408 			"%s: spurious DB received from 0x%x, info=0x%04x\n",
1409 			__func__, src, info);
1410 }
1411 
1412 static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
1413 				   void __user *arg)
1414 {
1415 	struct mport_dev *md = priv->md;
1416 	struct rio_mport_db_filter *db_filter;
1417 	struct rio_doorbell_filter filter;
1418 	unsigned long flags;
1419 	int ret;
1420 
1421 	if (copy_from_user(&filter, arg, sizeof(filter)))
1422 		return -EFAULT;
1423 
1424 	if (filter.low > filter.high)
1425 		return -EINVAL;
1426 
1427 	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
1428 				    rio_mport_doorbell_handler);
1429 	if (ret) {
1430 		rmcd_error("%s failed to register IBDB, err=%d",
1431 			   dev_name(&md->dev), ret);
1432 		return ret;
1433 	}
1434 
1435 	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
1436 	if (db_filter == NULL) {
1437 		rio_release_inb_dbell(md->mport, filter.low, filter.high);
1438 		return -ENOMEM;
1439 	}
1440 
1441 	db_filter->filter = filter;
1442 	db_filter->priv = priv;
1443 	spin_lock_irqsave(&md->db_lock, flags);
1444 	list_add_tail(&db_filter->priv_node, &priv->db_filters);
1445 	list_add_tail(&db_filter->data_node, &md->doorbells);
1446 	spin_unlock_irqrestore(&md->db_lock, flags);
1447 
1448 	return 0;
1449 }
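
/*
 * Illustrative user-space sketch of receiving doorbells (not part of the
 * driver). The RIO_ENABLE_DOORBELL_RANGE and RIO_SET_EVENT_MASK ioctl names
 * are assumptions based on the uapi header; the rio_doorbell_filter and
 * rio_event fields follow the code in this file. Matching doorbells are
 * queued to the per-file event fifo and are expected to be returned by
 * read() on the same descriptor.
 *
 *	struct rio_doorbell_filter dbf = {
 *		.rioid = RIO_INVALID_DESTID,	// accept any source
 *		.low   = 0x0000,
 *		.high  = 0xffff,
 *	};
 *	unsigned int mask = RIO_DOORBELL;
 *	struct rio_event ev;
 *
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &dbf);
 *	ioctl(fd, RIO_SET_EVENT_MASK, &mask);
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("DB from %u payload 0x%04x\n",
 *		       (unsigned int)ev.u.doorbell.rioid,
 *		       (unsigned int)ev.u.doorbell.payload);
 */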
1450 
1451 static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
1452 {
1453 	list_del(&db_filter->data_node);
1454 	list_del(&db_filter->priv_node);
1455 	kfree(db_filter);
1456 }
1457 
1458 static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
1459 				      void __user *arg)
1460 {
1461 	struct rio_mport_db_filter *db_filter;
1462 	struct rio_doorbell_filter filter;
1463 	unsigned long flags;
1464 	int ret = -EINVAL;
1465 
1466 	if (copy_from_user(&filter, arg, sizeof(filter)))
1467 		return -EFAULT;
1468 
1469 	if (filter.low > filter.high)
1470 		return -EINVAL;
1471 
1472 	spin_lock_irqsave(&priv->md->db_lock, flags);
1473 	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
1474 		if (db_filter->filter.rioid == filter.rioid &&
1475 		    db_filter->filter.low == filter.low &&
1476 		    db_filter->filter.high == filter.high) {
1477 			rio_mport_delete_db_filter(db_filter);
1478 			ret = 0;
1479 			break;
1480 		}
1481 	}
1482 	spin_unlock_irqrestore(&priv->md->db_lock, flags);
1483 
1484 	if (!ret)
1485 		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
1486 
1487 	return ret;
1488 }
1489 
1490 static int rio_mport_match_pw(union rio_pw_msg *msg,
1491 			      struct rio_pw_filter *filter)
1492 {
1493 	if ((msg->em.comptag & filter->mask) < filter->low ||
1494 		(msg->em.comptag & filter->mask) > filter->high)
1495 		return 0;
1496 	return 1;
1497 }
1498 
1499 static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
1500 				union rio_pw_msg *msg, int step)
1501 {
1502 	struct mport_dev *md = context;
1503 	struct mport_cdev_priv *priv;
1504 	struct rio_mport_pw_filter *pw_filter;
1505 	struct rio_event event;
1506 	int handled;
1507 
1508 	event.header = RIO_PORTWRITE;
1509 	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
1510 
1511 	handled = 0;
1512 	spin_lock(&md->pw_lock);
1513 	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
1514 		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
1515 			priv = pw_filter->priv;
1516 			rio_mport_add_event(priv, &event);
1517 			handled = 1;
1518 		}
1519 	}
1520 	spin_unlock(&md->pw_lock);
1521 
1522 	if (!handled) {
1523 		printk_ratelimited(KERN_WARNING DRV_NAME
1524 			": mport%d received spurious PW from 0x%08x\n",
1525 			mport->id, msg->em.comptag);
1526 	}
1527 
1528 	return 0;
1529 }
1530 
1531 static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
1532 				   void __user *arg)
1533 {
1534 	struct mport_dev *md = priv->md;
1535 	struct rio_mport_pw_filter *pw_filter;
1536 	struct rio_pw_filter filter;
1537 	unsigned long flags;
1538 	int hadd = 0;
1539 
1540 	if (copy_from_user(&filter, arg, sizeof(filter)))
1541 		return -EFAULT;
1542 
1543 	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
1544 	if (pw_filter == NULL)
1545 		return -ENOMEM;
1546 
1547 	pw_filter->filter = filter;
1548 	pw_filter->priv = priv;
1549 	spin_lock_irqsave(&md->pw_lock, flags);
1550 	if (list_empty(&md->portwrites))
1551 		hadd = 1;
1552 	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
1553 	list_add_tail(&pw_filter->md_node, &md->portwrites);
1554 	spin_unlock_irqrestore(&md->pw_lock, flags);
1555 
1556 	if (hadd) {
1557 		int ret;
1558 
1559 		ret = rio_add_mport_pw_handler(md->mport, md,
1560 					       rio_mport_pw_handler);
1561 		if (ret) {
1562 			dev_err(&md->dev,
1563 				"%s: failed to add IB_PW handler, err=%d\n",
1564 				__func__, ret);
1565 			return ret;
1566 		}
1567 		rio_pw_enable(md->mport, 1);
1568 	}
1569 
1570 	return 0;
1571 }
1572 
1573 static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
1574 {
1575 	list_del(&pw_filter->md_node);
1576 	list_del(&pw_filter->priv_node);
1577 	kfree(pw_filter);
1578 }
1579 
1580 static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
1581 				     struct rio_pw_filter *b)
1582 {
1583 	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
1584 		return 1;
1585 	return 0;
1586 }
1587 
1588 static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
1589 				      void __user *arg)
1590 {
1591 	struct mport_dev *md = priv->md;
1592 	struct rio_mport_pw_filter *pw_filter;
1593 	struct rio_pw_filter filter;
1594 	unsigned long flags;
1595 	int ret = -EINVAL;
1596 	int hdel = 0;
1597 
1598 	if (copy_from_user(&filter, arg, sizeof(filter)))
1599 		return -EFAULT;
1600 
1601 	spin_lock_irqsave(&md->pw_lock, flags);
1602 	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
1603 		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
1604 			rio_mport_delete_pw_filter(pw_filter);
1605 			ret = 0;
1606 			break;
1607 		}
1608 	}
1609 
1610 	if (list_empty(&md->portwrites))
1611 		hdel = 1;
1612 	spin_unlock_irqrestore(&md->pw_lock, flags);
1613 
1614 	if (hdel) {
1615 		rio_del_mport_pw_handler(md->mport, priv->md,
1616 					 rio_mport_pw_handler);
1617 		rio_pw_enable(md->mport, 0);
1618 	}
1619 
1620 	return ret;
1621 }
1622 
1623 /*
1624  * rio_release_dev - release routine for kernel RIO device object
1625  * @dev: kernel device object associated with a RIO device structure
1626  *
1627  * Frees the RIO device structure associated with the given kernel
1628  * device object.
1629  */
1630 static void rio_release_dev(struct device *dev)
1631 {
1632 	struct rio_dev *rdev;
1633 
1634 	rdev = to_rio_dev(dev);
1635 	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
1636 	kfree(rdev);
1637 }
1638 
1639 
1640 static void rio_release_net(struct device *dev)
1641 {
1642 	struct rio_net *net;
1643 
1644 	net = to_rio_net(dev);
1645 	rmcd_debug(RDEV, "net_%d", net->id);
1646 	kfree(net);
1647 }
1648 
1649 
1650 /*
1651  * rio_mport_add_riodev - creates a kernel RIO device object
1652  *
1653  * Allocates a RIO device data structure and initializes required fields based
1654  * on the device's configuration space contents.
1655  * If the device has switch capabilities, a switch-specific portion is
1656  * allocated and configured.
1657  */
1658 static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1659 				   void __user *arg)
1660 {
1661 	struct mport_dev *md = priv->md;
1662 	struct rio_rdev_info dev_info;
1663 	struct rio_dev *rdev;
1664 	struct rio_switch *rswitch = NULL;
1665 	struct rio_mport *mport;
1666 	struct device *dev;
1667 	size_t size;
1668 	u32 rval;
1669 	u32 swpinfo = 0;
1670 	u16 destid;
1671 	u8 hopcount;
1672 	int err;
1673 
1674 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1675 		return -EFAULT;
1676 	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1677 
1678 	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
1679 		   dev_info.comptag, dev_info.destid, dev_info.hopcount);
1680 
1681 	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
1682 	if (dev) {
1683 		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
1684 		put_device(dev);
1685 		return -EEXIST;
1686 	}
1687 
1688 	size = sizeof(*rdev);
1689 	mport = md->mport;
1690 	destid = dev_info.destid;
1691 	hopcount = dev_info.hopcount;
1692 
1693 	if (rio_mport_read_config_32(mport, destid, hopcount,
1694 				     RIO_PEF_CAR, &rval))
1695 		return -EIO;
1696 
1697 	if (rval & RIO_PEF_SWITCH) {
1698 		rio_mport_read_config_32(mport, destid, hopcount,
1699 					 RIO_SWP_INFO_CAR, &swpinfo);
1700 		size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
1701 	}
1702 
1703 	rdev = kzalloc(size, GFP_KERNEL);
1704 	if (rdev == NULL)
1705 		return -ENOMEM;
1706 
1707 	if (mport->net == NULL) {
1708 		struct rio_net *net;
1709 
1710 		net = rio_alloc_net(mport);
1711 		if (!net) {
1712 			err = -ENOMEM;
1713 			rmcd_debug(RDEV, "failed to allocate net object");
1714 			goto cleanup;
1715 		}
1716 
1717 		net->id = mport->id;
1718 		net->hport = mport;
1719 		dev_set_name(&net->dev, "rnet_%d", net->id);
1720 		net->dev.parent = &mport->dev;
1721 		net->dev.release = rio_release_net;
1722 		err = rio_add_net(net);
1723 		if (err) {
1724 			rmcd_debug(RDEV, "failed to register net, err=%d", err);
1725 			put_device(&net->dev);
1726 			mport->net = NULL;
1727 			goto cleanup;
1728 		}
1729 	}
1730 
1731 	rdev->net = mport->net;
1732 	rdev->pef = rval;
1733 	rdev->swpinfo = swpinfo;
1734 	rio_mport_read_config_32(mport, destid, hopcount,
1735 				 RIO_DEV_ID_CAR, &rval);
1736 	rdev->did = rval >> 16;
1737 	rdev->vid = rval & 0xffff;
1738 	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
1739 				 &rdev->device_rev);
1740 	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
1741 				 &rval);
1742 	rdev->asm_did = rval >> 16;
1743 	rdev->asm_vid = rval & 0xffff;
1744 	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
1745 				 &rval);
1746 	rdev->asm_rev = rval >> 16;
1747 
1748 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
1749 		rdev->efptr = rval & 0xffff;
1750 		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
1751 						hopcount, &rdev->phys_rmap);
1752 
1753 		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
1754 						hopcount, RIO_EFB_ERR_MGMNT);
1755 	}
1756 
1757 	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
1758 				 &rdev->src_ops);
1759 	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
1760 				 &rdev->dst_ops);
1761 
1762 	rdev->comp_tag = dev_info.comptag;
1763 	rdev->destid = destid;
1764 	/* hopcount is stored as specified by a caller, regardless of EP or SW */
1765 	rdev->hopcount = hopcount;
1766 
1767 	if (rdev->pef & RIO_PEF_SWITCH) {
1768 		rswitch = rdev->rswitch;
1769 		rswitch->route_table = NULL;
1770 	}
1771 
1772 	if (strlen(dev_info.name))
1773 		dev_set_name(&rdev->dev, "%s", dev_info.name);
1774 	else if (rdev->pef & RIO_PEF_SWITCH)
1775 		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
1776 			     rdev->comp_tag & RIO_CTAG_UDEVID);
1777 	else
1778 		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
1779 			     rdev->comp_tag & RIO_CTAG_UDEVID);
1780 
1781 	INIT_LIST_HEAD(&rdev->net_list);
1782 	rdev->dev.parent = &mport->net->dev;
1783 	rio_attach_device(rdev);
1784 	rdev->dev.release = rio_release_dev;
1785 
1786 	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
1787 		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
1788 				   0, 0xffff);
1789 	err = rio_add_device(rdev);
1790 	if (err) {
1791 		put_device(&rdev->dev);
1792 		return err;
1793 	}
1794 
1795 	rio_dev_get(rdev);
1796 
1797 	return 0;
1798 cleanup:
1799 	kfree(rdev);
1800 	return err;
1801 }
1802 
1803 static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1804 {
1805 	struct rio_rdev_info dev_info;
1806 	struct rio_dev *rdev = NULL;
1807 	struct device  *dev;
1808 	struct rio_mport *mport;
1809 	struct rio_net *net;
1810 
1811 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1812 		return -EFAULT;
1813 	dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1814 
1815 	mport = priv->md->mport;
1816 
1817 	/* If device name is specified, removal by name has priority */
1818 	if (strlen(dev_info.name)) {
1819 		dev = bus_find_device_by_name(&rio_bus_type, NULL,
1820 					      dev_info.name);
1821 		if (dev)
1822 			rdev = to_rio_dev(dev);
1823 	} else {
1824 		do {
1825 			rdev = rio_get_comptag(dev_info.comptag, rdev);
1826 			if (rdev && rdev->dev.parent == &mport->net->dev &&
1827 			    rdev->destid == dev_info.destid &&
1828 			    rdev->hopcount == dev_info.hopcount)
1829 				break;
1830 		} while (rdev);
1831 	}
1832 
1833 	if (!rdev) {
1834 		rmcd_debug(RDEV,
1835 			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
1836 			dev_info.name, dev_info.comptag, dev_info.destid,
1837 			dev_info.hopcount);
1838 		return -ENODEV;
1839 	}
1840 
1841 	net = rdev->net;
1842 	rio_dev_put(rdev);
1843 	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
1844 
1845 	if (list_empty(&net->devices)) {
1846 		rio_free_net(net);
1847 		mport->net = NULL;
1848 	}
1849 
1850 	return 0;
1851 }
1852 
1853 /*
1854  * Mport cdev management
1855  */
1856 
1857 /*
1858  * mport_cdev_open() - Open character device (mport)
1859  */
1860 static int mport_cdev_open(struct inode *inode, struct file *filp)
1861 {
1862 	int ret;
1863 	int minor = iminor(inode);
1864 	struct mport_dev *chdev;
1865 	struct mport_cdev_priv *priv;
1866 
1867 	/* Test for valid device */
1868 	if (minor >= RIO_MAX_MPORTS) {
1869 		rmcd_error("Invalid minor device number");
1870 		return -EINVAL;
1871 	}
1872 
1873 	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
1874 
1875 	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
1876 
1877 	if (atomic_read(&chdev->active) == 0)
1878 		return -ENODEV;
1879 
1880 	get_device(&chdev->dev);
1881 
1882 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1883 	if (!priv) {
1884 		put_device(&chdev->dev);
1885 		return -ENOMEM;
1886 	}
1887 
1888 	priv->md = chdev;
1889 
1890 	INIT_LIST_HEAD(&priv->db_filters);
1891 	INIT_LIST_HEAD(&priv->pw_filters);
1892 	spin_lock_init(&priv->fifo_lock);
1893 	init_waitqueue_head(&priv->event_rx_wait);
1894 	ret = kfifo_alloc(&priv->event_fifo,
1895 			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
1896 			  GFP_KERNEL);
1897 	if (ret < 0) {
1898 		put_device(&chdev->dev);
1899 		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
1900 		ret = -ENOMEM;
1901 		goto err_fifo;
1902 	}
1903 
1904 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1905 	INIT_LIST_HEAD(&priv->async_list);
1906 	spin_lock_init(&priv->req_lock);
1907 	mutex_init(&priv->dma_lock);
1908 #endif
1909 	mutex_lock(&chdev->file_mutex);
1910 	list_add_tail(&priv->list, &chdev->file_list);
1911 	mutex_unlock(&chdev->file_mutex);
1912 
1913 	filp->private_data = priv;
1914 	goto out;
1915 err_fifo:
1916 	kfree(priv);
1917 out:
1918 	return ret;
1919 }
1920 
1921 static int mport_cdev_fasync(int fd, struct file *filp, int mode)
1922 {
1923 	struct mport_cdev_priv *priv = filp->private_data;
1924 
1925 	return fasync_helper(fd, filp, mode, &priv->async_queue);
1926 }
1927 
1928 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1929 static void mport_cdev_release_dma(struct file *filp)
1930 {
1931 	struct mport_cdev_priv *priv = filp->private_data;
1932 	struct mport_dev *md;
1933 	struct mport_dma_req *req, *req_next;
1934 	unsigned long tmo = msecs_to_jiffies(dma_timeout);
1935 	long wret;
1936 	LIST_HEAD(list);
1937 
1938 	rmcd_debug(EXIT, "from filp=%p %s(%d)",
1939 		   filp, current->comm, task_pid_nr(current));
1940 
1941 	if (!priv->dmach) {
1942 		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
1943 		return;
1944 	}
1945 
1946 	md = priv->md;
1947 
1948 	spin_lock(&priv->req_lock);
1949 	if (!list_empty(&priv->async_list)) {
1950 		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
1951 			   filp, current->comm, task_pid_nr(current));
1952 		list_splice_init(&priv->async_list, &list);
1953 	}
1954 	spin_unlock(&priv->req_lock);
1955 
1956 	if (!list_empty(&list)) {
1957 		rmcd_debug(EXIT, "temp list not empty");
1958 		list_for_each_entry_safe(req, req_next, &list, node) {
1959 			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
1960 				   req->filp, req->cookie,
				   completion_done(&req->req_comp) ? "yes" : "no");
1962 			list_del(&req->node);
1963 			kref_put(&req->refcount, dma_req_free);
1964 		}
1965 	}
1966 
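	/*
	 * Drop this file's channel reference and wait (up to dma_timeout
	 * msec) for any in-flight requests to release the channel.
	 */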
1967 	put_dma_channel(priv);
1968 	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1969 
1970 	if (wret <= 0) {
1971 		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1972 			current->comm, task_pid_nr(current), wret);
1973 	}
1974 
1975 	if (priv->dmach != priv->md->dma_chan) {
1976 		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
1977 			   filp, current->comm, task_pid_nr(current));
1978 		rio_release_dma(priv->dmach);
1979 	} else {
1980 		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
1981 		kref_put(&md->dma_ref, mport_release_def_dma);
1982 	}
1983 
1984 	priv->dmach = NULL;
1985 }
1986 #else
#define mport_cdev_release_dma(filp) do {} while (0)
1988 #endif
1989 
1990 /*
1991  * mport_cdev_release() - Release character device
1992  */
1993 static int mport_cdev_release(struct inode *inode, struct file *filp)
1994 {
1995 	struct mport_cdev_priv *priv = filp->private_data;
1996 	struct mport_dev *chdev;
1997 	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
1998 	struct rio_mport_db_filter *db_filter, *db_filter_next;
1999 	struct rio_mport_mapping *map, *_map;
2000 	unsigned long flags;
2001 
2002 	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2003 
2004 	chdev = priv->md;
2005 	mport_cdev_release_dma(filp);
2006 
2007 	priv->event_mask = 0;
2008 
2009 	spin_lock_irqsave(&chdev->pw_lock, flags);
2010 	if (!list_empty(&priv->pw_filters)) {
2011 		list_for_each_entry_safe(pw_filter, pw_filter_next,
2012 					 &priv->pw_filters, priv_node)
2013 			rio_mport_delete_pw_filter(pw_filter);
2014 	}
2015 	spin_unlock_irqrestore(&chdev->pw_lock, flags);
2016 
2017 	spin_lock_irqsave(&chdev->db_lock, flags);
2018 	list_for_each_entry_safe(db_filter, db_filter_next,
2019 				 &priv->db_filters, priv_node) {
2020 		rio_mport_delete_db_filter(db_filter);
2021 	}
2022 	spin_unlock_irqrestore(&chdev->db_lock, flags);
2023 
2024 	kfifo_free(&priv->event_fifo);
2025 
2026 	mutex_lock(&chdev->buf_mutex);
2027 	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2028 		if (map->filp == filp) {
2029 			rmcd_debug(EXIT, "release mapping %p filp=%p",
2030 				   map->virt_addr, filp);
2031 			kref_put(&map->ref, mport_release_mapping);
2032 		}
2033 	}
2034 	mutex_unlock(&chdev->buf_mutex);
2035 
2036 	mport_cdev_fasync(-1, filp, 0);
2037 	filp->private_data = NULL;
2038 	mutex_lock(&chdev->file_mutex);
2039 	list_del(&priv->list);
2040 	mutex_unlock(&chdev->file_mutex);
2041 	put_device(&chdev->dev);
2042 	kfree(priv);
2043 	return 0;
2044 }
2045 
2046 /*
2047  * mport_cdev_ioctl() - IOCTLs for character device
2048  */
2049 static long mport_cdev_ioctl(struct file *filp,
2050 		unsigned int cmd, unsigned long arg)
2051 {
2052 	int err = -EINVAL;
2053 	struct mport_cdev_priv *data = filp->private_data;
2054 	struct mport_dev *md = data->md;
2055 
2056 	if (atomic_read(&md->active) == 0)
2057 		return -ENODEV;
2058 
2059 	switch (cmd) {
2060 	case RIO_MPORT_MAINT_READ_LOCAL:
2061 		return rio_mport_maint_rd(data, (void __user *)arg, 1);
2062 	case RIO_MPORT_MAINT_WRITE_LOCAL:
2063 		return rio_mport_maint_wr(data, (void __user *)arg, 1);
2064 	case RIO_MPORT_MAINT_READ_REMOTE:
2065 		return rio_mport_maint_rd(data, (void __user *)arg, 0);
2066 	case RIO_MPORT_MAINT_WRITE_REMOTE:
2067 		return rio_mport_maint_wr(data, (void __user *)arg, 0);
2068 	case RIO_MPORT_MAINT_HDID_SET:
2069 		return maint_hdid_set(data, (void __user *)arg);
2070 	case RIO_MPORT_MAINT_COMPTAG_SET:
2071 		return maint_comptag_set(data, (void __user *)arg);
2072 	case RIO_MPORT_MAINT_PORT_IDX_GET:
2073 		return maint_port_idx_get(data, (void __user *)arg);
2074 	case RIO_MPORT_GET_PROPERTIES:
2075 		md->properties.hdid = md->mport->host_deviceid;
2076 		if (copy_to_user((void __user *)arg, &(md->properties),
2077 				 sizeof(md->properties)))
2078 			return -EFAULT;
2079 		return 0;
2080 	case RIO_ENABLE_DOORBELL_RANGE:
2081 		return rio_mport_add_db_filter(data, (void __user *)arg);
2082 	case RIO_DISABLE_DOORBELL_RANGE:
2083 		return rio_mport_remove_db_filter(data, (void __user *)arg);
2084 	case RIO_ENABLE_PORTWRITE_RANGE:
2085 		return rio_mport_add_pw_filter(data, (void __user *)arg);
2086 	case RIO_DISABLE_PORTWRITE_RANGE:
2087 		return rio_mport_remove_pw_filter(data, (void __user *)arg);
2088 	case RIO_SET_EVENT_MASK:
2089 		data->event_mask = (u32)arg;
2090 		return 0;
2091 	case RIO_GET_EVENT_MASK:
2092 		if (copy_to_user((void __user *)arg, &data->event_mask,
2093 				    sizeof(u32)))
2094 			return -EFAULT;
2095 		return 0;
2096 	case RIO_MAP_OUTBOUND:
2097 		return rio_mport_obw_map(filp, (void __user *)arg);
2098 	case RIO_MAP_INBOUND:
2099 		return rio_mport_map_inbound(filp, (void __user *)arg);
2100 	case RIO_UNMAP_OUTBOUND:
2101 		return rio_mport_obw_free(filp, (void __user *)arg);
2102 	case RIO_UNMAP_INBOUND:
2103 		return rio_mport_inbound_free(filp, (void __user *)arg);
2104 	case RIO_ALLOC_DMA:
2105 		return rio_mport_alloc_dma(filp, (void __user *)arg);
2106 	case RIO_FREE_DMA:
2107 		return rio_mport_free_dma(filp, (void __user *)arg);
2108 	case RIO_WAIT_FOR_ASYNC:
2109 		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2110 	case RIO_TRANSFER:
2111 		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2112 	case RIO_DEV_ADD:
2113 		return rio_mport_add_riodev(data, (void __user *)arg);
2114 	case RIO_DEV_DEL:
2115 		return rio_mport_del_riodev(data, (void __user *)arg);
2116 	default:
2117 		break;
2118 	}
2119 
2120 	return err;
2121 }
2122 
2123 /*
2124  * mport_release_mapping - free mapping resources and info structure
2125  * @ref: a pointer to the kref within struct rio_mport_mapping
2126  *
2127  * NOTE: Shall be called while holding buf_mutex.
2128  */
2129 static void mport_release_mapping(struct kref *ref)
2130 {
2131 	struct rio_mport_mapping *map =
2132 			container_of(ref, struct rio_mport_mapping, ref);
2133 	struct rio_mport *mport = map->md->mport;
2134 
2135 	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2136 		   map->dir, map->virt_addr,
2137 		   &map->phys_addr, mport->name);
2138 
2139 	list_del(&map->node);
2140 
2141 	switch (map->dir) {
2142 	case MAP_INBOUND:
2143 		rio_unmap_inb_region(mport, map->phys_addr);
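		/*
		 * Inbound windows are backed by a DMA-coherent buffer;
		 * fall through to free it.
		 */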
2144 		fallthrough;
2145 	case MAP_DMA:
2146 		dma_free_coherent(mport->dev.parent, map->size,
2147 				  map->virt_addr, map->phys_addr);
2148 		break;
2149 	case MAP_OUTBOUND:
2150 		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2151 		break;
2152 	}
2153 	kfree(map);
2154 }
2155 
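/*
 * VMA open/close callbacks: each VMA mapping a buffer holds its own
 * reference on the rio_mport_mapping, so the backing memory stays valid
 * until the last user unmaps it.
 */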
2156 static void mport_mm_open(struct vm_area_struct *vma)
2157 {
2158 	struct rio_mport_mapping *map = vma->vm_private_data;
2159 
2160 	rmcd_debug(MMAP, "%pad", &map->phys_addr);
2161 	kref_get(&map->ref);
2162 }
2163 
2164 static void mport_mm_close(struct vm_area_struct *vma)
2165 {
2166 	struct rio_mport_mapping *map = vma->vm_private_data;
2167 
2168 	rmcd_debug(MMAP, "%pad", &map->phys_addr);
2169 	mutex_lock(&map->md->buf_mutex);
2170 	kref_put(&map->ref, mport_release_mapping);
2171 	mutex_unlock(&map->md->buf_mutex);
2172 }
2173 
2174 static const struct vm_operations_struct vm_ops = {
2175 	.open =	mport_mm_open,
2176 	.close = mport_mm_close,
2177 };
2178 
2179 static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2180 {
2181 	struct mport_cdev_priv *priv = filp->private_data;
2182 	struct mport_dev *md;
2183 	size_t size = vma->vm_end - vma->vm_start;
2184 	dma_addr_t baddr;
2185 	unsigned long offset;
2186 	int found = 0, ret;
2187 	struct rio_mport_mapping *map;
2188 
2189 	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2190 		   (unsigned int)size, vma->vm_pgoff);
2191 
2192 	md = priv->md;
2193 	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2194 
2195 	mutex_lock(&md->buf_mutex);
2196 	list_for_each_entry(map, &md->mappings, node) {
2197 		if (baddr >= map->phys_addr &&
2198 		    baddr < (map->phys_addr + map->size)) {
2199 			found = 1;
2200 			break;
2201 		}
2202 	}
2203 	mutex_unlock(&md->buf_mutex);
2204 
2205 	if (!found)
2206 		return -ENOMEM;
2207 
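	/*
	 * vm_pgoff supplied by user space is the physical page offset of the
	 * target buffer; rebase it to an offset within the matched mapping.
	 */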
2208 	offset = baddr - map->phys_addr;
2209 
2210 	if (size + offset > map->size)
2211 		return -EINVAL;
2212 
2213 	vma->vm_pgoff = offset >> PAGE_SHIFT;
2214 	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2215 
	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) {
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	} else if (map->dir == MAP_OUTBOUND) {
2220 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2221 		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2222 	} else {
2223 		rmcd_error("Attempt to mmap unsupported mapping type");
2224 		ret = -EIO;
2225 	}
2226 
2227 	if (!ret) {
2228 		vma->vm_private_data = map;
2229 		vma->vm_ops = &vm_ops;
2230 		mport_mm_open(vma);
2231 	} else {
2232 		rmcd_error("MMAP exit with err=%d", ret);
2233 	}
2234 
2235 	return ret;
2236 }
2237 
2238 static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
2239 {
2240 	struct mport_cdev_priv *priv = filp->private_data;
2241 
2242 	poll_wait(filp, &priv->event_rx_wait, wait);
2243 	if (kfifo_len(&priv->event_fifo))
2244 		return EPOLLIN | EPOLLRDNORM;
2245 
2246 	return 0;
2247 }
2248 
2249 static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2250 			loff_t *ppos)
2251 {
2252 	struct mport_cdev_priv *priv = filp->private_data;
2253 	int copied;
2254 	ssize_t ret;
2255 
2256 	if (!count)
2257 		return 0;
2258 
2259 	if (kfifo_is_empty(&priv->event_fifo) &&
2260 	    (filp->f_flags & O_NONBLOCK))
2261 		return -EAGAIN;
2262 
2263 	if (count % sizeof(struct rio_event))
2264 		return -EINVAL;
2265 
2266 	ret = wait_event_interruptible(priv->event_rx_wait,
2267 					kfifo_len(&priv->event_fifo) != 0);
2268 	if (ret)
2269 		return ret;
2270 
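	/* Copy out whole rio_event records until the requested count is met. */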
2271 	while (ret < count) {
2272 		if (kfifo_to_user(&priv->event_fifo, buf,
2273 		      sizeof(struct rio_event), &copied))
2274 			return -EFAULT;
2275 		ret += copied;
2276 		buf += copied;
2277 	}
2278 
2279 	return ret;
2280 }
2281 
2282 static ssize_t mport_write(struct file *filp, const char __user *buf,
2283 			 size_t count, loff_t *ppos)
2284 {
2285 	struct mport_cdev_priv *priv = filp->private_data;
2286 	struct rio_mport *mport = priv->md->mport;
2287 	struct rio_event event;
2288 	int len, ret;
2289 
2290 	if (!count)
2291 		return 0;
2292 
2293 	if (count % sizeof(event))
2294 		return -EINVAL;
2295 
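	/* Each record must be a RIO_DOORBELL event; send one doorbell per record. */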
2296 	len = 0;
2297 	while ((count - len) >= (int)sizeof(event)) {
2298 		if (copy_from_user(&event, buf, sizeof(event)))
2299 			return -EFAULT;
2300 
2301 		if (event.header != RIO_DOORBELL)
2302 			return -EINVAL;
2303 
2304 		ret = rio_mport_send_doorbell(mport,
2305 					      event.u.doorbell.rioid,
2306 					      event.u.doorbell.payload);
2307 		if (ret < 0)
2308 			return ret;
2309 
2310 		len += sizeof(event);
2311 		buf += sizeof(event);
2312 	}
2313 
2314 	return len;
2315 }
2316 
2317 static const struct file_operations mport_fops = {
2318 	.owner		= THIS_MODULE,
2319 	.open		= mport_cdev_open,
2320 	.release	= mport_cdev_release,
2321 	.poll		= mport_cdev_poll,
2322 	.read		= mport_read,
2323 	.write		= mport_write,
2324 	.mmap		= mport_cdev_mmap,
2325 	.fasync		= mport_cdev_fasync,
2326 	.unlocked_ioctl = mport_cdev_ioctl
2327 };
2328 
2329 /*
2330  * Character device management
2331  */
2332 
2333 static void mport_device_release(struct device *dev)
2334 {
2335 	struct mport_dev *md;
2336 
2337 	rmcd_debug(EXIT, "%s", dev_name(dev));
2338 	md = container_of(dev, struct mport_dev, dev);
2339 	kfree(md);
2340 }
2341 
2342 /*
2343  * mport_cdev_add() - Create mport_dev from rio_mport
2344  * @mport:	RapidIO master port
2345  */
2346 static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2347 {
2348 	int ret = 0;
2349 	struct mport_dev *md;
2350 	struct rio_mport_attr attr;
2351 
2352 	md = kzalloc(sizeof(*md), GFP_KERNEL);
2353 	if (!md) {
2354 		rmcd_error("Unable allocate a device object");
2355 		return NULL;
2356 	}
2357 
2358 	md->mport = mport;
2359 	mutex_init(&md->buf_mutex);
2360 	mutex_init(&md->file_mutex);
2361 	INIT_LIST_HEAD(&md->file_list);
2362 
2363 	device_initialize(&md->dev);
2364 	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
2365 	md->dev.class = &dev_class;
2366 	md->dev.parent = &mport->dev;
2367 	md->dev.release = mport_device_release;
2368 	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
2369 	atomic_set(&md->active, 1);
2370 
2371 	cdev_init(&md->cdev, &mport_fops);
2372 	md->cdev.owner = THIS_MODULE;
2373 
2374 	INIT_LIST_HEAD(&md->doorbells);
2375 	spin_lock_init(&md->db_lock);
2376 	INIT_LIST_HEAD(&md->portwrites);
2377 	spin_lock_init(&md->pw_lock);
2378 	INIT_LIST_HEAD(&md->mappings);
2379 
2380 	md->properties.id = mport->id;
2381 	md->properties.sys_size = mport->sys_size;
2382 	md->properties.hdid = mport->host_deviceid;
2383 	md->properties.index = mport->index;
2384 
	/* The transfer_mode property is reported to user space through the
	 * RIO_MPORT_GET_PROPERTIES query interface.
	 */
2388 #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2389 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2390 #else
2391 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2392 #endif
2393 
2394 	ret = cdev_device_add(&md->cdev, &md->dev);
2395 	if (ret) {
2396 		rmcd_error("Failed to register mport %d (err=%d)",
2397 		       mport->id, ret);
2398 		goto err_cdev;
2399 	}
2400 	ret = rio_query_mport(mport, &attr);
2401 	if (!ret) {
2402 		md->properties.flags = attr.flags;
2403 		md->properties.link_speed = attr.link_speed;
2404 		md->properties.link_width = attr.link_width;
2405 		md->properties.dma_max_sge = attr.dma_max_sge;
2406 		md->properties.dma_max_size = attr.dma_max_size;
2407 		md->properties.dma_align = attr.dma_align;
2408 		md->properties.cap_sys_size = 0;
2409 		md->properties.cap_transfer_mode = 0;
2410 		md->properties.cap_addr_size = 0;
	} else {
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);
	}
2414 
2415 	mutex_lock(&mport_devs_lock);
2416 	list_add_tail(&md->node, &mport_devs);
2417 	mutex_unlock(&mport_devs_lock);
2418 
2419 	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
2420 		mport->name, MAJOR(dev_number), mport->id);
2421 
2422 	return md;
2423 
2424 err_cdev:
2425 	put_device(&md->dev);
2426 	return NULL;
2427 }
2428 
2429 /*
2430  * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2431  *                              associated DMA channels.
2432  */
2433 static void mport_cdev_terminate_dma(struct mport_dev *md)
2434 {
2435 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
2436 	struct mport_cdev_priv *client;
2437 
2438 	rmcd_debug(DMA, "%s", dev_name(&md->dev));
2439 
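	/*
	 * Terminate and release DMA channels still held by open files, then
	 * the default channel owned by the mport device itself.
	 */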
2440 	mutex_lock(&md->file_mutex);
2441 	list_for_each_entry(client, &md->file_list, list) {
2442 		if (client->dmach) {
2443 			dmaengine_terminate_all(client->dmach);
2444 			rio_release_dma(client->dmach);
2445 		}
2446 	}
2447 	mutex_unlock(&md->file_mutex);
2448 
2449 	if (md->dma_chan) {
2450 		dmaengine_terminate_all(md->dma_chan);
2451 		rio_release_dma(md->dma_chan);
2452 		md->dma_chan = NULL;
2453 	}
2454 #endif
2455 }
2456 
2457 
2458 /*
2459  * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2460  *                            mport_cdev files.
2461  */
2462 static int mport_cdev_kill_fasync(struct mport_dev *md)
2463 {
2464 	unsigned int files = 0;
2465 	struct mport_cdev_priv *client;
2466 
2467 	mutex_lock(&md->file_mutex);
2468 	list_for_each_entry(client, &md->file_list, list) {
2469 		if (client->async_queue)
2470 			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2471 		files++;
2472 	}
2473 	mutex_unlock(&md->file_mutex);
2474 	return files;
2475 }
2476 
2477 /*
2478  * mport_cdev_remove() - Remove mport character device
2479  * @dev:	Mport device to remove
2480  */
2481 static void mport_cdev_remove(struct mport_dev *md)
2482 {
2483 	struct rio_mport_mapping *map, *_map;
2484 
2485 	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
2486 	atomic_set(&md->active, 0);
2487 	mport_cdev_terminate_dma(md);
2488 	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
2489 	cdev_device_del(&md->cdev, &md->dev);
2490 	mport_cdev_kill_fasync(md);
2491 
2492 	/* TODO: do we need to give clients some time to close file
2493 	 * descriptors? Simple wait for XX, or kref?
2494 	 */
2495 
2496 	/*
2497 	 * Release DMA buffers allocated for the mport device.
2498 	 * Disable associated inbound Rapidio requests mapping if applicable.
2499 	 */
2500 	mutex_lock(&md->buf_mutex);
2501 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
2502 		kref_put(&map->ref, mport_release_mapping);
2503 	}
2504 	mutex_unlock(&md->buf_mutex);
2505 
	if (!list_empty(&md->mappings))
		rmcd_warn("%s pending mappings on removal", md->mport->name);
2509 
2510 	rio_release_inb_dbell(md->mport, 0, 0x0fff);
2511 
2512 	put_device(&md->dev);
2513 }
2514 
2515 /*
2516  * RIO rio_mport_interface driver
2517  */
2518 
2519 /*
2520  * mport_add_mport() - Add rio_mport from LDM device struct
2521  * @dev:		Linux device model struct
2522  */
2523 static int mport_add_mport(struct device *dev)
2524 {
2525 	struct rio_mport *mport = NULL;
2526 	struct mport_dev *chdev = NULL;
2527 
2528 	mport = to_rio_mport(dev);
2529 	if (!mport)
2530 		return -ENODEV;
2531 
2532 	chdev = mport_cdev_add(mport);
2533 	if (!chdev)
2534 		return -ENODEV;
2535 
2536 	return 0;
2537 }
2538 
2539 /*
2540  * mport_remove_mport() - Remove rio_mport from global list
2541  * TODO remove device from global mport_dev list
2542  */
2543 static void mport_remove_mport(struct device *dev)
2544 {
2545 	struct rio_mport *mport = NULL;
2546 	struct mport_dev *chdev;
2547 	int found = 0;
2548 
2549 	mport = to_rio_mport(dev);
2550 	rmcd_debug(EXIT, "Remove %s", mport->name);
2551 
2552 	mutex_lock(&mport_devs_lock);
2553 	list_for_each_entry(chdev, &mport_devs, node) {
2554 		if (chdev->mport->id == mport->id) {
2555 			atomic_set(&chdev->active, 0);
2556 			list_del(&chdev->node);
2557 			found = 1;
2558 			break;
2559 		}
2560 	}
2561 	mutex_unlock(&mport_devs_lock);
2562 
2563 	if (found)
2564 		mport_cdev_remove(chdev);
2565 }
2566 
2567 /* the rio_mport_interface is used to handle local mport devices */
2568 static struct class_interface rio_mport_interface __refdata = {
2569 	.class		= &rio_mport_class,
2570 	.add_dev	= mport_add_mport,
2571 	.remove_dev	= mport_remove_mport,
2572 };
2573 
2574 /*
2575  * Linux kernel module
2576  */
2577 
2578 /*
2579  * mport_init - Driver module loading
2580  */
2581 static int __init mport_init(void)
2582 {
2583 	int ret;
2584 
2585 	/* Create device class needed by udev */
2586 	ret = class_register(&dev_class);
2587 	if (ret) {
2588 		rmcd_error("Unable to create " DRV_NAME " class");
2589 		return ret;
2590 	}
2591 
2592 	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2593 	if (ret < 0)
2594 		goto err_chr;
2595 
	rmcd_debug(INIT, "Registered chrdev region with major=%d", MAJOR(dev_number));
2597 
	/* Register the rio_mport class interface */
2599 	ret = class_interface_register(&rio_mport_interface);
2600 	if (ret) {
2601 		rmcd_error("class_interface_register() failed, err=%d", ret);
2602 		goto err_cli;
2603 	}
2604 
2605 	return 0;
2606 
2607 err_cli:
2608 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2609 err_chr:
2610 	class_unregister(&dev_class);
2611 	return ret;
2612 }
2613 
2614 /**
2615  * mport_exit - Driver module unloading
2616  */
2617 static void __exit mport_exit(void)
2618 {
2619 	class_interface_unregister(&rio_mport_interface);
2620 	class_unregister(&dev_class);
2621 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2622 }
2623 
2624 module_init(mport_init);
2625 module_exit(mport_exit);
2626