Lines matching references to idev

219 struct uio_device *idev = dev_get_drvdata(dev);
222 mutex_lock(&idev->info_lock);
223 if (!idev->info) {
229 ret = sprintf(buf, "%s\n", idev->info->name);
232 mutex_unlock(&idev->info_lock);
240 struct uio_device *idev = dev_get_drvdata(dev);
243 mutex_lock(&idev->info_lock);
244 if (!idev->info) {
250 ret = sprintf(buf, "%s\n", idev->info->version);
253 mutex_unlock(&idev->info_lock);
261 struct uio_device *idev = dev_get_drvdata(dev);
262 return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
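
The matches above appear to come from the UIO core's sysfs show routines (drivers/uio/uio.c): "name" and "version" take info_lock before dereferencing idev->info so a concurrent unregister cannot free it underneath them, while "event" just prints the atomic event counter. From userspace the same values are visible under /sys/class/uio/uioX/. A minimal sketch, assuming a uio0 instance exists:

/* Sketch: read the sysfs attributes backed by the show routines above.
 * The uio0 instance is an assumption; adjust to the device on the system.
 */
#include <stdio.h>

static void print_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* value already ends in '\n' */
	fclose(f);
}

int main(void)
{
	print_attr("/sys/class/uio/uio0/name");    /* idev->info->name */
	print_attr("/sys/class/uio/uio0/version"); /* idev->info->version */
	print_attr("/sys/class/uio/uio0/event");   /* atomic_read(&idev->event) */
	return 0;
}
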
285 static int uio_dev_add_attributes(struct uio_device *idev)
297 mem = &idev->info->mem[mi];
302 idev->map_dir = kobject_create_and_add("maps",
303 &idev->dev.kobj);
304 if (!idev->map_dir) {
317 ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
326 port = &idev->info->port[pi];
331 idev->portio_dir = kobject_create_and_add("portio",
332 &idev->dev.kobj);
333 if (!idev->portio_dir) {
346 ret = kobject_add(&portio->kobj, idev->portio_dir,
361 port = &idev->info->port[pi];
365 kobject_put(idev->portio_dir);
370 mem = &idev->info->mem[mi];
374 kobject_put(idev->map_dir);
375 dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
379 static void uio_dev_del_attributes(struct uio_device *idev)
386 mem = &idev->info->mem[i];
391 kobject_put(idev->map_dir);
394 port = &idev->info->port[i];
399 kobject_put(idev->portio_dir);
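
uio_dev_add_attributes() creates the "maps" and "portio" kobject directories under the device, one child per populated mem[] or port[] entry, and uio_dev_del_attributes() drops them again with kobject_put(). Userspace typically reads mapN/size (and addr/offset) before deciding how much to mmap(). A minimal sketch, assuming uio0 exposes at least one mapping:

/* Sketch: read the size of mapping 0 as exported through the "maps"
 * kobjects created above. The path and the presence of map0 are assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 0;
	FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");

	if (!f) {
		perror("map0/size");
		return 1;
	}
	if (fscanf(f, "%llx", &size) == 1)	/* attribute is printed as hex */
		printf("map0 is 0x%llx bytes\n", size);
	fclose(f);
	return 0;
}
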
402 static int uio_get_minor(struct uio_device *idev)
407 retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
409 idev->minor = retval;
412 dev_err(&idev->dev, "too many uio devices\n");
432 struct uio_device *idev = info->uio_dev;
434 atomic_inc(&idev->event);
435 wake_up_interruptible(&idev->wait);
436 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
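
uio_event_notify() is the wake-up path: it bumps idev->event, wakes any reader sleeping on idev->wait, and sends SIGIO to fasync listeners. Drivers that handle their own interrupt source call it directly instead of letting the UIO core request the IRQ. A minimal sketch of that pattern; the handler name and uio_info instance are hypothetical, and info->irq would be set to UIO_IRQ_CUSTOM:

/* Sketch: a driver that owns its IRQ and forwards events to UIO by
 * calling uio_event_notify(). my_irq_handler and my_uio_info are
 * hypothetical names.
 */
#include <linux/interrupt.h>
#include <linux/uio_driver.h>

static struct uio_info my_uio_info;	/* filled in at probe time */

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/* ...acknowledge/mask the interrupt in device registers here... */

	uio_event_notify(&my_uio_info);	/* wake readers, poll(), SIGIO */
	return IRQ_HANDLED;
}
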
447 struct uio_device *idev = (struct uio_device *)dev_id;
450 ret = idev->info->handler(irq, idev->info);
464 struct uio_device *idev = (struct uio_device *)dev_id;
466 uio_event_notify(idev->info);
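
When info->irq is a real interrupt line, the UIO core requests it itself and dispatches to the driver's handler callback; only when that callback reports IRQ_HANDLED does the core go on to the uio_event_notify() step shown above. A minimal sketch of such a callback; the register offset and bit layout are assumptions:

/* Sketch: an info->handler callback as invoked from the lines above.
 * Returning IRQ_HANDLED lets the UIO core notify userspace; IRQ_NONE
 * means "not ours" on a shared line. MY_IRQ_STATUS is hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uio_driver.h>

#define MY_IRQ_STATUS	0x04	/* assumed device register */

static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	void __iomem *regs = info->mem[0].internal_addr;

	if (!(ioread32(regs + MY_IRQ_STATUS) & 0x1))
		return IRQ_NONE;		/* shared IRQ, not this device */

	iowrite32(0x1, regs + MY_IRQ_STATUS);	/* ack/clear in hardware */
	return IRQ_HANDLED;			/* core then notifies userspace */
}
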
478 struct uio_device *idev;
483 idev = idr_find(&uio_idr, iminor(inode));
484 if (!idev) {
489 get_device(&idev->dev);
492 if (!try_module_get(idev->owner)) {
503 listener->dev = idev;
504 listener->event_count = atomic_read(&idev->event);
507 mutex_lock(&idev->info_lock);
508 if (!idev->info) {
509 mutex_unlock(&idev->info_lock);
514 if (idev->info->open)
515 ret = idev->info->open(idev->info, inode);
516 mutex_unlock(&idev->info_lock);
526 module_put(idev->owner);
529 put_device(&idev->dev);
538 struct uio_device *idev = listener->dev;
540 return fasync_helper(fd, filep, on, &idev->async_queue);
547 struct uio_device *idev = listener->dev;
549 mutex_lock(&idev->info_lock);
550 if (idev->info && idev->info->release)
551 ret = idev->info->release(idev->info, inode);
552 mutex_unlock(&idev->info_lock);
554 module_put(idev->owner);
556 put_device(&idev->dev);
563 struct uio_device *idev = listener->dev;
566 mutex_lock(&idev->info_lock);
567 if (!idev->info || !idev->info->irq)
569 mutex_unlock(&idev->info_lock);
574 poll_wait(filep, &idev->wait, wait);
575 if (listener->event_count != atomic_read(&idev->event))
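
uio_poll() reports the descriptor readable once listener->event_count no longer matches idev->event, so the interrupt fd can be multiplexed with poll()/select()/epoll. A minimal sketch, assuming an already-open /dev/uioX descriptor:

/* Sketch: wait up to timeout_ms for an interrupt using poll(); the fd
 * becomes POLLIN when the event counter moves past the count the
 * listener last saw, and the read() consumes the new count.
 */
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

static int wait_for_irq(int uio_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = uio_fd, .events = POLLIN };
	uint32_t count;

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;			/* timeout or error */
	if (read(uio_fd, &count, sizeof(count)) != sizeof(count))
		return -1;
	return 0;
}
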
584 struct uio_device *idev = listener->dev;
592 add_wait_queue(&idev->wait, &wait);
595 mutex_lock(&idev->info_lock);
596 if (!idev->info || !idev->info->irq) {
598 mutex_unlock(&idev->info_lock);
601 mutex_unlock(&idev->info_lock);
605 event_count = atomic_read(&idev->event);
630 remove_wait_queue(&idev->wait, &wait);
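
uio_read() parks the caller on idev->wait and returns the 32-bit event counter once it differs from the count the listener last saw. The simplest userspace consumer therefore just blocks in read() on the device node. A minimal sketch, assuming /dev/uio0:

/* Sketch: block until the device raises an interrupt. Each successful
 * read() returns the current 4-byte event count maintained by
 * atomic_read(&idev->event) on the kernel side. /dev/uio0 is assumed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t count;
	int fd = open("/dev/uio0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("interrupt, total events: %u\n", count);

	close(fd);
	return 0;
}
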
639 struct uio_device *idev = listener->dev;
649 mutex_lock(&idev->info_lock);
650 if (!idev->info) {
655 if (!idev->info->irq) {
660 if (!idev->info->irqcontrol) {
665 retval = idev->info->irqcontrol(idev->info, irq_on);
668 mutex_unlock(&idev->info_lock);
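
uio_write() is the mirror image: a 4-byte value written to the device node is handed to the driver's irqcontrol() callback, conventionally 1 to enable and 0 to disable or re-arm the interrupt. A minimal sketch of the userspace side, assuming the fd comes from an open() of /dev/uio0 as above:

/* Sketch: drive idev->info->irqcontrol() by writing a 4-byte value to
 * the open UIO file descriptor. Returns 0 on success, -1 on error
 * (e.g. the driver registered no irqcontrol hook).
 */
#include <stdint.h>
#include <unistd.h>

static int uio_irq_enable(int uio_fd, int enable)
{
	uint32_t val = enable ? 1 : 0;

	if (write(uio_fd, &val, sizeof(val)) != sizeof(val))
		return -1;
	return 0;
}
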
674 struct uio_device *idev = vma->vm_private_data;
677 if (idev->info->mem[vma->vm_pgoff].size == 0)
686 struct uio_device *idev = vmf->vma->vm_private_data;
693 mutex_lock(&idev->info_lock);
694 if (!idev->info) {
711 addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
712 if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
720 mutex_unlock(&idev->info_lock);
744 struct uio_device *idev = vma->vm_private_data;
750 mem = idev->info->mem + mi;
758 if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
779 struct uio_device *idev = vma->vm_private_data;
789 mem = idev->info->mem + mi;
823 struct uio_device *idev = listener->dev;
831 vma->vm_private_data = idev;
833 mutex_lock(&idev->info_lock);
834 if (!idev->info) {
846 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
847 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
853 if (idev->info->mmap) {
854 ret = idev->info->mmap(idev->info, vma);
858 switch (idev->info->mem[mi].memtype) {
875 mutex_unlock(&idev->info_lock);
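
The mmap path treats vma->vm_pgoff as a memory-region index and then either remaps mem[mi] directly (UIO_MEM_PHYS) or faults pages in one at a time for logical/virtual memory. From userspace, region N is selected by passing N * page_size as the mmap offset. A minimal sketch, assuming mapping 0 of /dev/uio0; the real length would come from maps/map0/size in sysfs:

/* Sketch: map memory region 0 of a UIO device. The offset encodes the
 * region index (index * page size), which becomes vma->vm_pgoff == 0
 * for idev->info->mem[0]. Path and size are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	long pagesz = sysconf(_SC_PAGESIZE);
	size_t map_size = 4096;			/* read maps/map0/size instead */
	void *base;

	if (fd < 0)
		return 1;

	base = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, 0 * pagesz);		/* region index 0 */
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ...access device registers/memory through base... */

	munmap(base, map_size);
	close(fd);
	return 0;
}
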
966 struct uio_device *idev = dev_get_drvdata(dev);
968 kfree(idev);
983 struct uio_device *idev;
994 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
995 if (!idev) {
999 idev->owner = owner;
1000 idev->info = info;
1001 mutex_init(&idev->info_lock);
1002 init_waitqueue_head(&idev->wait);
1003 atomic_set(&idev->event, 0);
1005 ret = uio_get_minor(idev);
1007 kfree(idev);
1011 device_initialize(&idev->dev);
1012 idev->dev.devt = MKDEV(uio_major, idev->minor);
1013 idev->dev.class = &uio_class;
1014 idev->dev.parent = parent;
1015 idev->dev.release = uio_device_release;
1016 dev_set_drvdata(&idev->dev, idev);
1018 ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
1022 ret = device_add(&idev->dev);
1026 ret = uio_dev_add_attributes(idev);
1030 info->uio_dev = idev;
1042 info->irq_flags, info->name, idev);
1052 uio_dev_del_attributes(idev);
1054 device_del(&idev->dev);
1056 uio_free_minor(idev->minor);
1057 put_device(&idev->dev);
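
__uio_register_device() allocates the uio_device, takes a minor, creates the char device and sysfs plumbing shown above, and, when info->irq is a real line, requests the interrupt with the uio_device as dev_id; drivers normally reach it through the uio_register_device() wrapper. A minimal sketch of a platform driver probe wiring up one physical memory region; the name, addresses and resource handling are illustrative only:

/* Sketch: registering a UIO device from a driver's probe routine.
 * "my_uio" and the resource values are hypothetical; a real driver
 * would take them from its platform/PCI resources.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>

static irqreturn_t my_uio_handler(int irq, struct uio_info *info);	/* see earlier sketch */

static int my_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "my_uio";
	info->version = "0.1";
	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0)
		return info->irq;
	info->handler = my_uio_handler;

	info->mem[0].memtype = UIO_MEM_PHYS;
	info->mem[0].addr = 0x10000000;		/* assumed register base */
	info->mem[0].size = 0x1000;

	platform_set_drvdata(pdev, info);

	/* expands to __uio_register_device(THIS_MODULE, &pdev->dev, info) */
	return uio_register_device(&pdev->dev, info);
}
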
1107 struct uio_device *idev;
1113 idev = info->uio_dev;
1114 minor = idev->minor;
1116 mutex_lock(&idev->info_lock);
1117 uio_dev_del_attributes(idev);
1120 free_irq(info->irq, idev);
1122 idev->info = NULL;
1123 mutex_unlock(&idev->info_lock);
1125 wake_up_interruptible(&idev->wait);
1126 kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
1129 device_unregister(&idev->dev);
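
uio_unregister_device() removes the sysfs attributes, frees the IRQ, clears idev->info under info_lock so in-flight file operations see a NULL info, wakes any blocked readers with POLL_HUP, and finally unregisters the device. The driver's remove path just hands back the uio_info it registered. A minimal sketch matching the probe above (classic int-returning remove signature assumed):

/* Sketch: teardown counterpart to the probe sketch; the uio_info
 * pointer was stashed with platform_set_drvdata().
 */
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static int my_remove(struct platform_device *pdev)
{
	struct uio_info *info = platform_get_drvdata(pdev);

	uio_unregister_device(info);	/* clears idev->info, frees the IRQ */
	return 0;
}
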