1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Linux driver for System z and s390 unit record devices
4  * (z/VM virtual punch, reader, printer)
5  *
6  * Copyright IBM Corp. 2001, 2009
7  * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
8  *	    Michael Holzheu <holzheu@de.ibm.com>
9  *	    Frank Munzert <munzert@de.ibm.com>
10  */
11 
12 #define KMSG_COMPONENT "vmur"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 
15 #include <linux/cdev.h>
16 #include <linux/slab.h>
17 #include <linux/module.h>
18 #include <linux/kobject.h>
19 
20 #include <linux/uaccess.h>
21 #include <asm/machine.h>
22 #include <asm/cio.h>
23 #include <asm/ccwdev.h>
24 #include <asm/debug.h>
25 #include <asm/diag.h>
26 #include <asm/scsw.h>
27 
28 #include "vmur.h"
29 
30 /*
31  * Driver overview
32  *
33  * Unit record device support is implemented as a character device driver.
34  * We can fit at least 16 bits into a device minor number and use the
35  * simple method of mapping a character device number with minor abcd
36  * to the unit record device with devno abcd.
37  * I/O to virtual unit record devices is handled as follows:
38  * Reads: Diagnose code 0x14 (input spool file manipulation)
39  * is used to read spool data page-wise.
40  * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
41  * is available by reading sysfs attr reclen. Each write() to the device
42  * must specify an integral multiple (maximal 511) of reclen.
43  */
44 
45 static char ur_banner[] = "z/VM virtual unit record device driver";
46 
47 MODULE_AUTHOR("IBM Corporation");
48 MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
49 MODULE_LICENSE("GPL");
50 
51 static dev_t ur_first_dev_maj_min;
52 static const struct class vmur_class = {
53 	.name = "vmur",
54 };
55 static struct debug_info *vmur_dbf;
56 
57 /* We put the device's record length (for writes) in the driver_info field */
58 static struct ccw_device_id ur_ids[] = {
59 	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
60 	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
61 	{ /* end of list */ }
62 };
63 
64 MODULE_DEVICE_TABLE(ccw, ur_ids);
65 
66 static int ur_probe(struct ccw_device *cdev);
67 static void ur_remove(struct ccw_device *cdev);
68 static int ur_set_online(struct ccw_device *cdev);
69 static int ur_set_offline(struct ccw_device *cdev);
70 
71 static struct ccw_driver ur_driver = {
72 	.driver = {
73 		.name	= "vmur",
74 		.owner	= THIS_MODULE,
75 	},
76 	.ids		= ur_ids,
77 	.probe		= ur_probe,
78 	.remove		= ur_remove,
79 	.set_online	= ur_set_online,
80 	.set_offline	= ur_set_offline,
81 	.int_class	= IRQIO_VMR,
82 };
83 
84 static DEFINE_MUTEX(vmur_mutex);
85 
86 static void ur_uevent(struct work_struct *ws);
87 
88 /*
89  * Allocation, freeing, getting and putting of urdev structures
90  *
91  * Each ur device (urd) contains a reference to its corresponding ccw device
92  * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
93  * ur device using dev_get_drvdata(&cdev->dev) pointer.
94  *
95  * urd references:
96  * - ur_probe gets a urd reference, ur_remove drops the reference
97  *   dev_get_drvdata(&cdev->dev)
98  * - ur_open gets a urd reference, ur_release drops the reference
99  *   (urf->urd)
100  *
101  * cdev references:
102  * - urdev_alloc get a cdev reference (urd->cdev)
103  * - urdev_free drops the cdev reference (urd->cdev)
104  *
105  * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
106  */
107 static struct urdev *urdev_alloc(struct ccw_device *cdev)
108 {
109 	struct urdev *urd;
110 
111 	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
112 	if (!urd)
113 		return NULL;
114 	urd->reclen = cdev->id.driver_info;
115 	ccw_device_get_id(cdev, &urd->dev_id);
116 	mutex_init(&urd->io_mutex);
117 	init_waitqueue_head(&urd->wait);
118 	INIT_WORK(&urd->uevent_work, ur_uevent);
119 	spin_lock_init(&urd->open_lock);
120 	refcount_set(&urd->ref_count,  1);
121 	urd->cdev = cdev;
122 	get_device(&cdev->dev);
123 	return urd;
124 }
125 
/* Free a urdev, dropping the cdev reference taken in urdev_alloc. */
static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}
133 
/* Take an additional reference on an already-referenced urdev. */
static void urdev_get(struct urdev *urd)
{
	refcount_inc(&urd->ref_count);
}
138 
/*
 * Return the urdev attached to a ccw device (with an extra reference),
 * or NULL if none is attached. The ccwdev lock serializes against
 * ur_probe()/ur_remove() setting or clearing the drvdata pointer, so
 * the reference is taken before the urdev can be freed.
 */
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}
151 
152 static struct urdev *urdev_get_from_devno(u16 devno)
153 {
154 	char bus_id[16];
155 	struct ccw_device *cdev;
156 	struct urdev *urd;
157 
158 	sprintf(bus_id, "0.0.%04x", devno);
159 	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
160 	if (!cdev)
161 		return NULL;
162 	urd = urdev_get_from_cdev(cdev);
163 	put_device(&cdev->dev);
164 	return urd;
165 }
166 
/* Drop a reference; the last put frees the urdev. */
static void urdev_put(struct urdev *urd)
{
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}
172 
173 /*
174  * Low-level functions to do I/O to a ur device.
175  *     alloc_chan_prog
176  *     free_chan_prog
177  *     do_ur_io
178  *     ur_int_handler
179  *
180  * alloc_chan_prog allocates and builds the channel program
181  * free_chan_prog frees memory of the channel program
182  *
183  * do_ur_io issues the channel program to the device and blocks waiting
184  * on a completion event it publishes at urd->io_done. The function
185  * serialises itself on the device's mutex so that only one I/O
186  * is issued at a time (and that I/O is synchronous).
187  *
188  * ur_int_handler catches the "I/O done" interrupt, writes the
189  * subchannel status word into the scsw member of the urdev structure
190  * and complete()s the io_done to wake the waiting do_ur_io.
191  *
192  * The caller of do_ur_io is responsible for kfree()ing the channel program
193  * address pointer that alloc_chan_prog returned.
194  */
195 
196 static void free_chan_prog(struct ccw1 *cpa)
197 {
198 	struct ccw1 *ptr = cpa;
199 
200 	while (ptr->cda) {
201 		kfree(dma32_to_virt(ptr->cda));
202 		ptr++;
203 	}
204 	kfree(cpa);
205 }
206 
207 /*
208  * alloc_chan_prog
209  * The channel program we use is write commands chained together
210  * with a final NOP CCW command-chained on (which ensures that CE and DE
211  * are presented together in a single interrupt instead of as separate
212  * interrupts unless an incorrect length indication kicks in first). The
213  * data length in each CCW is reclen.
214  */
215 static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
216 				    int reclen)
217 {
218 	struct ccw1 *cpa;
219 	void *kbuf;
220 	int i;
221 
222 	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
223 
224 	/*
225 	 * We chain a NOP onto the writes to force CE+DE together.
226 	 * That means we allocate room for CCWs to cover count/reclen
227 	 * records plus a NOP.
228 	 */
229 	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
230 		      GFP_KERNEL | GFP_DMA);
231 	if (!cpa)
232 		return ERR_PTR(-ENOMEM);
233 
234 	for (i = 0; i < rec_count; i++) {
235 		cpa[i].cmd_code = WRITE_CCW_CMD;
236 		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
237 		cpa[i].count = reclen;
238 		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
239 		if (!kbuf) {
240 			free_chan_prog(cpa);
241 			return ERR_PTR(-ENOMEM);
242 		}
243 		cpa[i].cda = virt_to_dma32(kbuf);
244 		if (copy_from_user(kbuf, ubuf, reclen)) {
245 			free_chan_prog(cpa);
246 			return ERR_PTR(-EFAULT);
247 		}
248 		ubuf += reclen;
249 	}
250 	/* The following NOP CCW forces CE+DE to be presented together */
251 	cpa[i].cmd_code = CCW_CMD_NOOP;
252 	return cpa;
253 }
254 
/*
 * Issue a channel program to the device and wait synchronously for its
 * completion. The per-device io_mutex guarantees a single outstanding
 * I/O; the on-stack completion is signalled by ur_int_handler().
 * Returns 0 on successful I/O submission and completion, or a negative
 * errno (interrupted lock wait or ccw_device_start failure).
 */
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	/* Interruptible so a signal can abort a waiter before I/O starts */
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	/* ur_int_handler() completes this when our interrupt arrives */
	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}
285 
/*
 * Workqueue callback: notify userspace of an unsolicited device-end
 * interrupt via a KOBJ_CHANGE uevent. Scheduled from ur_int_handler().
 */
static void ur_uevent(struct work_struct *ws)
{
	struct urdev *urd = container_of(ws, struct urdev, uevent_work);
	char *envp[] = {
		"EVENT=unsol_de",	/* Unsolicited device-end interrupt */
		NULL
	};

	kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
	urdev_put(urd);	/* drop the reference taken in ur_int_handler() */
}
297 
298 /*
299  * ur interrupt handler, called from the ccw_device layer
300  */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	/* irb may be an error pointer; dereference only when valid */
	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	urd = dev_get_drvdata(&cdev->dev);
	/* intparm == 0 means the interrupt was not caused by an I/O we
	 * started (do_ur_io passes a non-zero intparm to
	 * ccw_device_start), i.e. it is unsolicited. */
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");

		if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) {
			/*
			 * Userspace might be interested in a transition to
			 * device-ready state.
			 */
			/* Reference is dropped by ur_uevent() */
			urdev_get(urd);
			schedule_work(&urd->uevent_work);
		}

		return;
	}
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	/* Wake the waiter in do_ur_io() */
	complete(urd->io_done);
}
336 
337 /*
338  * reclen sysfs attribute - The record length to be used for write CCWs
339  */
340 static ssize_t ur_attr_reclen_show(struct device *dev,
341 				   struct device_attribute *attr, char *buf)
342 {
343 	struct urdev *urd;
344 	int rc;
345 
346 	urd = urdev_get_from_cdev(to_ccwdev(dev));
347 	if (!urd)
348 		return -ENODEV;
349 	rc = sysfs_emit(buf, "%zu\n", urd->reclen);
350 	urdev_put(urd);
351 	return rc;
352 }
353 
354 static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
355 
/* Create the reclen sysfs attribute; returns 0 or a negative errno. */
static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}
360 
/* Remove the reclen sysfs attribute created by ur_create_attributes. */
static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
365 
366 /*
367  * diagnose code 0x210 - retrieve device information
368  * cc=0  normal completion, we have a real device
369  * cc=1  CP paging error
370  * cc=2  The virtual device exists, but is not associated with a real device
371  * cc=3  Invalid device address, or the virtual device does not exist
372  */
373 static int get_urd_class(struct urdev *urd)
374 {
375 	static struct diag210 ur_diag210;
376 	int cc;
377 
378 	ur_diag210.vrdcdvno = urd->dev_id.devno;
379 	ur_diag210.vrdclen = sizeof(struct diag210);
380 
381 	cc = diag210(&ur_diag210);
382 	switch (cc) {
383 	case 0:
384 		return -EOPNOTSUPP;
385 	case 2:
386 		return ur_diag210.vrdcvcla; /* virtual device class */
387 	case 3:
388 		return -ENODEV;
389 	default:
390 		return -EIO;
391 	}
392 }
393 
394 /*
395  * Allocation and freeing of urfile structures
396  */
397 static struct urfile *urfile_alloc(struct urdev *urd)
398 {
399 	struct urfile *urf;
400 
401 	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
402 	if (!urf)
403 		return NULL;
404 	urf->urd = urd;
405 
406 	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
407 	      urf->dev_reclen);
408 
409 	return urf;
410 }
411 
/* Free a urfile; the urd reference itself is dropped by the caller. */
static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}
417 
418 /*
419  * The fops implementation of the character device driver
420  */
421 static ssize_t do_write(struct urdev *urd, const char __user *udata,
422 			size_t count, size_t reclen, loff_t *ppos)
423 {
424 	struct ccw1 *cpa;
425 	int rc;
426 
427 	cpa = alloc_chan_prog(udata, count / reclen, reclen);
428 	if (IS_ERR(cpa))
429 		return PTR_ERR(cpa);
430 
431 	rc = do_ur_io(urd, cpa);
432 	if (rc)
433 		goto fail_kfree_cpa;
434 
435 	if (urd->io_request_rc) {
436 		rc = urd->io_request_rc;
437 		goto fail_kfree_cpa;
438 	}
439 	*ppos += count;
440 	rc = count;
441 
442 fail_kfree_cpa:
443 	free_chan_prog(cpa);
444 	return rc;
445 }
446 
447 static ssize_t ur_write(struct file *file, const char __user *udata,
448 			size_t count, loff_t *ppos)
449 {
450 	struct urfile *urf = file->private_data;
451 
452 	TRACE("ur_write: count=%zu\n", count);
453 
454 	if (count == 0)
455 		return 0;
456 
457 	if (count % urf->dev_reclen)
458 		return -EINVAL;	/* count must be a multiple of reclen */
459 
460 	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
461 		count = urf->dev_reclen * MAX_RECS_PER_IO;
462 
463 	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
464 }
465 
466 /*
467  * diagnose code 0x14 subcode 0x0028 - position spool file to designated
468  *				       record
469  * cc=0  normal completion
470  * cc=2  no file active on the virtual reader or device not ready
471  * cc=3  record specified is beyond EOF
472  */
473 static int diag_position_to_record(int devno, int record)
474 {
475 	int cc;
476 
477 	cc = diag14(record, devno, 0x28);
478 	switch (cc) {
479 	case 0:
480 		return 0;
481 	case 2:
482 		return -ENOMEDIUM;
483 	case 3:
484 		return -ENODATA; /* position beyond end of file */
485 	default:
486 		return -EIO;
487 	}
488 }
489 
490 /*
491  * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
492  * cc=0  normal completion
493  * cc=1  EOF reached
494  * cc=2  no file active on the virtual reader, and no file eligible
495  * cc=3  file already active on the virtual reader or specified virtual
496  *	 reader does not exist or is not a reader
497  */
498 static int diag_read_file(int devno, char *buf)
499 {
500 	int cc;
501 
502 	cc = diag14((unsigned long) buf, devno, 0x00);
503 	switch (cc) {
504 	case 0:
505 		return 0;
506 	case 1:
507 		return -ENODATA;
508 	case 2:
509 		return -ENOMEDIUM;
510 	default:
511 		return -EIO;
512 	}
513 }
514 
/*
 * Read spool file data page-wise via diagnose 0x14 and copy it to the
 * user buffer. The file offset is mapped to a spool record (one record
 * per page); a partial first page is handled via the residual offset
 * 'res'. Returns the number of bytes copied (0 at EOF) or a negative
 * errno. Called with urd->io_mutex held (see ur_read).
 */
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	/* Position to the spool record containing *offs (1-based) */
	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	/* Bounce page in DMA-capable memory for the diagnose call */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);	/* offset within first page */
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA) {
			/* EOF: return what was copied so far */
			break;
		}
		if (rc)
			goto fail;
		/* Patch the file's record length into the header field of
		 * the very first page, if we are reading it. */
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;	/* only the first page can be partial */
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}
564 
565 static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
566 		       loff_t *offs)
567 {
568 	struct urdev *urd;
569 	int rc;
570 
571 	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
572 
573 	if (count == 0)
574 		return 0;
575 
576 	urd = ((struct urfile *) file->private_data)->urd;
577 	rc = mutex_lock_interruptible(&urd->io_mutex);
578 	if (rc)
579 		return rc;
580 	rc = diag14_read(file, ubuf, count, offs);
581 	mutex_unlock(&urd->io_mutex);
582 	return rc;
583 }
584 
585 /*
586  * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
587  * cc=0  normal completion
588  * cc=1  no files on reader queue or no subsequent file
589  * cc=2  spid specified is invalid
590  */
591 static int diag_read_next_file_info(struct file_control_block *buf, int spid)
592 {
593 	int cc;
594 
595 	cc = diag14((unsigned long) buf, spid, 0xfff);
596 	switch (cc) {
597 	case 0:
598 		return 0;
599 	default:
600 		return -ENODATA;
601 	}
602 }
603 
/*
 * Verify that an input (reader) device has a usable spool file:
 *  - the reader queue must not be empty,
 *  - the file on top of the queue must not be in hold status,
 *  - opening the file (first read) must leave it marked in-use.
 * Returns 0 if the device is ready for reading, a negative errno
 * otherwise (-EPERM for held files, -EMFILE if the file could not be
 * opened).
 */
static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	/* DMA-capable buffer for the diagnose parameter block */
	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader	*/
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}
651 
652 static int verify_device(struct urdev *urd)
653 {
654 	switch (urd->class) {
655 	case DEV_CLASS_UR_O:
656 		return 0; /* no check needed here */
657 	case DEV_CLASS_UR_I:
658 		return verify_uri_device(urd);
659 	default:
660 		return -EOPNOTSUPP;
661 	}
662 }
663 
664 static int get_uri_file_reclen(struct urdev *urd)
665 {
666 	struct file_control_block *fcb;
667 	int rc;
668 
669 	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
670 	if (!fcb)
671 		return -ENOMEM;
672 	rc = diag_read_next_file_info(fcb, 0);
673 	if (rc)
674 		goto fail_free;
675 	if (fcb->file_stat & FLG_CP_DUMP)
676 		rc = 0;
677 	else
678 		rc = fcb->rec_len;
679 
680 fail_free:
681 	kfree(fcb);
682 	return rc;
683 }
684 
685 static int get_file_reclen(struct urdev *urd)
686 {
687 	switch (urd->class) {
688 	case DEV_CLASS_UR_O:
689 		return 0;
690 	case DEV_CLASS_UR_I:
691 		return get_uri_file_reclen(urd);
692 	default:
693 		return -EOPNOTSUPP;
694 	}
695 }
696 
/*
 * open() entry point. Enforces exclusive access per device via
 * urd->open_flag (blocking until the current opener releases, unless
 * O_NONBLOCK), checks the access mode against the device class
 * (readers are read-only, punches/printers write-only), verifies the
 * device and allocates the per-open urfile. Holds a urdev reference
 * for the lifetime of the open file (dropped in ur_release).
 */
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = iminor(file_inode(file));

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	/* Wait until no other opener holds the device (open_flag == 0) */
	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		/* Woken by ur_release() via urd->wait */
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	/* Access mode must match the device class */
	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}
775 
776 static int ur_release(struct inode *inode, struct file *file)
777 {
778 	struct urfile *urf = file->private_data;
779 
780 	TRACE("ur_release\n");
781 	spin_lock(&urf->urd->open_lock);
782 	urf->urd->open_flag--;
783 	spin_unlock(&urf->urd->open_lock);
784 	wake_up_interruptible(&urf->urd->wait);
785 	urdev_put(urf->urd);
786 	urfile_free(urf);
787 	return 0;
788 }
789 
790 static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
791 {
792 	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
793 		return -ESPIPE; /* seek allowed only for reader */
794 	if (offset % PAGE_SIZE)
795 		return -ESPIPE; /* only multiples of 4K allowed */
796 	return no_seek_end_llseek(file, offset, whence);
797 }
798 
/* Character device operations; each open file gets its own urfile
 * (stored in file->private_data by ur_open). */
static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};
807 
808 /*
809  * ccw_device infrastructure:
810  *     ur_probe creates the struct urdev (with refcount = 1), the device
811  *     attributes, sets up the interrupt handler and validates the virtual
812  *     unit record device.
813  *     ur_remove removes the device attributes and drops the reference to
814  *     struct urdev.
815  *
816  *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
817  *     by the vmur_mutex lock.
818  *
819  *     urd->char_device is used as indication that the online function has
820  *     been completed successfully.
821  */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	/* Publish the urd and install the interrupt handler atomically
	 * under the ccwdev lock, so ur_int_handler() never runs with a
	 * half-initialized urd. */
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	cdev->handler = ur_int_handler;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
868 
/*
 * Bring the device online: allocate and register the character device
 * (minor number == devno) and create the class device node (vmrdr-*,
 * vmpun-* or vmprt-* depending on device type and class). A non-NULL
 * urd->char_device marks a completed online transition. Serialized by
 * vmur_mutex.
 */
static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;
	/* node_id: prefix (6 chars) + ccw bus id "0.0.xxxx" fits in 16 */
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(&vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
938 
/*
 * Take the device offline: destroy the class device node and delete
 * the character device. Unless 'force' is set, refuses with -EBUSY
 * while a user still holds a urd reference (ref_count > 2 accounts for
 * the base reference from ur_probe plus the one taken here). Caller
 * must hold vmur_mutex.
 */
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (refcount_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (cancel_work_sync(&urd->uevent_work)) {
		/* Work not run yet - need to release reference here */
		urdev_put(urd);
	}
	device_destroy(&vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}
973 
/* Non-forced offline transition, serialized by vmur_mutex. */
static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}
983 
/*
 * Undo ur_probe: force the device offline if needed, remove the sysfs
 * attributes and drop the probe-time urdev reference. Clearing drvdata
 * and the interrupt handler under the ccwdev lock ensures
 * ur_int_handler() cannot race with the teardown.
 */
static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	cdev->handler = NULL;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}
1004 
1005 /*
1006  * Module initialisation and cleanup
1007  */
/*
 * Module init: register (in order) the debug area, the device class,
 * the ccw driver and the character device region. Each failure path
 * unwinds exactly the steps completed so far. Refuses to load outside
 * z/VM since the driver relies on CP diagnose calls.
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!machine_is_vm()) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	/* Class must exist before ur_set_online can create device nodes */
	rc = class_register(&vmur_class);
	if (rc)
		goto fail_free_dbf;

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	/* Minor numbers map 1:1 to device numbers, starting at 0 */
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_unregister(&vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}
1055 
/* Module exit: tear down in reverse order of ur_init(). */
static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_unregister(&vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}
1064 
1065 module_init(ur_init);
1066 module_exit(ur_exit);
1067