1 /*
2  * u_serial.c - utilities for USB gadget "serial port"/TTY support
3  *
4  * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5  * Copyright (C) 2008 David Brownell
6  * Copyright (C) 2008 by Nokia Corporation
7  *
8  * This code also borrows from usbserial.c, which is
9  * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10  * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11  * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12  *
13  * This software is distributed under the terms of the GNU General
14  * Public License ("GPL") as published by the Free Software Foundation,
15  * either version 2 of that License or (at your option) any later version.
16  */
17 
18 /* #define VERBOSE_DEBUG */
19 
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/delay.h>
25 #include <linux/tty.h>
26 #include <linux/tty_flip.h>
27 #include <linux/slab.h>
28 #include <linux/export.h>
29 
30 #include "u_serial.h"
31 
32 
33 /*
34  * This component encapsulates the TTY layer glue needed to provide basic
35  * "serial port" functionality through the USB gadget stack.  Each such
36  * port is exposed through a /dev/ttyGS* node.
37  *
38  * After initialization (gserial_setup), these TTY port devices stay
39  * available until they are removed (gserial_cleanup).  Each one may be
40  * connected to a USB function (gserial_connect), or disconnected (with
41  * gserial_disconnect) when the USB host issues a config change event.
42  * Data can only flow when the port is connected to the host.
43  *
44  * A given TTY port can be made available in multiple configurations.
45  * For example, each one might expose a ttyGS0 node which provides a
46  * login application.  In one case that might use CDC ACM interface 0,
47  * while another configuration might use interface 3 for that.  The
48  * work to handle that (including descriptor management) is not part
49  * of this component.
50  *
51  * Configurations may expose more than one TTY port.  For example, if
52  * ttyGS0 provides login service, then ttyGS1 might provide dialer access
53  * for a telephone or fax link.  And ttyGS2 might be something that just
54  * needs a simple byte stream interface for some messaging protocol that
55  * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
56  */
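
/*
 * A minimal usage sketch (an illustration, not code from this file):
 * a gadget driver built on this component typically issues the calls
 * below.  Only gserial_setup/connect/disconnect/cleanup come from this
 * file; "cdev" and "my_func" are the usual composite-driver names and
 * are assumptions here.
 *
 *	// gadget bind():
 *	status = gserial_setup(cdev->gadget, 1);	// creates ttyGS0
 *
 *	// function set_alt(), after endpoint descriptors are chosen:
 *	status = gserial_connect(&my_func->port, 0);
 *
 *	// function disable(), or on USB reset/disconnect:
 *	gserial_disconnect(&my_func->port);
 *
 *	// gadget unbind() / module exit:
 *	gserial_cleanup();
 */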
57 
58 #define PREFIX	"ttyGS"
59 
60 /*
61  * gserial is the lifecycle interface, used by USB functions
62  * gs_port is the I/O nexus, used by the tty driver
63  * tty_struct links to the tty/filesystem framework
64  *
65  * gserial <---> gs_port ... links will be null when the USB link is
66  * inactive; managed by gserial_{connect,disconnect}().  Each gserial
67  * instance can wrap its own USB control protocol.
68  *	gserial->ioport == usb_ep->driver_data ... gs_port
69  *	gs_port->port_usb ... gserial
70  *
71  * gs_port <---> tty_struct ... links will be null when the TTY file
72  * isn't opened; managed by gs_open()/gs_close()
73  *	gs_port->port_tty ... tty_struct
74  *	tty_struct->driver_data ... gs_port
75  */
76 
77 /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
78  * next layer of buffering.  For TX that's a circular buffer; for RX
79  * consider it a NOP.  A third layer is provided by the TTY code.
80  */
81 #define QUEUE_SIZE		16
82 #define WRITE_BUF_SIZE		8192		/* TX only */
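
/* For example, with 512-byte high speed bulk packets, QUEUE_SIZE requests
 * can hold up to 16 * 512 = 8 KB per direction on the USB side, on top of
 * the 8 KB TX circular buffer below and the TTY layer's own buffering.
 */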
83 
84 /* circular buffer */
85 struct gs_buf {
86 	unsigned		buf_size;
87 	char			*buf_buf;
88 	char			*buf_get;
89 	char			*buf_put;
90 };
91 
92 /*
93  * The port structure holds info for each port, one for each minor number
94  * (and thus for each /dev/ node).
95  */
96 struct gs_port {
97 	spinlock_t		port_lock;	/* guard port_* access */
98 
99 	struct gserial		*port_usb;
100 	struct tty_struct	*port_tty;
101 
102 	unsigned		open_count;
103 	bool			openclose;	/* open/close in progress */
104 	u8			port_num;
105 
106 	wait_queue_head_t	close_wait;	/* wait for last close */
107 
108 	struct list_head	read_pool;
109 	int read_started;
110 	int read_allocated;
111 	struct list_head	read_queue;
112 	unsigned		n_read;
113 	struct tasklet_struct	push;
114 
115 	struct list_head	write_pool;
116 	int write_started;
117 	int write_allocated;
118 	struct gs_buf		port_write_buf;
119 	wait_queue_head_t	drain_wait;	/* wait while writes drain */
120 
121 	/* REVISIT this state ... */
122 	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
123 };
124 
125 /* increase N_PORTS if you need more */
126 #define N_PORTS		4
127 static struct portmaster {
128 	struct mutex	lock;			/* protect open/close */
129 	struct gs_port	*port;
130 } ports[N_PORTS];
131 static unsigned	n_ports;
132 
133 #define GS_CLOSE_TIMEOUT		15		/* seconds */
134 
135 
136 
137 #ifdef VERBOSE_DEBUG
138 #define pr_vdebug(fmt, arg...) \
139 	pr_debug(fmt, ##arg)
140 #else
141 #define pr_vdebug(fmt, arg...) \
142 	({ if (0) pr_debug(fmt, ##arg); })
143 #endif
144 
145 /*-------------------------------------------------------------------------*/
146 
147 /* Circular Buffer */
148 
149 /*
150  * gs_buf_alloc
151  *
152  * Allocate a circular buffer and all associated memory.
153  */
154 static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
155 {
156 	gb->buf_buf = kmalloc(size, GFP_KERNEL);
157 	if (gb->buf_buf == NULL)
158 		return -ENOMEM;
159 
160 	gb->buf_size = size;
161 	gb->buf_put = gb->buf_buf;
162 	gb->buf_get = gb->buf_buf;
163 
164 	return 0;
165 }
166 
167 /*
168  * gs_buf_free
169  *
170  * Free the buffer and all associated memory.
171  */
172 static void gs_buf_free(struct gs_buf *gb)
173 {
174 	kfree(gb->buf_buf);
175 	gb->buf_buf = NULL;
176 }
177 
178 /*
179  * gs_buf_clear
180  *
181  * Clear out all data in the circular buffer.
182  */
183 static void gs_buf_clear(struct gs_buf *gb)
184 {
185 	gb->buf_get = gb->buf_put;
186 	/* equivalent to a get of all data available */
187 }
188 
189 /*
190  * gs_buf_data_avail
191  *
192  * Return the number of bytes of data written into the circular
193  * buffer.
194  */
195 static unsigned gs_buf_data_avail(struct gs_buf *gb)
196 {
197 	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
198 }
199 
200 /*
201  * gs_buf_space_avail
202  *
203  * Return the number of bytes of space available in the circular
204  * buffer.
205  */
206 static unsigned gs_buf_space_avail(struct gs_buf *gb)
207 {
208 	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
209 }
210 
211 /*
212  * gs_buf_put
213  *
214  * Copy data from a user buffer and put it into the circular buffer.
215  * Restrict to the amount of space available.
216  *
217  * Return the number of bytes copied.
218  */
219 static unsigned
220 gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
221 {
222 	unsigned len;
223 
224 	len  = gs_buf_space_avail(gb);
225 	if (count > len)
226 		count = len;
227 
228 	if (count == 0)
229 		return 0;
230 
231 	len = gb->buf_buf + gb->buf_size - gb->buf_put;
232 	if (count > len) {
233 		memcpy(gb->buf_put, buf, len);
234 		memcpy(gb->buf_buf, buf+len, count - len);
235 		gb->buf_put = gb->buf_buf + count - len;
236 	} else {
237 		memcpy(gb->buf_put, buf, count);
238 		if (count < len)
239 			gb->buf_put += count;
240 		else /* count == len */
241 			gb->buf_put = gb->buf_buf;
242 	}
243 
244 	return count;
245 }
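
/* Continuing the example above: putting 4 bytes with buf_put at offset 5
 * (and 4 bytes of space free) copies 3 bytes up to the end of the buffer,
 * wraps the remaining byte to the start, and leaves buf_put at offset 1.
 */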
246 
247 /*
248  * gs_buf_get
249  *
250  * Get data from the circular buffer and copy to the given buffer.
251  * Restrict to the amount of data available.
252  *
253  * Return the number of bytes copied.
254  */
255 static unsigned
256 gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
257 {
258 	unsigned len;
259 
260 	len = gs_buf_data_avail(gb);
261 	if (count > len)
262 		count = len;
263 
264 	if (count == 0)
265 		return 0;
266 
267 	len = gb->buf_buf + gb->buf_size - gb->buf_get;
268 	if (count > len) {
269 		memcpy(buf, gb->buf_get, len);
270 		memcpy(buf+len, gb->buf_buf, count - len);
271 		gb->buf_get = gb->buf_buf + count - len;
272 	} else {
273 		memcpy(buf, gb->buf_get, count);
274 		if (count < len)
275 			gb->buf_get += count;
276 		else /* count == len */
277 			gb->buf_get = gb->buf_buf;
278 	}
279 
280 	return count;
281 }
282 
283 /*-------------------------------------------------------------------------*/
284 
285 /* I/O glue between TTY (upper) and USB function (lower) driver layers */
286 
287 /*
288  * gs_alloc_req
289  *
290  * Allocate a usb_request and its buffer.  Returns a pointer to the
291  * usb_request or NULL if there is an error.
292  */
293 struct usb_request *
294 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
295 {
296 	struct usb_request *req;
297 
298 	req = usb_ep_alloc_request(ep, kmalloc_flags);
299 
300 	if (req != NULL) {
301 		req->length = len;
302 		req->buf = kmalloc(len, kmalloc_flags);
303 		if (req->buf == NULL) {
304 			usb_ep_free_request(ep, req);
305 			return NULL;
306 		}
307 	}
308 
309 	return req;
310 }
311 
312 /*
313  * gs_free_req
314  *
315  * Free a usb_request and its buffer.
316  */
317 void gs_free_req(struct usb_ep *ep, struct usb_request *req)
318 {
319 	kfree(req->buf);
320 	usb_ep_free_request(ep, req);
321 }
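
/* gs_alloc_req()/gs_free_req() are also called directly by function
 * drivers for endpoints this file doesn't manage, e.g. an ACM-style
 * notification endpoint.  A hedged sketch ("notify_ep", the packet size
 * and the completion handler are illustrative, not defined here):
 *
 *	struct usb_request *req;
 *
 *	req = gs_alloc_req(notify_ep, 64, GFP_ATOMIC);
 *	if (!req)
 *		return -ENOMEM;
 *	req->complete = my_notify_complete;
 *	...
 *	gs_free_req(notify_ep, req);
 */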
322 
323 /*
324  * gs_send_packet
325  *
326  * If there is data to send, a packet is built in the given
327  * buffer and the size is returned.  If there is no data to
328  * send, 0 is returned.
329  *
330  * Called with port_lock held.
331  */
332 static unsigned
333 gs_send_packet(struct gs_port *port, char *packet, unsigned size)
334 {
335 	unsigned len;
336 
337 	len = gs_buf_data_avail(&port->port_write_buf);
338 	if (len < size)
339 		size = len;
340 	if (size != 0)
341 		size = gs_buf_get(&port->port_write_buf, packet, size);
342 	return size;
343 }
344 
345 /*
346  * gs_start_tx
347  *
348  * This function finds available write requests, calls
349  * gs_send_packet to fill these packets with data, and
350  * continues until either there are no more write requests
351  * available or no more data to send.  This function is
352  * run whenever data arrives or write requests are available.
353  *
354  * Context: caller owns port_lock; port_usb is non-null.
355  */
356 static int gs_start_tx(struct gs_port *port)
357 /*
358 __releases(&port->port_lock)
359 __acquires(&port->port_lock)
360 */
361 {
362 	struct list_head	*pool = &port->write_pool;
363 	struct usb_ep		*in = port->port_usb->in;
364 	int			status = 0;
365 	bool			do_tty_wake = false;
366 
367 	while (!list_empty(pool)) {
368 		struct usb_request	*req;
369 		int			len;
370 
371 		if (port->write_started >= QUEUE_SIZE)
372 			break;
373 
374 		req = list_entry(pool->next, struct usb_request, list);
375 		len = gs_send_packet(port, req->buf, in->maxpacket);
376 		if (len == 0) {
377 			wake_up_interruptible(&port->drain_wait);
378 			break;
379 		}
380 		do_tty_wake = true;
381 
382 		req->length = len;
383 		list_del(&req->list);
384 		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
385 
386 		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
387 				port->port_num, len, *((u8 *)req->buf),
388 				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
389 
390 		/* Drop lock while we call out of driver; completions
391 		 * could be issued while we do so.  Disconnection may
392 		 * happen too; maybe immediately before we queue this!
393 		 *
394 		 * NOTE that we may keep sending data for a while after
395 		 * the TTY closed (port->port_tty is NULL).
396 		 */
397 		spin_unlock(&port->port_lock);
398 		status = usb_ep_queue(in, req, GFP_ATOMIC);
399 		spin_lock(&port->port_lock);
400 
401 		if (status) {
402 			pr_debug("%s: %s %s err %d\n",
403 					__func__, "queue", in->name, status);
404 			list_add(&req->list, pool);
405 			break;
406 		}
407 
408 		port->write_started++;
409 
410 		/* abort immediately after disconnect */
411 		if (!port->port_usb)
412 			break;
413 	}
414 
415 	if (do_tty_wake && port->port_tty)
416 		tty_wakeup(port->port_tty);
417 	return status;
418 }
419 
420 /*
421  * Context: caller owns port_lock, and port_usb is set
422  */
423 static unsigned gs_start_rx(struct gs_port *port)
424 /*
425 __releases(&port->port_lock)
426 __acquires(&port->port_lock)
427 */
428 {
429 	struct list_head	*pool = &port->read_pool;
430 	struct usb_ep		*out = port->port_usb->out;
431 
432 	while (!list_empty(pool)) {
433 		struct usb_request	*req;
434 		int			status;
435 		struct tty_struct	*tty;
436 
437 		/* no more rx if closed */
438 		tty = port->port_tty;
439 		if (!tty)
440 			break;
441 
442 		if (port->read_started >= QUEUE_SIZE)
443 			break;
444 
445 		req = list_entry(pool->next, struct usb_request, list);
446 		list_del(&req->list);
447 		req->length = out->maxpacket;
448 
449 		/* drop lock while we call out; the controller driver
450 		 * may need to call us back (e.g. for disconnect)
451 		 */
452 		spin_unlock(&port->port_lock);
453 		status = usb_ep_queue(out, req, GFP_ATOMIC);
454 		spin_lock(&port->port_lock);
455 
456 		if (status) {
457 			pr_debug("%s: %s %s err %d\n",
458 					__func__, "queue", out->name, status);
459 			list_add(&req->list, pool);
460 			break;
461 		}
462 		port->read_started++;
463 
464 		/* abort immediately after disconnect */
465 		if (!port->port_usb)
466 			break;
467 	}
468 	return port->read_started;
469 }
470 
471 /*
472  * RX tasklet takes data out of the RX queue and hands it up to the TTY
473  * layer until it refuses to take any more data (or is throttled back).
474  * Then it issues reads for any further data.
475  *
476  * If the RX queue becomes full enough that no usb_request is queued,
477  * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
478  * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
479  * can be buffered before the TTY layer's buffers (currently 64 KB).
480  */
481 static void gs_rx_push(unsigned long _port)
482 {
483 	struct gs_port		*port = (void *)_port;
484 	struct tty_struct	*tty;
485 	struct list_head	*queue = &port->read_queue;
486 	bool			disconnect = false;
487 	bool			do_push = false;
488 
489 	/* hand any queued data to the tty */
490 	spin_lock_irq(&port->port_lock);
491 	tty = port->port_tty;
492 	while (!list_empty(queue)) {
493 		struct usb_request	*req;
494 
495 		req = list_first_entry(queue, struct usb_request, list);
496 
497 		/* discard data if tty was closed */
498 		if (!tty)
499 			goto recycle;
500 
501 		/* leave data queued if tty was rx throttled */
502 		if (test_bit(TTY_THROTTLED, &tty->flags))
503 			break;
504 
505 		switch (req->status) {
506 		case -ESHUTDOWN:
507 			disconnect = true;
508 			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
509 			break;
510 
511 		default:
512 			/* presumably a transient fault */
513 			pr_warning(PREFIX "%d: unexpected RX status %d\n",
514 					port->port_num, req->status);
515 			/* FALLTHROUGH */
516 		case 0:
517 			/* normal completion */
518 			break;
519 		}
520 
521 		/* push data to (open) tty */
522 		if (req->actual) {
523 			char		*packet = req->buf;
524 			unsigned	size = req->actual;
525 			unsigned	n;
526 			int		count;
527 
528 			/* we may have pushed part of this packet already... */
529 			n = port->n_read;
530 			if (n) {
531 				packet += n;
532 				size -= n;
533 			}
534 
535 			count = tty_insert_flip_string(tty, packet, size);
536 			if (count)
537 				do_push = true;
538 			if (count != size) {
539 				/* stop pushing; TTY layer can't handle more */
540 				port->n_read += count;
541 				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
542 						port->port_num,
543 						count, req->actual);
544 				break;
545 			}
546 			port->n_read = 0;
547 		}
548 recycle:
549 		list_move(&req->list, &port->read_pool);
550 		port->read_started--;
551 	}
552 
553 	/* Push from tty to ldisc; without low_latency set this is handled by
554 	 * a workqueue, so we won't get callbacks and can hold port_lock
555 	 */
556 	if (tty && do_push)
557 		tty_flip_buffer_push(tty);
558 
559 
560 	/* We want our data queue to become empty ASAP, keeping data
561 	 * in the tty and ldisc (not here).  If we couldn't push any
562 	 * this time around, there may be trouble unless there's an
563 	 * implicit tty_unthrottle() call on its way...
564 	 *
565 	 * REVISIT we should probably add a timer to keep the tasklet
566 	 * from starving ... but it's not clear that case ever happens.
567 	 */
568 	if (!list_empty(queue) && tty) {
569 		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
570 			if (do_push)
571 				tasklet_schedule(&port->push);
572 			else
573 				pr_warning(PREFIX "%d: RX not scheduled?\n",
574 					port->port_num);
575 		}
576 	}
577 
578 	/* If we're still connected, refill the USB RX queue. */
579 	if (!disconnect && port->port_usb)
580 		gs_start_rx(port);
581 
582 	spin_unlock_irq(&port->port_lock);
583 }
584 
585 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
586 {
587 	struct gs_port	*port = ep->driver_data;
588 
589 	/* Queue all received data until the tty layer is ready for it. */
590 	spin_lock(&port->port_lock);
591 	list_add_tail(&req->list, &port->read_queue);
592 	tasklet_schedule(&port->push);
593 	spin_unlock(&port->port_lock);
594 }
595 
596 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
597 {
598 	struct gs_port	*port = ep->driver_data;
599 
600 	spin_lock(&port->port_lock);
601 	list_add(&req->list, &port->write_pool);
602 	port->write_started--;
603 
604 	switch (req->status) {
605 	default:
606 		/* presumably a transient fault */
607 		pr_warning("%s: unexpected %s status %d\n",
608 				__func__, ep->name, req->status);
609 		/* FALL THROUGH */
610 	case 0:
611 		/* normal completion */
612 		gs_start_tx(port);
613 		break;
614 
615 	case -ESHUTDOWN:
616 		/* disconnect */
617 		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
618 		break;
619 	}
620 
621 	spin_unlock(&port->port_lock);
622 }
623 
624 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 							 int *allocated)
626 {
627 	struct usb_request	*req;
628 
629 	while (!list_empty(head)) {
630 		req = list_entry(head->next, struct usb_request, list);
631 		list_del(&req->list);
632 		gs_free_req(ep, req);
633 		if (allocated)
634 			(*allocated)--;
635 	}
636 }
637 
638 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
639 		void (*fn)(struct usb_ep *, struct usb_request *),
640 		int *allocated)
641 {
642 	int			i;
643 	struct usb_request	*req;
644 	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
645 
646 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
647 	 * do quite that many this time, don't fail ... we just won't
648 	 * be as speedy as we might otherwise be.
649 	 */
650 	for (i = 0; i < n; i++) {
651 		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
652 		if (!req)
653 			return list_empty(head) ? -ENOMEM : 0;
654 		req->complete = fn;
655 		list_add_tail(&req->list, head);
656 		if (allocated)
657 			(*allocated)++;
658 	}
659 	return 0;
660 }
661 
662 /**
663  * gs_start_io - start USB I/O streams
664  * @port: port whose USB endpoints should be used
665  * Context: holding port_lock; port_tty and port_usb are non-null
666  *
667  * We only start I/O when something is connected to both sides of
668  * this port.  If nothing is listening on the host side, we may
669  * be pointlessly filling up our TX buffers and FIFO.
670  */
671 static int gs_start_io(struct gs_port *port)
672 {
673 	struct list_head	*head = &port->read_pool;
674 	struct usb_ep		*ep = port->port_usb->out;
675 	int			status;
676 	unsigned		started;
677 
678 	/* Allocate RX and TX I/O buffers.  We can't easily do this much
679 	 * earlier (with GFP_KERNEL) because the requests are coupled to
680 	 * endpoints, as are the packet sizes we'll be using.  Different
681 	 * configurations may use different endpoints with a given port;
682 	 * and high speed vs full speed changes packet sizes too.
683 	 */
684 	status = gs_alloc_requests(ep, head, gs_read_complete,
685 		&port->read_allocated);
686 	if (status)
687 		return status;
688 
689 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
690 			gs_write_complete, &port->write_allocated);
691 	if (status) {
692 		gs_free_requests(ep, head, &port->read_allocated);
693 		return status;
694 	}
695 
696 	/* queue read requests */
697 	port->n_read = 0;
698 	started = gs_start_rx(port);
699 
700 	/* unblock any pending writes into our circular buffer */
701 	if (started) {
702 		tty_wakeup(port->port_tty);
703 	} else {
704 		gs_free_requests(ep, head, &port->read_allocated);
705 		gs_free_requests(port->port_usb->in, &port->write_pool,
706 			&port->write_allocated);
707 		status = -EIO;
708 	}
709 
710 	return status;
711 }
712 
713 /*-------------------------------------------------------------------------*/
714 
715 /* TTY Driver */
716 
717 /*
718  * gs_open sets up the link between a gs_port and its associated TTY.
719  * That link is broken *only* by TTY close(), and all driver methods
720  * know that.
721  */
722 static int gs_open(struct tty_struct *tty, struct file *file)
723 {
724 	int		port_num = tty->index;
725 	struct gs_port	*port;
726 	int		status;
727 
728 	if (port_num < 0 || port_num >= n_ports)
729 		return -ENXIO;
730 
731 	do {
732 		mutex_lock(&ports[port_num].lock);
733 		port = ports[port_num].port;
734 		if (!port)
735 			status = -ENODEV;
736 		else {
737 			spin_lock_irq(&port->port_lock);
738 
739 			/* already open?  Great. */
740 			if (port->open_count) {
741 				status = 0;
742 				port->open_count++;
743 
744 			/* currently opening/closing? wait ... */
745 			} else if (port->openclose) {
746 				status = -EBUSY;
747 
748 			/* ... else we do the work */
749 			} else {
750 				status = -EAGAIN;
751 				port->openclose = true;
752 			}
753 			spin_unlock_irq(&port->port_lock);
754 		}
755 		mutex_unlock(&ports[port_num].lock);
756 
757 		switch (status) {
758 		default:
759 			/* fully handled */
760 			return status;
761 		case -EAGAIN:
762 			/* must do the work */
763 			break;
764 		case -EBUSY:
765 			/* wait for EAGAIN task to finish */
766 			msleep(1);
767 			/* REVISIT could have a waitchannel here, if
768 			 * concurrent open performance is important
769 			 */
770 			break;
771 		}
772 	} while (status != -EAGAIN);
773 
774 	/* Do the "real open" */
775 	spin_lock_irq(&port->port_lock);
776 
777 	/* allocate circular buffer on first open */
778 	if (port->port_write_buf.buf_buf == NULL) {
779 
780 		spin_unlock_irq(&port->port_lock);
781 		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
782 		spin_lock_irq(&port->port_lock);
783 
784 		if (status) {
785 			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
786 				port->port_num, tty, file);
787 			port->openclose = false;
788 			goto exit_unlock_port;
789 		}
790 	}
791 
792 	/* REVISIT if REMOVED (ports[].port NULL), abort the open
793 	 * to let rmmod work faster (but this way isn't wrong).
794 	 */
795 
796 	/* REVISIT maybe wait for "carrier detect" */
797 
798 	tty->driver_data = port;
799 	port->port_tty = tty;
800 
801 	port->open_count = 1;
802 	port->openclose = false;
803 
804 	/* if connected, start the I/O stream */
805 	if (port->port_usb) {
806 		struct gserial	*gser = port->port_usb;
807 
808 		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
809 		gs_start_io(port);
810 
811 		if (gser->connect)
812 			gser->connect(gser);
813 	}
814 
815 	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
816 
817 	status = 0;
818 
819 exit_unlock_port:
820 	spin_unlock_irq(&port->port_lock);
821 	return status;
822 }
823 
824 static int gs_writes_finished(struct gs_port *p)
825 {
826 	int cond;
827 
828 	/* return true on disconnect or empty buffer */
829 	spin_lock_irq(&p->port_lock);
830 	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
831 	spin_unlock_irq(&p->port_lock);
832 
833 	return cond;
834 }
835 
836 static void gs_close(struct tty_struct *tty, struct file *file)
837 {
838 	struct gs_port *port = tty->driver_data;
839 	struct gserial	*gser;
840 
841 	spin_lock_irq(&port->port_lock);
842 
843 	if (port->open_count != 1) {
844 		if (port->open_count == 0)
845 			WARN_ON(1);
846 		else
847 			--port->open_count;
848 		goto exit;
849 	}
850 
851 	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
852 
853 	/* mark port as closing but in use; we can drop port lock
854 	 * and sleep if necessary
855 	 */
856 	port->openclose = true;
857 	port->open_count = 0;
858 
859 	gser = port->port_usb;
860 	if (gser && gser->disconnect)
861 		gser->disconnect(gser);
862 
863 	/* wait for circular write buffer to drain, disconnect, or at
864 	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
865 	 */
866 	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
867 		spin_unlock_irq(&port->port_lock);
868 		wait_event_interruptible_timeout(port->drain_wait,
869 					gs_writes_finished(port),
870 					GS_CLOSE_TIMEOUT * HZ);
871 		spin_lock_irq(&port->port_lock);
872 		gser = port->port_usb;
873 	}
874 
875 	/* Iff we're disconnected, there can be no I/O in flight so it's
876 	 * ok to free the circular buffer; else just scrub it.  And don't
877 	 * let the push tasklet fire again until we're re-opened.
878 	 */
879 	if (gser == NULL)
880 		gs_buf_free(&port->port_write_buf);
881 	else
882 		gs_buf_clear(&port->port_write_buf);
883 
884 	tty->driver_data = NULL;
885 	port->port_tty = NULL;
886 
887 	port->openclose = false;
888 
889 	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
890 			port->port_num, tty, file);
891 
892 	wake_up_interruptible(&port->close_wait);
893 exit:
894 	spin_unlock_irq(&port->port_lock);
895 }
896 
897 static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
898 {
899 	struct gs_port	*port = tty->driver_data;
900 	unsigned long	flags;
901 	int		status;
902 
903 	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
904 			port->port_num, tty, count);
905 
906 	spin_lock_irqsave(&port->port_lock, flags);
907 	if (count)
908 		count = gs_buf_put(&port->port_write_buf, buf, count);
909 	/* treat count == 0 as flush_chars() */
910 	if (port->port_usb)
911 		status = gs_start_tx(port);
912 	spin_unlock_irqrestore(&port->port_lock, flags);
913 
914 	return count;
915 }
916 
917 static int gs_put_char(struct tty_struct *tty, unsigned char ch)
918 {
919 	struct gs_port	*port = tty->driver_data;
920 	unsigned long	flags;
921 	int		status;
922 
923 	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
924 		port->port_num, tty, ch, __builtin_return_address(0));
925 
926 	spin_lock_irqsave(&port->port_lock, flags);
927 	status = gs_buf_put(&port->port_write_buf, &ch, 1);
928 	spin_unlock_irqrestore(&port->port_lock, flags);
929 
930 	return status;
931 }
932 
933 static void gs_flush_chars(struct tty_struct *tty)
934 {
935 	struct gs_port	*port = tty->driver_data;
936 	unsigned long	flags;
937 
938 	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
939 
940 	spin_lock_irqsave(&port->port_lock, flags);
941 	if (port->port_usb)
942 		gs_start_tx(port);
943 	spin_unlock_irqrestore(&port->port_lock, flags);
944 }
945 
946 static int gs_write_room(struct tty_struct *tty)
947 {
948 	struct gs_port	*port = tty->driver_data;
949 	unsigned long	flags;
950 	int		room = 0;
951 
952 	spin_lock_irqsave(&port->port_lock, flags);
953 	if (port->port_usb)
954 		room = gs_buf_space_avail(&port->port_write_buf);
955 	spin_unlock_irqrestore(&port->port_lock, flags);
956 
957 	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
958 		port->port_num, tty, room);
959 
960 	return room;
961 }
962 
963 static int gs_chars_in_buffer(struct tty_struct *tty)
964 {
965 	struct gs_port	*port = tty->driver_data;
966 	unsigned long	flags;
967 	int		chars = 0;
968 
969 	spin_lock_irqsave(&port->port_lock, flags);
970 	chars = gs_buf_data_avail(&port->port_write_buf);
971 	spin_unlock_irqrestore(&port->port_lock, flags);
972 
973 	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
974 		port->port_num, tty, chars);
975 
976 	return chars;
977 }
978 
979 /* undo side effects of setting TTY_THROTTLED */
980 static void gs_unthrottle(struct tty_struct *tty)
981 {
982 	struct gs_port		*port = tty->driver_data;
983 	unsigned long		flags;
984 
985 	spin_lock_irqsave(&port->port_lock, flags);
986 	if (port->port_usb) {
987 		/* Kickstart read queue processing.  We don't do xon/xoff,
988 		 * rts/cts, or other handshaking with the host, but if the
989 		 * read queue backs up enough we'll be NAKing OUT packets.
990 		 */
991 		tasklet_schedule(&port->push);
992 		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
993 	}
994 	spin_unlock_irqrestore(&port->port_lock, flags);
995 }
996 
997 static int gs_break_ctl(struct tty_struct *tty, int duration)
998 {
999 	struct gs_port	*port = tty->driver_data;
1000 	int		status = 0;
1001 	struct gserial	*gser;
1002 
1003 	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1004 			port->port_num, duration);
1005 
1006 	spin_lock_irq(&port->port_lock);
1007 	gser = port->port_usb;
1008 	if (gser && gser->send_break)
1009 		status = gser->send_break(gser, duration);
1010 	spin_unlock_irq(&port->port_lock);
1011 
1012 	return status;
1013 }
1014 
1015 static const struct tty_operations gs_tty_ops = {
1016 	.open =			gs_open,
1017 	.close =		gs_close,
1018 	.write =		gs_write,
1019 	.put_char =		gs_put_char,
1020 	.flush_chars =		gs_flush_chars,
1021 	.write_room =		gs_write_room,
1022 	.chars_in_buffer =	gs_chars_in_buffer,
1023 	.unthrottle =		gs_unthrottle,
1024 	.break_ctl =		gs_break_ctl,
1025 };
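
/* Once a port is opened on the gadget side (a getty, or something as
 * simple as "cat /dev/ttyGS0") and the host has configured the device,
 * bytes written at either end appear at the other.  How the host sees
 * the port (CDC ACM, plain bulk serial, OBEX, ...) depends on which
 * function driver was bound to it, not on this file.
 */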
1026 
1027 /*-------------------------------------------------------------------------*/
1028 
1029 static struct tty_driver *gs_tty_driver;
1030 
1031 static int __init
1032 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1033 {
1034 	struct gs_port	*port;
1035 
1036 	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1037 	if (port == NULL)
1038 		return -ENOMEM;
1039 
1040 	spin_lock_init(&port->port_lock);
1041 	init_waitqueue_head(&port->close_wait);
1042 	init_waitqueue_head(&port->drain_wait);
1043 
1044 	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
1045 
1046 	INIT_LIST_HEAD(&port->read_pool);
1047 	INIT_LIST_HEAD(&port->read_queue);
1048 	INIT_LIST_HEAD(&port->write_pool);
1049 
1050 	port->port_num = port_num;
1051 	port->port_line_coding = *coding;
1052 
1053 	ports[port_num].port = port;
1054 
1055 	return 0;
1056 }
1057 
1058 /**
1059  * gserial_setup - initialize TTY driver for one or more ports
1060  * @g: gadget to associate with these ports
1061  * @count: how many ports to support
1062  * Context: may sleep
1063  *
1064  * The TTY stack needs to know in advance how many devices it should
1065  * plan to manage.  Use this call to set up the ports you will be
1066  * exporting through USB.  Later, connect them to functions based
1067  * on what configuration is activated by the USB host; and disconnect
1068  * them as appropriate.
1069  *
1070  * An example would be a two-configuration device in which both
1071  * configurations expose port 0, but through different functions.
1072  * One configuration could even expose port 1 while the other
1073  * one doesn't.
1074  *
1075  * Returns negative errno or zero.
1076  */
1077 int __init gserial_setup(struct usb_gadget *g, unsigned count)
1078 {
1079 	unsigned			i;
1080 	struct usb_cdc_line_coding	coding;
1081 	int				status;
1082 
1083 	if (count == 0 || count > N_PORTS)
1084 		return -EINVAL;
1085 
1086 	gs_tty_driver = alloc_tty_driver(count);
1087 	if (!gs_tty_driver)
1088 		return -ENOMEM;
1089 
1090 	gs_tty_driver->owner = THIS_MODULE;
1091 	gs_tty_driver->driver_name = "g_serial";
1092 	gs_tty_driver->name = PREFIX;
1093 	/* uses dynamically assigned dev_t values */
1094 
1095 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1096 	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1097 	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1098 	gs_tty_driver->init_termios = tty_std_termios;
1099 
1100 	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1101 	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
1102 	 * anything unless we were to actually hook up to a serial line.
1103 	 */
1104 	gs_tty_driver->init_termios.c_cflag =
1105 			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1106 	gs_tty_driver->init_termios.c_ispeed = 9600;
1107 	gs_tty_driver->init_termios.c_ospeed = 9600;
1108 
1109 	coding.dwDTERate = cpu_to_le32(9600);
1110 	coding.bCharFormat = USB_CDC_1_STOP_BITS;
1111 	coding.bParityType = USB_CDC_NO_PARITY;
1112 	coding.bDataBits = 8;
1113 
1114 	tty_set_operations(gs_tty_driver, &gs_tty_ops);
1115 
1116 	/* make devices be openable */
1117 	for (i = 0; i < count; i++) {
1118 		mutex_init(&ports[i].lock);
1119 		status = gs_port_alloc(i, &coding);
1120 		if (status) {
1121 			count = i;
1122 			goto fail;
1123 		}
1124 	}
1125 	n_ports = count;
1126 
1127 	/* export the driver ... */
1128 	status = tty_register_driver(gs_tty_driver);
1129 	if (status) {
1130 		pr_err("%s: cannot register, err %d\n",
1131 				__func__, status);
1132 		goto fail;
1133 	}
1134 
1135 	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1136 	for (i = 0; i < count; i++) {
1137 		struct device	*tty_dev;
1138 
1139 		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1140 		if (IS_ERR(tty_dev))
1141 			pr_warning("%s: no classdev for port %d, err %ld\n",
1142 				__func__, i, PTR_ERR(tty_dev));
1143 	}
1144 
1145 	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1146 			count, (count == 1) ? "" : "s");
1147 
1148 	return status;
1149 fail:
1150 	while (count--)
1151 		kfree(ports[count].port);
1152 	put_tty_driver(gs_tty_driver);
1153 	gs_tty_driver = NULL;
1154 	return status;
1155 }
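
/* A hedged example of calling gserial_setup() from a composite gadget's
 * bind() routine ("cdev" is the usual struct usb_composite_dev pointer;
 * error handling beyond the return is elided):
 *
 *	status = gserial_setup(cdev->gadget, 2);	// ttyGS0 and ttyGS1
 *	if (status < 0)
 *		return status;
 *	// ... then add configurations whose functions use ports 0 and 1
 */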
1156 
1157 static int gs_closed(struct gs_port *port)
1158 {
1159 	int cond;
1160 
1161 	spin_lock_irq(&port->port_lock);
1162 	cond = (port->open_count == 0) && !port->openclose;
1163 	spin_unlock_irq(&port->port_lock);
1164 	return cond;
1165 }
1166 
1167 /**
1168  * gserial_cleanup - remove TTY-over-USB driver and devices
1169  * Context: may sleep
1170  *
1171  * This is called to free all resources allocated by @gserial_setup().
1172  * Accordingly, it may need to wait until some open /dev/ files have
1173  * closed.
1174  *
1175  * The caller must have issued @gserial_disconnect() for any ports
1176  * that had previously been connected, so that there is never any
1177  * I/O pending when it's called.
1178  */
1179 void gserial_cleanup(void)
1180 {
1181 	unsigned	i;
1182 	struct gs_port	*port;
1183 
1184 	if (!gs_tty_driver)
1185 		return;
1186 
1187 	/* start sysfs and /dev/ttyGS* node removal */
1188 	for (i = 0; i < n_ports; i++)
1189 		tty_unregister_device(gs_tty_driver, i);
1190 
1191 	for (i = 0; i < n_ports; i++) {
1192 		/* prevent new opens */
1193 		mutex_lock(&ports[i].lock);
1194 		port = ports[i].port;
1195 		ports[i].port = NULL;
1196 		mutex_unlock(&ports[i].lock);
1197 
1198 		tasklet_kill(&port->push);
1199 
1200 		/* wait for old opens to finish */
1201 		wait_event(port->close_wait, gs_closed(port));
1202 
1203 		WARN_ON(port->port_usb != NULL);
1204 
1205 		kfree(port);
1206 	}
1207 	n_ports = 0;
1208 
1209 	tty_unregister_driver(gs_tty_driver);
1210 	put_tty_driver(gs_tty_driver);
1211 	gs_tty_driver = NULL;
1212 
1213 	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
1214 }
1215 
1216 /**
1217  * gserial_connect - notify TTY I/O glue that USB link is active
1218  * @gser: the function, set up with endpoints and descriptors
1219  * @port_num: which port is active
1220  * Context: any (usually from irq)
1221  *
1222  * This is called to activate endpoints and let the TTY layer know that
1223  * the connection is active ... not unlike "carrier detect".  It won't
1224  * necessarily start I/O queues; unless the TTY is held open by any
1225  * task, there would be no point.  However, the endpoints will be
1226  * activated so the USB host can perform I/O, subject to basic USB
1227  * hardware flow control.
1228  *
1229  * Caller needs to have set up the endpoints and USB function in @gser
1230  * before calling this, as well as the appropriate (speed-specific)
1231  * endpoint descriptors, and also have set up the TTY driver by calling
1232  * @gserial_setup().
1233  *
1234  * Returns negative errno or zero.
1235  * On success, ep->driver_data will be overwritten.
1236  */
1237 int gserial_connect(struct gserial *gser, u8 port_num)
1238 {
1239 	struct gs_port	*port;
1240 	unsigned long	flags;
1241 	int		status;
1242 
1243 	if (!gs_tty_driver || port_num >= n_ports)
1244 		return -ENXIO;
1245 
1246 	/* we "know" gserial_cleanup() hasn't been called */
1247 	port = ports[port_num].port;
1248 
1249 	/* activate the endpoints */
1250 	status = usb_ep_enable(gser->in);
1251 	if (status < 0)
1252 		return status;
1253 	gser->in->driver_data = port;
1254 
1255 	status = usb_ep_enable(gser->out);
1256 	if (status < 0)
1257 		goto fail_out;
1258 	gser->out->driver_data = port;
1259 
1260 	/* then tell the tty glue that I/O can work */
1261 	spin_lock_irqsave(&port->port_lock, flags);
1262 	gser->ioport = port;
1263 	port->port_usb = gser;
1264 
1265 	/* REVISIT unclear how best to handle this state...
1266 	 * we don't really couple it with the Linux TTY.
1267 	 */
1268 	gser->port_line_coding = port->port_line_coding;
1269 
1270 	/* REVISIT if waiting on "carrier detect", signal. */
1271 
1272 	/* if it's already open, start I/O ... and notify the serial
1273 	 * protocol about open/close status (connect/disconnect).
1274 	 */
1275 	if (port->open_count) {
1276 		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1277 		gs_start_io(port);
1278 		if (gser->connect)
1279 			gser->connect(gser);
1280 	} else {
1281 		if (gser->disconnect)
1282 			gser->disconnect(gser);
1283 	}
1284 
1285 	spin_unlock_irqrestore(&port->port_lock, flags);
1286 
1287 	return status;
1288 
1289 fail_out:
1290 	usb_ep_disable(gser->in);
1291 	gser->in->driver_data = NULL;
1292 	return status;
1293 }
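
/* A hedged sketch of how a function driver's set_alt() might use
 * gserial_connect() ("acm" and its members are illustrative, in the
 * style of an ACM function, not defined in this file):
 *
 *	// choose speed-appropriate endpoint descriptors first, e.g.
 *	// via config_ep_by_speed(), then:
 *	status = gserial_connect(&acm->port, acm->port_num);
 *	if (status < 0)
 *		return status;
 */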
1294 
1295 /**
1296  * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1297  * @gser: the function, on which gserial_connect() was called
1298  * Context: any (usually from irq)
1299  *
1300  * This is called to deactivate endpoints and let the TTY layer know
1301  * that the connection went inactive ... not unlike "hangup".
1302  *
1303  * On return, the state is as if gserial_connect() had never been called;
1304  * there is no active USB I/O on these endpoints.
1305  */
1306 void gserial_disconnect(struct gserial *gser)
1307 {
1308 	struct gs_port	*port = gser->ioport;
1309 	unsigned long	flags;
1310 
1311 	if (!port)
1312 		return;
1313 
1314 	/* tell the TTY glue not to do I/O here any more */
1315 	spin_lock_irqsave(&port->port_lock, flags);
1316 
1317 	/* REVISIT as above: how best to track this? */
1318 	port->port_line_coding = gser->port_line_coding;
1319 
1320 	port->port_usb = NULL;
1321 	gser->ioport = NULL;
1322 	if (port->open_count > 0 || port->openclose) {
1323 		wake_up_interruptible(&port->drain_wait);
1324 		if (port->port_tty)
1325 			tty_hangup(port->port_tty);
1326 	}
1327 	spin_unlock_irqrestore(&port->port_lock, flags);
1328 
1329 	/* disable endpoints, aborting down any active I/O */
1330 	usb_ep_disable(gser->out);
1331 	gser->out->driver_data = NULL;
1332 
1333 	usb_ep_disable(gser->in);
1334 	gser->in->driver_data = NULL;
1335 
1336 	/* finally, free any unused/unusable I/O buffers */
1337 	spin_lock_irqsave(&port->port_lock, flags);
1338 	if (port->open_count == 0 && !port->openclose)
1339 		gs_buf_free(&port->port_write_buf);
1340 	gs_free_requests(gser->out, &port->read_pool, NULL);
1341 	gs_free_requests(gser->out, &port->read_queue, NULL);
1342 	gs_free_requests(gser->in, &port->write_pool, NULL);
1343 
1344 	port->read_allocated = port->read_started =
1345 		port->write_allocated = port->write_started = 0;
1346 
1347 	spin_unlock_irqrestore(&port->port_lock, flags);
1348 }
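
/* A hedged sketch of the matching call from a function driver's
 * disable() callback ("my_func" is illustrative):
 *
 *	gserial_disconnect(&my_func->port);
 *
 * After it returns, the port can be connected again by a later
 * gserial_connect(), or torn down for good by gserial_cleanup().
 */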
1349