1 /*
2  * file_storage.c -- File-backed USB Storage Gadget, for USB development
3  *
4  * Copyright (C) 2003-2008 Alan Stern
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The names of the above-listed copyright holders may not be used
17  *    to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * ALTERNATIVELY, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") as published by the Free Software
22  * Foundation, either version 2 of that License or (at your option) any
23  * later version.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 /*
40  * The File-backed Storage Gadget acts as a USB Mass Storage device,
41  * appearing to the host as a disk drive or as a CD-ROM drive.  In addition
42  * to providing an example of a genuinely useful gadget driver for a USB
43  * device, it also illustrates a technique of double-buffering for increased
44  * throughput.  Last but not least, it gives an easy way to probe the
45  * behavior of the Mass Storage drivers in a USB host.
46  *
47  * Backing storage is provided by a regular file or a block device, specified
48  * by the "file" module parameter.  Access can be limited to read-only by
49  * setting the optional "ro" module parameter.  (For CD-ROM emulation,
50  * access is always read-only.)  The gadget will indicate that it has
51  * removable media if the optional "removable" module parameter is set.
52  *
53  * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
54  * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
55  * by the optional "transport" module parameter.  It also supports the
56  * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
57  * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
58  * the optional "protocol" module parameter.  In addition, the default
59  * Vendor ID, Product ID, release number and serial number can be overridden.
60  *
61  * There is support for multiple logical units (LUNs), each of which has
62  * its own backing file.  The number of LUNs can be set using the optional
63  * "luns" module parameter (anywhere from 1 to 8), and the corresponding
64  * files are specified using comma-separated lists for "file" and "ro".
65  * The default number of LUNs is taken from the number of "file" elements;
66  * it is 1 if "file" is not given.  If "removable" is not set then a backing
67  * file must be specified for each LUN.  If it is set, then an unspecified
68  * or empty backing filename means the LUN's medium is not loaded.  Ideally
69  * each LUN would be settable independently as a disk drive or a CD-ROM
70  * drive, but currently all LUNs have to be the same type.  The CD-ROM
71  * emulation includes a single data track and no audio tracks; hence there
72  * need be only one backing file per LUN.
73  *
74  * Requirements are modest; only a bulk-in and a bulk-out endpoint are
75  * needed (an interrupt-out endpoint is also needed for CBI).  The memory
76  * requirement amounts to two 16K buffers, size configurable by the "buflen" parameter.
77  * Support is included for both full-speed and high-speed operation.
78  *
79  * Note that the driver is slightly non-portable in that it assumes a
80  * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
81  * interrupt-in endpoints.  With most device controllers this isn't an
82  * issue, but there may be some with hardware restrictions that prevent
83  * a buffer from being used by more than one endpoint.
84  *
85  * Module options:
86  *
87  *	file=filename[,filename...]
88  *				Required if "removable" is not set, names of
89  *					the files or block devices used for
90  *					backing storage
91  *	serial=HHHH...		Required serial number (string of hex chars)
92  *	ro=b[,b...]		Default false, booleans for read-only access
93  *	removable		Default false, boolean for removable media
94  *	luns=N			Default N = number of filenames, number of
95  *					LUNs to support
96  *	nofua=b[,b...]		Default false, booleans for ignore FUA flag
97  *					in SCSI WRITE(10,12) commands
98  *	stall			Default determined according to the type of
99  *					USB device controller (usually true),
100  *					boolean to permit the driver to halt
101  *					bulk endpoints
102  *	cdrom			Default false, boolean for whether to emulate
103  *					a CD-ROM drive
104  *	transport=XXX		Default BBB, transport name (CB, CBI, or BBB)
105  *	protocol=YYY		Default SCSI, protocol name (RBC, 8020 or
106  *					ATAPI, QIC, UFI, 8070, or SCSI;
107  *					also 1 - 6)
108  *	vendor=0xVVVV		Default 0x0525 (NetChip), USB Vendor ID
109  *	product=0xPPPP		Default 0xa4a5 (FSG), USB Product ID
110  *	release=0xRRRR		Override the USB release number (bcdDevice)
111  *	buflen=N		Default N=16384, buffer size used (will be
112  *					rounded down to a multiple of
113  *					PAGE_CACHE_SIZE)
114  *
115  * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
116  * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
117  * default values are used for everything else.  (An example invocation follows this comment.)
118  *
119  * The pathnames of the backing files and the ro and nofua settings are
120  * available in the attribute files "file", "ro", and "nofua" in the lun<n>
121  * subdirectory of the gadget's sysfs directory.  If the "removable" option is
122  * set, writing to these files will simulate ejecting/loading the medium
123  * (writing an empty line means eject) and adjusting a write-enable tab.
124  * Changes to the ro setting are not allowed when the medium is loaded or if
125  * CD-ROM emulation is being used.
126  *
127  * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
128  * The driver's SCSI command interface was based on the "Information
129  * technology - Small Computer System Interface - 2" document from
130  * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
131  * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.  The single exception
132  * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
133  * "Universal Serial Bus Mass Storage Class UFI Command Specification"
134  * document, Revision 1.0, December 14, 1998, available at
135  * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
136  */
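/*
 * Illustrative example (not taken from the original documentation): a
 * removable two-LUN disk backed by two image files might be set up with
 * something like
 *
 *	modprobe g_file_storage file=/path/disk1.img,/path/disk2.img \
 *		removable=y luns=2 ro=0,1
 *
 * The image pathnames are placeholders; the parameter names correspond to
 * the "file", "removable", "luns", and "ro" options listed above.
 */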
137 
138 
139 /*
140  *				Driver Design
141  *
142  * The FSG driver is fairly straightforward.  There is a main kernel
143  * thread that handles most of the work.  Interrupt routines field
144  * callbacks from the controller driver: bulk- and interrupt-request
145  * completion notifications, endpoint-0 events, and disconnect events.
146  * Completion events are passed to the main thread by wakeup calls.  Many
147  * ep0 requests are handled at interrupt time, but SetInterface,
148  * SetConfiguration, and device reset requests are forwarded to the
149  * thread in the form of "exceptions" using SIGUSR1 signals (since they
150  * should interrupt any ongoing file I/O operations).
151  *
152  * The thread's main routine implements the standard command/data/status
153  * parts of a SCSI interaction.  It and its subroutines are full of tests
154  * for pending signals/exceptions -- all this polling is necessary since
155  * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
156  * indication that the driver really wants to be running in userspace.)
157  * An important point is that so long as the thread is alive it keeps an
158  * open reference to the backing file.  This will prevent unmounting
159  * the backing file's underlying filesystem and could cause problems
160  * during system shutdown, for example.  To prevent such problems, the
161  * thread catches INT, TERM, and KILL signals and converts them into
162  * an EXIT exception.
163  *
164  * In normal operation the main thread is started during the gadget's
165  * fsg_bind() callback and stopped during fsg_unbind().  But it can also
166  * exit when it receives a signal, and there's no point leaving the
167  * gadget running when the thread is dead.  So just before the thread
168  * exits, it deregisters the gadget driver.  This makes things a little
169  * tricky: The driver is deregistered at two places, and the exiting
170  * thread can indirectly call fsg_unbind() which in turn can tell the
171  * thread to exit.  The first problem is resolved through the use of the
172  * REGISTERED atomic bitflag; the driver will only be deregistered once.
173  * The second problem is resolved by having fsg_unbind() check
174  * fsg->state; it won't try to stop the thread if the state is already
175  * FSG_STATE_TERMINATED.
176  *
177  * To provide maximum throughput, the driver uses a circular pipeline of
178  * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
179  * arbitrarily long; in practice the benefits don't justify having more
180  * than 2 stages (i.e., double buffering).  But it helps to think of the
181  * pipeline as being a long one.  Each buffer head contains a bulk-in and
182  * a bulk-out request pointer (since the buffer can be used for both
183  * output and input -- directions always are given from the host's
184  * point of view) as well as a pointer to the buffer and various state
185  * variables.
186  *
187  * Use of the pipeline follows a simple protocol.  There is a variable
188  * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
189  * At any time that buffer head may still be in use from an earlier
190  * request, so each buffer head has a state variable indicating whether
191  * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
192  * buffer head to be EMPTY, filling the buffer either by file I/O or by
193  * USB I/O (during which the buffer head is BUSY), and marking the buffer
194  * head FULL when the I/O is complete.  Then the buffer will be emptied
195  * (again possibly by USB I/O, during which it is marked BUSY) and
196  * finally marked EMPTY again (possibly by a completion routine); see the sketch just after this comment.
197  *
198  * A module parameter tells the driver to avoid stalling the bulk
199  * endpoints wherever the transport specification allows.  This is
200  * necessary for some UDCs like the SuperH, which cannot reliably clear a
201  * halt on a bulk endpoint.  However, under certain circumstances the
202  * Bulk-only specification requires a stall.  In such cases the driver
203  * will halt the endpoint and set a flag indicating that it should clear
204  * the halt in software during the next device reset.  Hopefully this
205  * will permit everything to work correctly.  Furthermore, although the
206  * specification allows the bulk-out endpoint to halt when the host sends
207  * too much data, implementing this would cause an unavoidable race.
208  * The driver will always use the "no-stall" approach for OUT transfers.
209  *
210  * One subtle point concerns sending status-stage responses for ep0
211  * requests.  Some of these requests, such as device reset, can involve
212  * interrupting an ongoing file I/O operation, which might take an
213  * arbitrarily long time.  During that delay the host might give up on
214  * the original ep0 request and issue a new one.  When that happens the
215  * driver should not notify the host about completion of the original
216  * request, as the host will no longer be waiting for it.  So the driver
217  * assigns to each ep0 request a unique tag, and it keeps track of the
218  * tag value of the request associated with a long-running exception
219  * (device-reset, interface-change, or configuration-change).  When the
220  * exception handler is finished, the status-stage response is submitted
221  * only if the current ep0 request tag is equal to the exception request
222  * tag.  Thus only the most recently received ep0 request will get a
223  * status-stage response.
224  *
225  * Warning: This driver source file is too long.  It ought to be split up
226  * into a header file plus about 3 separate .c files, to handle the details
227  * of the Gadget, USB Mass Storage, and SCSI protocols.
228  */
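/*
 * A rough sketch (not a verbatim excerpt of the code below) of the pipeline
 * protocol described above, using the identifiers defined later in this file:
 *
 *	bh = fsg->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY) {		// wait for the buffer
 *		rc = sleep_thread(fsg);			// to become free
 *		if (rc)
 *			return rc;
 *	}
 *	// Fill bh->buf by file I/O, or queue a USB transfer with
 *	// start_transfer() (the buffer is BUF_STATE_BUSY meanwhile);
 *	// the completion handler or the file I/O path then marks it
 *	// BUF_STATE_FULL, and the driver advances:
 *	fsg->next_buffhd_to_fill = bh->next;
 *
 * do_read() and do_write() below follow exactly this pattern, with
 * next_buffhd_to_drain walking behind next_buffhd_to_fill to empty the
 * buffers again.
 */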
229 
230 
231 /* #define VERBOSE_DEBUG */
232 /* #define DUMP_MSGS */
233 
234 
235 #include <linux/blkdev.h>
236 #include <linux/completion.h>
237 #include <linux/dcache.h>
238 #include <linux/delay.h>
239 #include <linux/device.h>
240 #include <linux/fcntl.h>
241 #include <linux/file.h>
242 #include <linux/fs.h>
243 #include <linux/kref.h>
244 #include <linux/kthread.h>
245 #include <linux/limits.h>
246 #include <linux/module.h>
247 #include <linux/rwsem.h>
248 #include <linux/slab.h>
249 #include <linux/spinlock.h>
250 #include <linux/string.h>
251 #include <linux/freezer.h>
252 #include <linux/utsname.h>
253 
254 #include <linux/usb/ch9.h>
255 #include <linux/usb/gadget.h>
256 
257 #include "gadget_chips.h"
258 
259 
260 
261 /*
262  * Kbuild is not very cooperative with respect to linking separately
263  * compiled library objects into one module.  So for now we won't use
264  * separate compilation ... ensuring init/exit sections work to shrink
265  * the runtime footprint, and giving us at least some parts of what
266  * a "gcc --combine ... part1.c part2.c part3.c ... " build would provide.
267  */
268 #include "usbstring.c"
269 #include "config.c"
270 #include "epautoconf.c"
271 
272 /*-------------------------------------------------------------------------*/
273 
274 #define DRIVER_DESC		"File-backed Storage Gadget"
275 #define DRIVER_NAME		"g_file_storage"
276 #define DRIVER_VERSION		"1 September 2010"
277 
278 static       char fsg_string_manufacturer[64];
279 static const char fsg_string_product[] = DRIVER_DESC;
280 static const char fsg_string_config[] = "Self-powered";
281 static const char fsg_string_interface[] = "Mass Storage";
282 
283 
284 #include "storage_common.c"
285 
286 
287 MODULE_DESCRIPTION(DRIVER_DESC);
288 MODULE_AUTHOR("Alan Stern");
289 MODULE_LICENSE("Dual BSD/GPL");
290 
291 /*
292  * This driver assumes self-powered hardware and has no way for users to
293  * trigger remote wakeup.  It uses autoconfiguration to select endpoints
294  * and endpoint addresses.
295  */
296 
297 
298 /*-------------------------------------------------------------------------*/
299 
300 
301 /* Encapsulate the module parameter settings */
302 
303 static struct {
304 	char		*file[FSG_MAX_LUNS];
305 	char		*serial;
306 	bool		ro[FSG_MAX_LUNS];
307 	bool		nofua[FSG_MAX_LUNS];
308 	unsigned int	num_filenames;
309 	unsigned int	num_ros;
310 	unsigned int	num_nofuas;
311 	unsigned int	nluns;
312 
313 	bool		removable;
314 	bool		can_stall;
315 	bool		cdrom;
316 
317 	char		*transport_parm;
318 	char		*protocol_parm;
319 	unsigned short	vendor;
320 	unsigned short	product;
321 	unsigned short	release;
322 	unsigned int	buflen;
323 
324 	int		transport_type;
325 	char		*transport_name;
326 	int		protocol_type;
327 	char		*protocol_name;
328 
329 } mod_data = {					// Default values
330 	.transport_parm		= "BBB",
331 	.protocol_parm		= "SCSI",
332 	.removable		= 0,
333 	.can_stall		= 1,
334 	.cdrom			= 0,
335 	.vendor			= FSG_VENDOR_ID,
336 	.product		= FSG_PRODUCT_ID,
337 	.release		= 0xffff,	// Use controller chip type
338 	.buflen			= 16384,
339 	};
340 
341 
342 module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
343 		S_IRUGO);
344 MODULE_PARM_DESC(file, "names of backing files or devices");
345 
346 module_param_named(serial, mod_data.serial, charp, S_IRUGO);
347 MODULE_PARM_DESC(serial, "USB serial number");
348 
349 module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
350 MODULE_PARM_DESC(ro, "true to force read-only");
351 
352 module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas,
353 		S_IRUGO);
354 MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit");
355 
356 module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
357 MODULE_PARM_DESC(luns, "number of LUNs");
358 
359 module_param_named(removable, mod_data.removable, bool, S_IRUGO);
360 MODULE_PARM_DESC(removable, "true to simulate removable media");
361 
362 module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
363 MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
364 
365 module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
366 MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
367 
368 /* In the non-TEST version, only the module parameters listed above
369  * are available. */
370 #ifdef CONFIG_USB_FILE_STORAGE_TEST
371 
372 module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
373 MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
374 
375 module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
376 MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
377 		"8070, or SCSI)");
378 
379 module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
380 MODULE_PARM_DESC(vendor, "USB Vendor ID");
381 
382 module_param_named(product, mod_data.product, ushort, S_IRUGO);
383 MODULE_PARM_DESC(product, "USB Product ID");
384 
385 module_param_named(release, mod_data.release, ushort, S_IRUGO);
386 MODULE_PARM_DESC(release, "USB release number");
387 
388 module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
389 MODULE_PARM_DESC(buflen, "I/O buffer size");
390 
391 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
392 
393 
394 /*
395  * These definitions will permit the compiler to avoid generating code for
396  * parts of the driver that aren't used in the non-TEST version.  Even gcc
397  * can recognize when a test of a constant expression yields a dead code
398  * path.
399  */
400 
401 #ifdef CONFIG_USB_FILE_STORAGE_TEST
402 
403 #define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
404 #define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
405 #define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)
406 
407 #else
408 
409 #define transport_is_bbb()	1
410 #define transport_is_cbi()	0
411 #define protocol_is_scsi()	1
412 
413 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
414 
415 
416 /*-------------------------------------------------------------------------*/
417 
418 
419 struct fsg_dev {
420 	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
421 	spinlock_t		lock;
422 	struct usb_gadget	*gadget;
423 
424 	/* filesem protects: backing files in use */
425 	struct rw_semaphore	filesem;
426 
427 	/* reference counting: wait until all LUNs are released */
428 	struct kref		ref;
429 
430 	struct usb_ep		*ep0;		// Handy copy of gadget->ep0
431 	struct usb_request	*ep0req;	// For control responses
432 	unsigned int		ep0_req_tag;
433 	const char		*ep0req_name;
434 
435 	struct usb_request	*intreq;	// For interrupt responses
436 	int			intreq_busy;
437 	struct fsg_buffhd	*intr_buffhd;
438 
439 	unsigned int		bulk_out_maxpacket;
440 	enum fsg_state		state;		// For exception handling
441 	unsigned int		exception_req_tag;
442 
443 	u8			config, new_config;
444 
445 	unsigned int		running : 1;
446 	unsigned int		bulk_in_enabled : 1;
447 	unsigned int		bulk_out_enabled : 1;
448 	unsigned int		intr_in_enabled : 1;
449 	unsigned int		phase_error : 1;
450 	unsigned int		short_packet_received : 1;
451 	unsigned int		bad_lun_okay : 1;
452 
453 	unsigned long		atomic_bitflags;
454 #define REGISTERED		0
455 #define IGNORE_BULK_OUT		1
456 #define SUSPENDED		2
457 
458 	struct usb_ep		*bulk_in;
459 	struct usb_ep		*bulk_out;
460 	struct usb_ep		*intr_in;
461 
462 	struct fsg_buffhd	*next_buffhd_to_fill;
463 	struct fsg_buffhd	*next_buffhd_to_drain;
464 
465 	int			thread_wakeup_needed;
466 	struct completion	thread_notifier;
467 	struct task_struct	*thread_task;
468 
469 	int			cmnd_size;
470 	u8			cmnd[MAX_COMMAND_SIZE];
471 	enum data_direction	data_dir;
472 	u32			data_size;
473 	u32			data_size_from_cmnd;
474 	u32			tag;
475 	unsigned int		lun;
476 	u32			residue;
477 	u32			usb_amount_left;
478 
479 	/* The CB protocol offers no way for a host to know when a command
480 	 * has completed.  As a result the next command may arrive early,
481 	 * and we will still have to handle it.  For that reason we need
482 	 * a buffer to store new commands when using CB (or CBI, which
483 	 * does not oblige a host to wait for command completion either). */
484 	int			cbbuf_cmnd_size;
485 	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];
486 
487 	unsigned int		nluns;
488 	struct fsg_lun		*luns;
489 	struct fsg_lun		*curlun;
490 	/* Must be the last entry */
491 	struct fsg_buffhd	buffhds[];
492 };
493 
494 typedef void (*fsg_routine_t)(struct fsg_dev *);
495 
496 static int exception_in_progress(struct fsg_dev *fsg)
497 {
498 	return (fsg->state > FSG_STATE_IDLE);
499 }
500 
501 /* Make bulk-out requests be divisible by the maxpacket size */
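/* For example (illustrative numbers), with a 512-byte bulk-out maxpacket a
 * 600-byte request is padded to 1024 bytes; bh->bulk_out_intended_length
 * still records the 600 bytes the driver actually wants. */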
502 static void set_bulk_out_req_length(struct fsg_dev *fsg,
503 		struct fsg_buffhd *bh, unsigned int length)
504 {
505 	unsigned int	rem;
506 
507 	bh->bulk_out_intended_length = length;
508 	rem = length % fsg->bulk_out_maxpacket;
509 	if (rem > 0)
510 		length += fsg->bulk_out_maxpacket - rem;
511 	bh->outreq->length = length;
512 }
513 
514 static struct fsg_dev			*the_fsg;
515 static struct usb_gadget_driver		fsg_driver;
516 
517 
518 /*-------------------------------------------------------------------------*/
519 
520 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
521 {
522 	const char	*name;
523 
524 	if (ep == fsg->bulk_in)
525 		name = "bulk-in";
526 	else if (ep == fsg->bulk_out)
527 		name = "bulk-out";
528 	else
529 		name = ep->name;
530 	DBG(fsg, "%s set halt\n", name);
531 	return usb_ep_set_halt(ep);
532 }
533 
534 
535 /*-------------------------------------------------------------------------*/
536 
537 /*
538  * DESCRIPTORS ... most are static, but strings and (full) configuration
539  * descriptors are built on demand.  Also the (static) config and interface
540  * descriptors are adjusted during fsg_bind().
541  */
542 
543 /* There is only one configuration. */
544 #define	CONFIG_VALUE		1
545 
546 static struct usb_device_descriptor
547 device_desc = {
548 	.bLength =		sizeof device_desc,
549 	.bDescriptorType =	USB_DT_DEVICE,
550 
551 	.bcdUSB =		cpu_to_le16(0x0200),
552 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
553 
554 	/* The next three values can be overridden by module parameters */
555 	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
556 	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
557 	.bcdDevice =		cpu_to_le16(0xffff),
558 
559 	.iManufacturer =	FSG_STRING_MANUFACTURER,
560 	.iProduct =		FSG_STRING_PRODUCT,
561 	.iSerialNumber =	FSG_STRING_SERIAL,
562 	.bNumConfigurations =	1,
563 };
564 
565 static struct usb_config_descriptor
566 config_desc = {
567 	.bLength =		sizeof config_desc,
568 	.bDescriptorType =	USB_DT_CONFIG,
569 
570 	/* wTotalLength computed by usb_gadget_config_buf() */
571 	.bNumInterfaces =	1,
572 	.bConfigurationValue =	CONFIG_VALUE,
573 	.iConfiguration =	FSG_STRING_CONFIG,
574 	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
575 	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
576 };
577 
578 
579 static struct usb_qualifier_descriptor
580 dev_qualifier = {
581 	.bLength =		sizeof dev_qualifier,
582 	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,
583 
584 	.bcdUSB =		cpu_to_le16(0x0200),
585 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
586 
587 	.bNumConfigurations =	1,
588 };
589 
590 static int populate_bos(struct fsg_dev *fsg, u8 *buf)
591 {
592 	memcpy(buf, &fsg_bos_desc, USB_DT_BOS_SIZE);
593 	buf += USB_DT_BOS_SIZE;
594 
595 	memcpy(buf, &fsg_ext_cap_desc, USB_DT_USB_EXT_CAP_SIZE);
596 	buf += USB_DT_USB_EXT_CAP_SIZE;
597 
598 	memcpy(buf, &fsg_ss_cap_desc, USB_DT_USB_SS_CAP_SIZE);
599 
600 	return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE
601 		+ USB_DT_USB_EXT_CAP_SIZE;
602 }
603 
604 /*
605  * Config descriptors must agree with the code that sets configurations
606  * and with code managing interfaces and their altsettings.  They must
607  * also handle different speeds and other-speed requests.
608  */
609 static int populate_config_buf(struct usb_gadget *gadget,
610 		u8 *buf, u8 type, unsigned index)
611 {
612 	enum usb_device_speed			speed = gadget->speed;
613 	int					len;
614 	const struct usb_descriptor_header	**function;
615 
616 	if (index > 0)
617 		return -EINVAL;
618 
619 	if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
620 		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
621 	function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
622 		? (const struct usb_descriptor_header **)fsg_hs_function
623 		: (const struct usb_descriptor_header **)fsg_fs_function;
624 
625 	/* for now, don't advertise srp-only devices */
626 	if (!gadget_is_otg(gadget))
627 		function++;
628 
629 	len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
630 	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
631 	return len;
632 }
633 
634 
635 /*-------------------------------------------------------------------------*/
636 
637 /* These routines may be called in process context or in_irq */
638 
639 /* Caller must hold fsg->lock */
640 static void wakeup_thread(struct fsg_dev *fsg)
641 {
642 	/* Tell the main thread that something has happened */
643 	fsg->thread_wakeup_needed = 1;
644 	if (fsg->thread_task)
645 		wake_up_process(fsg->thread_task);
646 }
647 
648 
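/* Exceptions are prioritized simply by the numeric value of their
 * enum fsg_state; the "fsg->state <= new_state" test below means a new
 * exception only preempts one of equal or lower priority. */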
649 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
650 {
651 	unsigned long		flags;
652 
653 	/* Do nothing if a higher-priority exception is already in progress.
654 	 * If a lower-or-equal priority exception is in progress, preempt it
655 	 * and notify the main thread by sending it a signal. */
656 	spin_lock_irqsave(&fsg->lock, flags);
657 	if (fsg->state <= new_state) {
658 		fsg->exception_req_tag = fsg->ep0_req_tag;
659 		fsg->state = new_state;
660 		if (fsg->thread_task)
661 			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
662 					fsg->thread_task);
663 	}
664 	spin_unlock_irqrestore(&fsg->lock, flags);
665 }
666 
667 
668 /*-------------------------------------------------------------------------*/
669 
670 /* The disconnect callback and ep0 routines.  These always run in_irq,
671  * except that ep0_queue() is called in the main thread to acknowledge
672  * completion of various requests: set config, set interface, and
673  * Bulk-only device reset. */
674 
675 static void fsg_disconnect(struct usb_gadget *gadget)
676 {
677 	struct fsg_dev		*fsg = get_gadget_data(gadget);
678 
679 	DBG(fsg, "disconnect or port reset\n");
680 	raise_exception(fsg, FSG_STATE_DISCONNECT);
681 }
682 
683 
684 static int ep0_queue(struct fsg_dev *fsg)
685 {
686 	int	rc;
687 
688 	rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
689 	if (rc != 0 && rc != -ESHUTDOWN) {
690 
691 		/* We can't do much more than wait for a reset */
692 		WARNING(fsg, "error in submission: %s --> %d\n",
693 				fsg->ep0->name, rc);
694 	}
695 	return rc;
696 }
697 
698 static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
699 {
700 	struct fsg_dev		*fsg = ep->driver_data;
701 
702 	if (req->actual > 0)
703 		dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
704 	if (req->status || req->actual != req->length)
705 		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
706 				req->status, req->actual, req->length);
707 	if (req->status == -ECONNRESET)		// Request was cancelled
708 		usb_ep_fifo_flush(ep);
709 
710 	if (req->status == 0 && req->context)
711 		((fsg_routine_t) (req->context))(fsg);
712 }
713 
714 
715 /*-------------------------------------------------------------------------*/
716 
717 /* Bulk and interrupt endpoint completion handlers.
718  * These always run in_irq. */
719 
720 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
721 {
722 	struct fsg_dev		*fsg = ep->driver_data;
723 	struct fsg_buffhd	*bh = req->context;
724 
725 	if (req->status || req->actual != req->length)
726 		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
727 				req->status, req->actual, req->length);
728 	if (req->status == -ECONNRESET)		// Request was cancelled
729 		usb_ep_fifo_flush(ep);
730 
731 	/* Hold the lock while we update the request and buffer states */
732 	smp_wmb();
733 	spin_lock(&fsg->lock);
734 	bh->inreq_busy = 0;
735 	bh->state = BUF_STATE_EMPTY;
736 	wakeup_thread(fsg);
737 	spin_unlock(&fsg->lock);
738 }
739 
740 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
741 {
742 	struct fsg_dev		*fsg = ep->driver_data;
743 	struct fsg_buffhd	*bh = req->context;
744 
745 	dump_msg(fsg, "bulk-out", req->buf, req->actual);
746 	if (req->status || req->actual != bh->bulk_out_intended_length)
747 		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
748 				req->status, req->actual,
749 				bh->bulk_out_intended_length);
750 	if (req->status == -ECONNRESET)		// Request was cancelled
751 		usb_ep_fifo_flush(ep);
752 
753 	/* Hold the lock while we update the request and buffer states */
754 	smp_wmb();
755 	spin_lock(&fsg->lock);
756 	bh->outreq_busy = 0;
757 	bh->state = BUF_STATE_FULL;
758 	wakeup_thread(fsg);
759 	spin_unlock(&fsg->lock);
760 }
761 
762 
763 #ifdef CONFIG_USB_FILE_STORAGE_TEST
764 static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
765 {
766 	struct fsg_dev		*fsg = ep->driver_data;
767 	struct fsg_buffhd	*bh = req->context;
768 
769 	if (req->status || req->actual != req->length)
770 		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
771 				req->status, req->actual, req->length);
772 	if (req->status == -ECONNRESET)		// Request was cancelled
773 		usb_ep_fifo_flush(ep);
774 
775 	/* Hold the lock while we update the request and buffer states */
776 	smp_wmb();
777 	spin_lock(&fsg->lock);
778 	fsg->intreq_busy = 0;
779 	bh->state = BUF_STATE_EMPTY;
780 	wakeup_thread(fsg);
781 	spin_unlock(&fsg->lock);
782 }
783 
784 #else
785 static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
786 {}
787 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
788 
789 
790 /*-------------------------------------------------------------------------*/
791 
792 /* Ep0 class-specific handlers.  These always run in_irq. */
793 
794 #ifdef CONFIG_USB_FILE_STORAGE_TEST
795 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
796 {
797 	struct usb_request	*req = fsg->ep0req;
798 	static u8		cbi_reset_cmnd[6] = {
799 			SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
800 
801 	/* Error in command transfer? */
802 	if (req->status || req->length != req->actual ||
803 			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
804 
805 		/* Not all controllers allow a protocol stall after
806 		 * receiving control-out data, but we'll try anyway. */
807 		fsg_set_halt(fsg, fsg->ep0);
808 		return;			// Wait for reset
809 	}
810 
811 	/* Is it the special reset command? */
812 	if (req->actual >= sizeof cbi_reset_cmnd &&
813 			memcmp(req->buf, cbi_reset_cmnd,
814 				sizeof cbi_reset_cmnd) == 0) {
815 
816 		/* Raise an exception to stop the current operation
817 		 * and reinitialize our state. */
818 		DBG(fsg, "cbi reset request\n");
819 		raise_exception(fsg, FSG_STATE_RESET);
820 		return;
821 	}
822 
823 	VDBG(fsg, "CB[I] accept device-specific command\n");
824 	spin_lock(&fsg->lock);
825 
826 	/* Save the command for later */
827 	if (fsg->cbbuf_cmnd_size)
828 		WARNING(fsg, "CB[I] overwriting previous command\n");
829 	fsg->cbbuf_cmnd_size = req->actual;
830 	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
831 
832 	wakeup_thread(fsg);
833 	spin_unlock(&fsg->lock);
834 }
835 
836 #else
837 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
838 {}
839 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
840 
841 
842 static int class_setup_req(struct fsg_dev *fsg,
843 		const struct usb_ctrlrequest *ctrl)
844 {
845 	struct usb_request	*req = fsg->ep0req;
846 	int			value = -EOPNOTSUPP;
847 	u16			w_index = le16_to_cpu(ctrl->wIndex);
848 	u16                     w_value = le16_to_cpu(ctrl->wValue);
849 	u16			w_length = le16_to_cpu(ctrl->wLength);
850 
851 	if (!fsg->config)
852 		return value;
853 
854 	/* Handle Bulk-only class-specific requests */
855 	if (transport_is_bbb()) {
856 		switch (ctrl->bRequest) {
857 
858 		case USB_BULK_RESET_REQUEST:
859 			if (ctrl->bRequestType != (USB_DIR_OUT |
860 					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
861 				break;
862 			if (w_index != 0 || w_value != 0 || w_length != 0) {
863 				value = -EDOM;
864 				break;
865 			}
866 
867 			/* Raise an exception to stop the current operation
868 			 * and reinitialize our state. */
869 			DBG(fsg, "bulk reset request\n");
870 			raise_exception(fsg, FSG_STATE_RESET);
871 			value = DELAYED_STATUS;
872 			break;
873 
874 		case USB_BULK_GET_MAX_LUN_REQUEST:
875 			if (ctrl->bRequestType != (USB_DIR_IN |
876 					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
877 				break;
878 			if (w_index != 0 || w_value != 0 || w_length != 1) {
879 				value = -EDOM;
880 				break;
881 			}
882 			VDBG(fsg, "get max LUN\n");
883 			*(u8 *) req->buf = fsg->nluns - 1;
884 			value = 1;
885 			break;
886 		}
887 	}
888 
889 	/* Handle CBI class-specific requests */
890 	else {
891 		switch (ctrl->bRequest) {
892 
893 		case USB_CBI_ADSC_REQUEST:
894 			if (ctrl->bRequestType != (USB_DIR_OUT |
895 					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
896 				break;
897 			if (w_index != 0 || w_value != 0) {
898 				value = -EDOM;
899 				break;
900 			}
901 			if (w_length > MAX_COMMAND_SIZE) {
902 				value = -EOVERFLOW;
903 				break;
904 			}
905 			value = w_length;
906 			fsg->ep0req->context = received_cbi_adsc;
907 			break;
908 		}
909 	}
910 
911 	if (value == -EOPNOTSUPP)
912 		VDBG(fsg,
913 			"unknown class-specific control req "
914 			"%02x.%02x v%04x i%04x l%u\n",
915 			ctrl->bRequestType, ctrl->bRequest,
916 			le16_to_cpu(ctrl->wValue), w_index, w_length);
917 	return value;
918 }
919 
920 
921 /*-------------------------------------------------------------------------*/
922 
923 /* Ep0 standard request handlers.  These always run in_irq. */
924 
925 static int standard_setup_req(struct fsg_dev *fsg,
926 		const struct usb_ctrlrequest *ctrl)
927 {
928 	struct usb_request	*req = fsg->ep0req;
929 	int			value = -EOPNOTSUPP;
930 	u16			w_index = le16_to_cpu(ctrl->wIndex);
931 	u16			w_value = le16_to_cpu(ctrl->wValue);
932 
933 	/* Usually this just stores reply data in the pre-allocated ep0 buffer,
934 	 * but config change events will also reconfigure hardware. */
935 	switch (ctrl->bRequest) {
936 
937 	case USB_REQ_GET_DESCRIPTOR:
938 		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
939 				USB_RECIP_DEVICE))
940 			break;
941 		switch (w_value >> 8) {
942 
943 		case USB_DT_DEVICE:
944 			VDBG(fsg, "get device descriptor\n");
945 			device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
946 			value = sizeof device_desc;
947 			memcpy(req->buf, &device_desc, value);
948 			break;
949 		case USB_DT_DEVICE_QUALIFIER:
950 			VDBG(fsg, "get device qualifier\n");
951 			if (!gadget_is_dualspeed(fsg->gadget) ||
952 					fsg->gadget->speed == USB_SPEED_SUPER)
953 				break;
954 			/*
955 			 * Assume ep0 uses the same maxpacket value for both
956 			 * speeds
957 			 */
958 			dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
959 			value = sizeof dev_qualifier;
960 			memcpy(req->buf, &dev_qualifier, value);
961 			break;
962 
963 		case USB_DT_OTHER_SPEED_CONFIG:
964 			VDBG(fsg, "get other-speed config descriptor\n");
965 			if (!gadget_is_dualspeed(fsg->gadget) ||
966 					fsg->gadget->speed == USB_SPEED_SUPER)
967 				break;
968 			goto get_config;
969 		case USB_DT_CONFIG:
970 			VDBG(fsg, "get configuration descriptor\n");
971 get_config:
972 			value = populate_config_buf(fsg->gadget,
973 					req->buf,
974 					w_value >> 8,
975 					w_value & 0xff);
976 			break;
977 
978 		case USB_DT_STRING:
979 			VDBG(fsg, "get string descriptor\n");
980 
981 			/* wIndex == language code */
982 			value = usb_gadget_get_string(&fsg_stringtab,
983 					w_value & 0xff, req->buf);
984 			break;
985 
986 		case USB_DT_BOS:
987 			VDBG(fsg, "get bos descriptor\n");
988 
989 			if (gadget_is_superspeed(fsg->gadget))
990 				value = populate_bos(fsg, req->buf);
991 			break;
992 		}
993 
994 		break;
995 
996 	/* One config, two speeds */
997 	case USB_REQ_SET_CONFIGURATION:
998 		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
999 				USB_RECIP_DEVICE))
1000 			break;
1001 		VDBG(fsg, "set configuration\n");
1002 		if (w_value == CONFIG_VALUE || w_value == 0) {
1003 			fsg->new_config = w_value;
1004 
1005 			/* Raise an exception to wipe out previous transaction
1006 			 * state (queued bufs, etc) and set the new config. */
1007 			raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
1008 			value = DELAYED_STATUS;
1009 		}
1010 		break;
1011 	case USB_REQ_GET_CONFIGURATION:
1012 		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1013 				USB_RECIP_DEVICE))
1014 			break;
1015 		VDBG(fsg, "get configuration\n");
1016 		*(u8 *) req->buf = fsg->config;
1017 		value = 1;
1018 		break;
1019 
1020 	case USB_REQ_SET_INTERFACE:
1021 		if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD |
1022 				USB_RECIP_INTERFACE))
1023 			break;
1024 		if (fsg->config && w_index == 0) {
1025 
1026 			/* Raise an exception to wipe out previous transaction
1027 			 * state (queued bufs, etc) and install the new
1028 			 * interface altsetting. */
1029 			raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
1030 			value = DELAYED_STATUS;
1031 		}
1032 		break;
1033 	case USB_REQ_GET_INTERFACE:
1034 		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1035 				USB_RECIP_INTERFACE))
1036 			break;
1037 		if (!fsg->config)
1038 			break;
1039 		if (w_index != 0) {
1040 			value = -EDOM;
1041 			break;
1042 		}
1043 		VDBG(fsg, "get interface\n");
1044 		*(u8 *) req->buf = 0;
1045 		value = 1;
1046 		break;
1047 
1048 	default:
1049 		VDBG(fsg,
1050 			"unknown control req %02x.%02x v%04x i%04x l%u\n",
1051 			ctrl->bRequestType, ctrl->bRequest,
1052 			w_value, w_index, le16_to_cpu(ctrl->wLength));
1053 	}
1054 
1055 	return value;
1056 }
1057 
1058 
1059 static int fsg_setup(struct usb_gadget *gadget,
1060 		const struct usb_ctrlrequest *ctrl)
1061 {
1062 	struct fsg_dev		*fsg = get_gadget_data(gadget);
1063 	int			rc;
1064 	int			w_length = le16_to_cpu(ctrl->wLength);
1065 
1066 	++fsg->ep0_req_tag;		// Record arrival of a new request
1067 	fsg->ep0req->context = NULL;
1068 	fsg->ep0req->length = 0;
1069 	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
1070 
1071 	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
1072 		rc = class_setup_req(fsg, ctrl);
1073 	else
1074 		rc = standard_setup_req(fsg, ctrl);
1075 
1076 	/* Respond with data/status or defer until later? */
1077 	if (rc >= 0 && rc != DELAYED_STATUS) {
1078 		rc = min(rc, w_length);
1079 		fsg->ep0req->length = rc;
1080 		fsg->ep0req->zero = rc < w_length;
1081 		fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
1082 				"ep0-in" : "ep0-out");
1083 		rc = ep0_queue(fsg);
1084 	}
1085 
1086 	/* Device either stalls (rc < 0) or reports success */
1087 	return rc;
1088 }
1089 
1090 
1091 /*-------------------------------------------------------------------------*/
1092 
1093 /* All the following routines run in process context */
1094 
1095 
1096 /* Use this for bulk or interrupt transfers, not ep0 */
1097 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1098 		struct usb_request *req, int *pbusy,
1099 		enum fsg_buffer_state *state)
1100 {
1101 	int	rc;
1102 
1103 	if (ep == fsg->bulk_in)
1104 		dump_msg(fsg, "bulk-in", req->buf, req->length);
1105 	else if (ep == fsg->intr_in)
1106 		dump_msg(fsg, "intr-in", req->buf, req->length);
1107 
1108 	spin_lock_irq(&fsg->lock);
1109 	*pbusy = 1;
1110 	*state = BUF_STATE_BUSY;
1111 	spin_unlock_irq(&fsg->lock);
1112 	rc = usb_ep_queue(ep, req, GFP_KERNEL);
1113 	if (rc != 0) {
1114 		*pbusy = 0;
1115 		*state = BUF_STATE_EMPTY;
1116 
1117 		/* We can't do much more than wait for a reset */
1118 
1119 		/* Note: currently the net2280 driver fails zero-length
1120 		 * submissions if DMA is enabled. */
1121 		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
1122 						req->length == 0))
1123 			WARNING(fsg, "error in submission: %s --> %d\n",
1124 					ep->name, rc);
1125 	}
1126 }
1127 
1128 
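/* Sleep until wakeup_thread() sets thread_wakeup_needed or a signal
 * arrives.  The sleep is freezable and interruptible so the thread can be
 * frozen for system suspend and can still react to the SIGUSR1-based
 * exceptions raised above. */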
1129 static int sleep_thread(struct fsg_dev *fsg)
1130 {
1131 	int	rc = 0;
1132 
1133 	/* Wait until a signal arrives or we are woken up */
1134 	for (;;) {
1135 		try_to_freeze();
1136 		set_current_state(TASK_INTERRUPTIBLE);
1137 		if (signal_pending(current)) {
1138 			rc = -EINTR;
1139 			break;
1140 		}
1141 		if (fsg->thread_wakeup_needed)
1142 			break;
1143 		schedule();
1144 	}
1145 	__set_current_state(TASK_RUNNING);
1146 	fsg->thread_wakeup_needed = 0;
1147 	return rc;
1148 }
1149 
1150 
1151 /*-------------------------------------------------------------------------*/
1152 
1153 static int do_read(struct fsg_dev *fsg)
1154 {
1155 	struct fsg_lun		*curlun = fsg->curlun;
1156 	u32			lba;
1157 	struct fsg_buffhd	*bh;
1158 	int			rc;
1159 	u32			amount_left;
1160 	loff_t			file_offset, file_offset_tmp;
1161 	unsigned int		amount;
1162 	ssize_t			nread;
1163 
1164 	/* Get the starting Logical Block Address and check that it's
1165 	 * not too big */
1166 	if (fsg->cmnd[0] == READ_6)
1167 		lba = get_unaligned_be24(&fsg->cmnd[1]);
1168 	else {
1169 		lba = get_unaligned_be32(&fsg->cmnd[2]);
1170 
1171 		/* We allow DPO (Disable Page Out = don't save data in the
1172 		 * cache) and FUA (Force Unit Access = don't read from the
1173 		 * cache), but we don't implement them. */
1174 		if ((fsg->cmnd[1] & ~0x18) != 0) {
1175 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1176 			return -EINVAL;
1177 		}
1178 	}
1179 	if (lba >= curlun->num_sectors) {
1180 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1181 		return -EINVAL;
1182 	}
1183 	file_offset = ((loff_t) lba) << curlun->blkbits;
1184 
1185 	/* Carry out the file reads */
1186 	amount_left = fsg->data_size_from_cmnd;
1187 	if (unlikely(amount_left == 0))
1188 		return -EIO;		// No default reply
1189 
1190 	for (;;) {
1191 
1192 		/* Figure out how much we need to read:
1193 		 * Try to read the remaining amount.
1194 		 * But don't read more than the buffer size.
1195 		 * And don't try to read past the end of the file.
1196 		 */
1197 		amount = min((unsigned int) amount_left, mod_data.buflen);
1198 		amount = min((loff_t) amount,
1199 				curlun->file_length - file_offset);
1200 
1201 		/* Wait for the next buffer to become available */
1202 		bh = fsg->next_buffhd_to_fill;
1203 		while (bh->state != BUF_STATE_EMPTY) {
1204 			rc = sleep_thread(fsg);
1205 			if (rc)
1206 				return rc;
1207 		}
1208 
1209 		/* If we were asked to read past the end of file,
1210 		 * end with an empty buffer. */
1211 		if (amount == 0) {
1212 			curlun->sense_data =
1213 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1214 			curlun->sense_data_info = file_offset >> curlun->blkbits;
1215 			curlun->info_valid = 1;
1216 			bh->inreq->length = 0;
1217 			bh->state = BUF_STATE_FULL;
1218 			break;
1219 		}
1220 
1221 		/* Perform the read */
1222 		file_offset_tmp = file_offset;
1223 		nread = vfs_read(curlun->filp,
1224 				(char __user *) bh->buf,
1225 				amount, &file_offset_tmp);
1226 		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1227 				(unsigned long long) file_offset,
1228 				(int) nread);
1229 		if (signal_pending(current))
1230 			return -EINTR;
1231 
1232 		if (nread < 0) {
1233 			LDBG(curlun, "error in file read: %d\n",
1234 					(int) nread);
1235 			nread = 0;
1236 		} else if (nread < amount) {
1237 			LDBG(curlun, "partial file read: %d/%u\n",
1238 					(int) nread, amount);
1239 			nread = round_down(nread, curlun->blksize);
1240 		}
1241 		file_offset  += nread;
1242 		amount_left  -= nread;
1243 		fsg->residue -= nread;
1244 
1245 		/* Except at the end of the transfer, nread will be
1246 		 * equal to the buffer size, which is divisible by the
1247 		 * bulk-in maxpacket size.
1248 		 */
1249 		bh->inreq->length = nread;
1250 		bh->state = BUF_STATE_FULL;
1251 
1252 		/* If an error occurred, report it and its position */
1253 		if (nread < amount) {
1254 			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1255 			curlun->sense_data_info = file_offset >> curlun->blkbits;
1256 			curlun->info_valid = 1;
1257 			break;
1258 		}
1259 
1260 		if (amount_left == 0)
1261 			break;		// No more left to read
1262 
1263 		/* Send this buffer and go read some more */
1264 		bh->inreq->zero = 0;
1265 		start_transfer(fsg, fsg->bulk_in, bh->inreq,
1266 				&bh->inreq_busy, &bh->state);
1267 		fsg->next_buffhd_to_fill = bh->next;
1268 	}
1269 
1270 	return -EIO;		// No default reply
1271 }
1272 
1273 
1274 /*-------------------------------------------------------------------------*/
1275 
1276 static int do_write(struct fsg_dev *fsg)
1277 {
1278 	struct fsg_lun		*curlun = fsg->curlun;
1279 	u32			lba;
1280 	struct fsg_buffhd	*bh;
1281 	int			get_some_more;
1282 	u32			amount_left_to_req, amount_left_to_write;
1283 	loff_t			usb_offset, file_offset, file_offset_tmp;
1284 	unsigned int		amount;
1285 	ssize_t			nwritten;
1286 	int			rc;
1287 
1288 	if (curlun->ro) {
1289 		curlun->sense_data = SS_WRITE_PROTECTED;
1290 		return -EINVAL;
1291 	}
1292 	spin_lock(&curlun->filp->f_lock);
1293 	curlun->filp->f_flags &= ~O_SYNC;	// Default is not to wait
1294 	spin_unlock(&curlun->filp->f_lock);
1295 
1296 	/* Get the starting Logical Block Address and check that it's
1297 	 * not too big */
1298 	if (fsg->cmnd[0] == WRITE_6)
1299 		lba = get_unaligned_be24(&fsg->cmnd[1]);
1300 	else {
1301 		lba = get_unaligned_be32(&fsg->cmnd[2]);
1302 
1303 		/* We allow DPO (Disable Page Out = don't save data in the
1304 		 * cache) and FUA (Force Unit Access = write directly to the
1305 		 * medium).  We don't implement DPO; we implement FUA by
1306 		 * performing synchronous output. */
1307 		if ((fsg->cmnd[1] & ~0x18) != 0) {
1308 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1309 			return -EINVAL;
1310 		}
1311 		/* FUA */
1312 		if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) {
1313 			spin_lock(&curlun->filp->f_lock);
1314 			curlun->filp->f_flags |= O_DSYNC;
1315 			spin_unlock(&curlun->filp->f_lock);
1316 		}
1317 	}
1318 	if (lba >= curlun->num_sectors) {
1319 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1320 		return -EINVAL;
1321 	}
1322 
1323 	/* Carry out the file writes */
1324 	get_some_more = 1;
1325 	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
1326 	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1327 
1328 	while (amount_left_to_write > 0) {
1329 
1330 		/* Queue a request for more data from the host */
1331 		bh = fsg->next_buffhd_to_fill;
1332 		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
1333 
1334 			/* Figure out how much we want to get:
1335 			 * Try to get the remaining amount,
1336 			 * but not more than the buffer size.
1337 			 */
1338 			amount = min(amount_left_to_req, mod_data.buflen);
1339 
1340 			/* Beyond the end of the backing file? */
1341 			if (usb_offset >= curlun->file_length) {
1342 				get_some_more = 0;
1343 				curlun->sense_data =
1344 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1345 				curlun->sense_data_info = usb_offset >> curlun->blkbits;
1346 				curlun->info_valid = 1;
1347 				continue;
1348 			}
1349 
1350 			/* Get the next buffer */
1351 			usb_offset += amount;
1352 			fsg->usb_amount_left -= amount;
1353 			amount_left_to_req -= amount;
1354 			if (amount_left_to_req == 0)
1355 				get_some_more = 0;
1356 
1357 			/* Except at the end of the transfer, amount will be
1358 			 * equal to the buffer size, which is divisible by
1359 			 * the bulk-out maxpacket size.
1360 			 */
1361 			set_bulk_out_req_length(fsg, bh, amount);
1362 			start_transfer(fsg, fsg->bulk_out, bh->outreq,
1363 					&bh->outreq_busy, &bh->state);
1364 			fsg->next_buffhd_to_fill = bh->next;
1365 			continue;
1366 		}
1367 
1368 		/* Write the received data to the backing file */
1369 		bh = fsg->next_buffhd_to_drain;
1370 		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1371 			break;			// We stopped early
1372 		if (bh->state == BUF_STATE_FULL) {
1373 			smp_rmb();
1374 			fsg->next_buffhd_to_drain = bh->next;
1375 			bh->state = BUF_STATE_EMPTY;
1376 
1377 			/* Did something go wrong with the transfer? */
1378 			if (bh->outreq->status != 0) {
1379 				curlun->sense_data = SS_COMMUNICATION_FAILURE;
1380 				curlun->sense_data_info = file_offset >> curlun->blkbits;
1381 				curlun->info_valid = 1;
1382 				break;
1383 			}
1384 
1385 			amount = bh->outreq->actual;
1386 			if (curlun->file_length - file_offset < amount) {
1387 				LERROR(curlun,
1388 	"write %u @ %llu beyond end %llu\n",
1389 	amount, (unsigned long long) file_offset,
1390 	(unsigned long long) curlun->file_length);
1391 				amount = curlun->file_length - file_offset;
1392 			}
1393 
1394 			/* Don't accept excess data.  The spec doesn't say
1395 			 * what to do in this case.  We'll ignore the error.
1396 			 */
1397 			amount = min(amount, bh->bulk_out_intended_length);
1398 
1399 			/* Don't write a partial block */
1400 			amount = round_down(amount, curlun->blksize);
1401 			if (amount == 0)
1402 				goto empty_write;
1403 
1404 			/* Perform the write */
1405 			file_offset_tmp = file_offset;
1406 			nwritten = vfs_write(curlun->filp,
1407 					(char __user *) bh->buf,
1408 					amount, &file_offset_tmp);
1409 			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1410 					(unsigned long long) file_offset,
1411 					(int) nwritten);
1412 			if (signal_pending(current))
1413 				return -EINTR;		// Interrupted!
1414 
1415 			if (nwritten < 0) {
1416 				LDBG(curlun, "error in file write: %d\n",
1417 						(int) nwritten);
1418 				nwritten = 0;
1419 			} else if (nwritten < amount) {
1420 				LDBG(curlun, "partial file write: %d/%u\n",
1421 						(int) nwritten, amount);
1422 				nwritten = round_down(nwritten, curlun->blksize);
1423 			}
1424 			file_offset += nwritten;
1425 			amount_left_to_write -= nwritten;
1426 			fsg->residue -= nwritten;
1427 
1428 			/* If an error occurred, report it and its position */
1429 			if (nwritten < amount) {
1430 				curlun->sense_data = SS_WRITE_ERROR;
1431 				curlun->sense_data_info = file_offset >> curlun->blkbits;
1432 				curlun->info_valid = 1;
1433 				break;
1434 			}
1435 
1436  empty_write:
1437 			/* Did the host decide to stop early? */
1438 			if (bh->outreq->actual < bh->bulk_out_intended_length) {
1439 				fsg->short_packet_received = 1;
1440 				break;
1441 			}
1442 			continue;
1443 		}
1444 
1445 		/* Wait for something to happen */
1446 		rc = sleep_thread(fsg);
1447 		if (rc)
1448 			return rc;
1449 	}
1450 
1451 	return -EIO;		// No default reply
1452 }
1453 
1454 
1455 /*-------------------------------------------------------------------------*/
1456 
1457 static int do_synchronize_cache(struct fsg_dev *fsg)
1458 {
1459 	struct fsg_lun	*curlun = fsg->curlun;
1460 	int		rc;
1461 
1462 	/* We ignore the requested LBA and write out all of the file's
1463 	 * dirty data buffers. */
1464 	rc = fsg_lun_fsync_sub(curlun);
1465 	if (rc)
1466 		curlun->sense_data = SS_WRITE_ERROR;
1467 	return 0;
1468 }
1469 
1470 
1471 /*-------------------------------------------------------------------------*/
1472 
1473 static void invalidate_sub(struct fsg_lun *curlun)
1474 {
1475 	struct file	*filp = curlun->filp;
1476 	struct inode	*inode = filp->f_path.dentry->d_inode;
1477 	unsigned long	rc;
1478 
1479 	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1480 	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1481 }
1482 
1483 static int do_verify(struct fsg_dev *fsg)
1484 {
1485 	struct fsg_lun		*curlun = fsg->curlun;
1486 	u32			lba;
1487 	u32			verification_length;
1488 	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
1489 	loff_t			file_offset, file_offset_tmp;
1490 	u32			amount_left;
1491 	unsigned int		amount;
1492 	ssize_t			nread;
1493 
1494 	/* Get the starting Logical Block Address and check that it's
1495 	 * not too big */
1496 	lba = get_unaligned_be32(&fsg->cmnd[2]);
1497 	if (lba >= curlun->num_sectors) {
1498 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1499 		return -EINVAL;
1500 	}
1501 
1502 	/* We allow DPO (Disable Page Out = don't save data in the
1503 	 * cache) but we don't implement it. */
1504 	if ((fsg->cmnd[1] & ~0x10) != 0) {
1505 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1506 		return -EINVAL;
1507 	}
1508 
1509 	verification_length = get_unaligned_be16(&fsg->cmnd[7]);
1510 	if (unlikely(verification_length == 0))
1511 		return -EIO;		// No default reply
1512 
1513 	/* Prepare to carry out the file verify */
1514 	amount_left = verification_length << curlun->blkbits;
1515 	file_offset = ((loff_t) lba) << curlun->blkbits;
1516 
1517 	/* Write out all the dirty buffers before invalidating them */
1518 	fsg_lun_fsync_sub(curlun);
1519 	if (signal_pending(current))
1520 		return -EINTR;
1521 
1522 	invalidate_sub(curlun);
1523 	if (signal_pending(current))
1524 		return -EINTR;
1525 
1526 	/* Just try to read the requested blocks */
1527 	while (amount_left > 0) {
1528 
1529 		/* Figure out how much we need to read:
1530 		 * Try to read the remaining amount, but not more than
1531 		 * the buffer size.
1532 		 * And don't try to read past the end of the file.
1533 		 */
1534 		amount = min((unsigned int) amount_left, mod_data.buflen);
1535 		amount = min((loff_t) amount,
1536 				curlun->file_length - file_offset);
1537 		if (amount == 0) {
1538 			curlun->sense_data =
1539 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1540 			curlun->sense_data_info = file_offset >> curlun->blkbits;
1541 			curlun->info_valid = 1;
1542 			break;
1543 		}
1544 
1545 		/* Perform the read */
1546 		file_offset_tmp = file_offset;
1547 		nread = vfs_read(curlun->filp,
1548 				(char __user *) bh->buf,
1549 				amount, &file_offset_tmp);
1550 		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1551 				(unsigned long long) file_offset,
1552 				(int) nread);
1553 		if (signal_pending(current))
1554 			return -EINTR;
1555 
1556 		if (nread < 0) {
1557 			LDBG(curlun, "error in file verify: %d\n",
1558 					(int) nread);
1559 			nread = 0;
1560 		} else if (nread < amount) {
1561 			LDBG(curlun, "partial file verify: %d/%u\n",
1562 					(int) nread, amount);
1563 			nread = round_down(nread, curlun->blksize);
1564 		}
1565 		if (nread == 0) {
1566 			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1567 			curlun->sense_data_info = file_offset >> curlun->blkbits;
1568 			curlun->info_valid = 1;
1569 			break;
1570 		}
1571 		file_offset += nread;
1572 		amount_left -= nread;
1573 	}
1574 	return 0;
1575 }
1576 
1577 
1578 /*-------------------------------------------------------------------------*/
1579 
1580 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1581 {
1582 	u8	*buf = (u8 *) bh->buf;
1583 
1584 	static char vendor_id[] = "Linux   ";
1585 	static char product_disk_id[] = "File-Stor Gadget";
1586 	static char product_cdrom_id[] = "File-CD Gadget  ";
1587 
1588 	if (!fsg->curlun) {		// Unsupported LUNs are okay
1589 		fsg->bad_lun_okay = 1;
1590 		memset(buf, 0, 36);
1591 		buf[0] = 0x7f;		// Unsupported, no device-type
1592 		buf[4] = 31;		// Additional length
1593 		return 36;
1594 	}
1595 
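	/* Build the 36-byte standard INQUIRY response:
	 * byte 0 = peripheral device type, byte 1 bit 7 = removable (RMB),
	 * bytes 2-3 = version/response data format, byte 4 = additional
	 * length, bytes 8-15 = vendor ID, 16-31 = product ID, 32-35 =
	 * revision (filled here with the 4-digit hex release number). */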
1596 	memset(buf, 0, 8);
1597 	buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK);
1598 	if (mod_data.removable)
1599 		buf[1] = 0x80;
1600 	buf[2] = 2;		// ANSI SCSI level 2
1601 	buf[3] = 2;		// SCSI-2 INQUIRY data format
1602 	buf[4] = 31;		// Additional length
1603 				// No special options
1604 	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
1605 			(mod_data.cdrom ? product_cdrom_id :
1606 				product_disk_id),
1607 			mod_data.release);
1608 	return 36;
1609 }
1610 
1611 
1612 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1613 {
1614 	struct fsg_lun	*curlun = fsg->curlun;
1615 	u8		*buf = (u8 *) bh->buf;
1616 	u32		sd, sdinfo;
1617 	int		valid;
1618 
1619 	/*
1620 	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1621 	 *
1622 	 * If a REQUEST SENSE command is received from an initiator
1623 	 * with a pending unit attention condition (before the target
1624 	 * generates the contingent allegiance condition), then the
1625 	 * target shall either:
1626 	 *   a) report any pending sense data and preserve the unit
1627 	 *	attention condition on the logical unit, or,
1628 	 *   b) report the unit attention condition, may discard any
1629 	 *	pending sense data, and clear the unit attention
1630 	 *	condition on the logical unit for that initiator.
1631 	 *
1632 	 * FSG normally uses option a); enable this code to use option b).
1633 	 */
1634 #if 0
1635 	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1636 		curlun->sense_data = curlun->unit_attention_data;
1637 		curlun->unit_attention_data = SS_NO_SENSE;
1638 	}
1639 #endif
1640 
1641 	if (!curlun) {		// Unsupported LUNs are okay
1642 		fsg->bad_lun_okay = 1;
1643 		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1644 		sdinfo = 0;
1645 		valid = 0;
1646 	} else {
1647 		sd = curlun->sense_data;
1648 		sdinfo = curlun->sense_data_info;
1649 		valid = curlun->info_valid << 7;
1650 		curlun->sense_data = SS_NO_SENSE;
1651 		curlun->sense_data_info = 0;
1652 		curlun->info_valid = 0;
1653 	}
1654 
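	/* Build 18 bytes of fixed-format sense data:
	 * byte 0 = 0x70 (current errors) with the Valid bit in bit 7,
	 * byte 2 = sense key, bytes 3-6 = information field,
	 * byte 7 = additional length (10), byte 12 = ASC, byte 13 = ASCQ. */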
1655 	memset(buf, 0, 18);
1656 	buf[0] = valid | 0x70;			// Valid, current error
1657 	buf[2] = SK(sd);
1658 	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
1659 	buf[7] = 18 - 8;			// Additional sense length
1660 	buf[12] = ASC(sd);
1661 	buf[13] = ASCQ(sd);
1662 	return 18;
1663 }
1664 
1665 
1666 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1667 {
1668 	struct fsg_lun	*curlun = fsg->curlun;
1669 	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
1670 	int		pmi = fsg->cmnd[8];
1671 	u8		*buf = (u8 *) bh->buf;
1672 
1673 	/* Check the PMI and LBA fields */
1674 	if (pmi > 1 || (pmi == 0 && lba != 0)) {
1675 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1676 		return -EINVAL;
1677 	}
1678 
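	/* READ CAPACITY(10) returns 8 bytes: the LBA of the last block
	 * (not the total block count) followed by the block length in
	 * bytes. */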
1679 	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1680 						/* Max logical block */
1681 	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
1682 	return 8;
1683 }
1684 
1685 
1686 static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1687 {
1688 	struct fsg_lun	*curlun = fsg->curlun;
1689 	int		msf = fsg->cmnd[1] & 0x02;
1690 	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
1691 	u8		*buf = (u8 *) bh->buf;
1692 
1693 	if ((fsg->cmnd[1] & ~0x02) != 0) {		/* Mask away MSF */
1694 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1695 		return -EINVAL;
1696 	}
1697 	if (lba >= curlun->num_sectors) {
1698 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1699 		return -EINVAL;
1700 	}
1701 
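	/* The 8-byte READ HEADER response: byte 0 gives the CD data mode
	 * (0x01 = mode 1, 2048 bytes of user data per sector) and bytes
	 * 4-7 hold the block's address, in LBA or MSF form as requested. */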
1702 	memset(buf, 0, 8);
1703 	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
1704 	store_cdrom_address(&buf[4], msf, lba);
1705 	return 8;
1706 }
1707 
1708 
1709 static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1710 {
1711 	struct fsg_lun	*curlun = fsg->curlun;
1712 	int		msf = fsg->cmnd[1] & 0x02;
1713 	int		start_track = fsg->cmnd[6];
1714 	u8		*buf = (u8 *) bh->buf;
1715 
1716 	if ((fsg->cmnd[1] & ~0x02) != 0 ||		/* Mask away MSF */
1717 			start_track > 1) {
1718 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1719 		return -EINVAL;
1720 	}
1721 
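	/* Minimal 20-byte TOC: a 4-byte header (data length, first and
	 * last track numbers) followed by two 8-byte descriptors -- one
	 * for the single data track and one for the lead-out (0xAA). */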
1722 	memset(buf, 0, 20);
1723 	buf[1] = (20-2);		/* TOC data length */
1724 	buf[2] = 1;			/* First track number */
1725 	buf[3] = 1;			/* Last track number */
1726 	buf[5] = 0x16;			/* Data track, copying allowed */
1727 	buf[6] = 0x01;			/* Only track is number 1 */
1728 	store_cdrom_address(&buf[8], msf, 0);
1729 
1730 	buf[13] = 0x16;			/* Lead-out track is data */
1731 	buf[14] = 0xAA;			/* Lead-out track number */
1732 	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1733 	return 20;
1734 }
1735 
1736 
1737 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1738 {
1739 	struct fsg_lun	*curlun = fsg->curlun;
1740 	int		mscmnd = fsg->cmnd[0];
1741 	u8		*buf = (u8 *) bh->buf;
1742 	u8		*buf0 = buf;
1743 	int		pc, page_code;
1744 	int		changeable_values, all_pages;
1745 	int		valid_page = 0;
1746 	int		len, limit;
1747 
1748 	if ((fsg->cmnd[1] & ~0x08) != 0) {		// Mask away DBD
1749 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1750 		return -EINVAL;
1751 	}
1752 	pc = fsg->cmnd[2] >> 6;
1753 	page_code = fsg->cmnd[2] & 0x3f;
1754 	if (pc == 3) {
1755 		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1756 		return -EINVAL;
1757 	}
1758 	changeable_values = (pc == 1);
1759 	all_pages = (page_code == 0x3f);
1760 
1761 	/* Write the mode parameter header.  Fixed values are: default
1762 	 * medium type, no cache control (DPOFUA), and no block descriptors.
1763 	 * The only variable value is the WriteProtect bit.  We will fill in
1764 	 * the mode data length later. */
1765 	memset(buf, 0, 8);
1766 	if (mscmnd == MODE_SENSE) {
1767 		buf[2] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
1768 		buf += 4;
1769 		limit = 255;
1770 	} else {			// MODE_SENSE_10
1771 		buf[3] = (curlun->ro ? 0x80 : 0x00);		// WP, DPOFUA
1772 		buf += 8;
1773 		limit = 65535;		// Should really be mod_data.buflen
1774 	}
1775 
1776 	/* No block descriptors */
1777 
1778 	/* The mode pages, in numerical order.  The only page we support
1779 	 * is the Caching page. */
1780 	if (page_code == 0x08 || all_pages) {
1781 		valid_page = 1;
1782 		buf[0] = 0x08;		// Page code
1783 		buf[1] = 10;		// Page length
1784 		memset(buf+2, 0, 10);	// None of the fields are changeable
1785 
1786 		if (!changeable_values) {
1787 			buf[2] = 0x04;	// Write cache enable,
1788 					// Read cache not disabled
1789 					// No cache retention priorities
1790 			put_unaligned_be16(0xffff, &buf[4]);
1791 					/* Don't disable prefetch */
1792 					/* Minimum prefetch = 0 */
1793 			put_unaligned_be16(0xffff, &buf[8]);
1794 					/* Maximum prefetch */
1795 			put_unaligned_be16(0xffff, &buf[10]);
1796 					/* Maximum prefetch ceiling */
1797 		}
1798 		buf += 12;
1799 	}
1800 
1801 	/* Check that a valid page was requested and the mode data length
1802 	 * isn't too long. */
1803 	len = buf - buf0;
1804 	if (!valid_page || len > limit) {
1805 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1806 		return -EINVAL;
1807 	}
1808 
1809 	/*  Store the mode data length */
1810 	if (mscmnd == MODE_SENSE)
1811 		buf0[0] = len - 1;
1812 	else
1813 		put_unaligned_be16(len - 2, buf0);
1814 	return len;
1815 }
1816 
1817 
1818 static int do_start_stop(struct fsg_dev *fsg)
1819 {
1820 	struct fsg_lun	*curlun = fsg->curlun;
1821 	int		loej, start;
1822 
1823 	if (!mod_data.removable) {
1824 		curlun->sense_data = SS_INVALID_COMMAND;
1825 		return -EINVAL;
1826 	}
1827 
1828 	// int immed = fsg->cmnd[1] & 0x01;
1829 	loej = fsg->cmnd[4] & 0x02;
1830 	start = fsg->cmnd[4] & 0x01;
1831 
1832 #ifdef CONFIG_USB_FILE_STORAGE_TEST
1833 	if ((fsg->cmnd[1] & ~0x01) != 0 ||		// Mask away Immed
1834 			(fsg->cmnd[4] & ~0x03) != 0) {	// Mask LoEj, Start
1835 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1836 		return -EINVAL;
1837 	}
1838 
1839 	if (!start) {
1840 
1841 		/* Are we allowed to unload the media? */
1842 		if (curlun->prevent_medium_removal) {
1843 			LDBG(curlun, "unload attempt prevented\n");
1844 			curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
1845 			return -EINVAL;
1846 		}
1847 		if (loej) {		// Simulate an unload/eject
1848 			up_read(&fsg->filesem);
1849 			down_write(&fsg->filesem);
1850 			fsg_lun_close(curlun);
1851 			up_write(&fsg->filesem);
1852 			down_read(&fsg->filesem);
1853 		}
1854 	} else {
1855 
1856 		/* Our emulation doesn't support mounting; the medium is
1857 		 * available for use as soon as it is loaded. */
1858 		if (!fsg_lun_is_open(curlun)) {
1859 			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1860 			return -EINVAL;
1861 		}
1862 	}
1863 #endif
1864 	return 0;
1865 }
1866 
1867 
1868 static int do_prevent_allow(struct fsg_dev *fsg)
1869 {
1870 	struct fsg_lun	*curlun = fsg->curlun;
1871 	int		prevent;
1872 
1873 	if (!mod_data.removable) {
1874 		curlun->sense_data = SS_INVALID_COMMAND;
1875 		return -EINVAL;
1876 	}
1877 
1878 	prevent = fsg->cmnd[4] & 0x01;
1879 	if ((fsg->cmnd[4] & ~0x01) != 0) {		// Mask away Prevent
1880 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1881 		return -EINVAL;
1882 	}
1883 
1884 	if (curlun->prevent_medium_removal && !prevent)
1885 		fsg_lun_fsync_sub(curlun);
1886 	curlun->prevent_medium_removal = prevent;
1887 	return 0;
1888 }
1889 
1890 
1891 static int do_read_format_capacities(struct fsg_dev *fsg,
1892 			struct fsg_buffhd *bh)
1893 {
1894 	struct fsg_lun	*curlun = fsg->curlun;
1895 	u8		*buf = (u8 *) bh->buf;
1896 
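	/* The response is a 4-byte Capacity List Header (byte 3 = list
	 * length) followed by one 8-byte Current/Maximum Capacity
	 * Descriptor: bytes 0-3 = number of blocks, byte 4 = descriptor
	 * code, bytes 5-7 = block length.  Note that buf[4] below is
	 * overwritten after the 32-bit block-length store because the
	 * block length only occupies the low three bytes. */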
1897 	buf[0] = buf[1] = buf[2] = 0;
1898 	buf[3] = 8;		// Only the Current/Maximum Capacity Descriptor
1899 	buf += 4;
1900 
1901 	put_unaligned_be32(curlun->num_sectors, &buf[0]);
1902 						/* Number of blocks */
1903 	put_unaligned_be32(curlun->blksize, &buf[4]);	/* Block length */
1904 	buf[4] = 0x02;				/* Current capacity: formatted media */
1905 	return 12;
1906 }
1907 
1908 
1909 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1910 {
1911 	struct fsg_lun	*curlun = fsg->curlun;
1912 
1913 	/* We don't support MODE SELECT */
1914 	curlun->sense_data = SS_INVALID_COMMAND;
1915 	return -EINVAL;
1916 }
1917 
1918 
1919 /*-------------------------------------------------------------------------*/
1920 
1921 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1922 {
1923 	int	rc;
1924 
1925 	rc = fsg_set_halt(fsg, fsg->bulk_in);
1926 	if (rc == -EAGAIN)
1927 		VDBG(fsg, "delayed bulk-in endpoint halt\n");
1928 	while (rc != 0) {
1929 		if (rc != -EAGAIN) {
1930 			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1931 			rc = 0;
1932 			break;
1933 		}
1934 
1935 		/* Wait for a short time and then try again */
1936 		if (msleep_interruptible(100) != 0)
1937 			return -EINTR;
1938 		rc = usb_ep_set_halt(fsg->bulk_in);
1939 	}
1940 	return rc;
1941 }
1942 
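/* Like halting, except that a wedged endpoint stays halted even if the
 * host issues Clear-Feature(HALT); it is released only by the next
 * reset.  The Bulk-only spec requires the stall to persist after a bad
 * CBW, which is what this is used for. */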
1943 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1944 {
1945 	int	rc;
1946 
1947 	DBG(fsg, "bulk-in set wedge\n");
1948 	rc = usb_ep_set_wedge(fsg->bulk_in);
1949 	if (rc == -EAGAIN)
1950 		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1951 	while (rc != 0) {
1952 		if (rc != -EAGAIN) {
1953 			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1954 			rc = 0;
1955 			break;
1956 		}
1957 
1958 		/* Wait for a short time and then try again */
1959 		if (msleep_interruptible(100) != 0)
1960 			return -EINTR;
1961 		rc = usb_ep_set_wedge(fsg->bulk_in);
1962 	}
1963 	return rc;
1964 }
1965 
1966 static int throw_away_data(struct fsg_dev *fsg)
1967 {
1968 	struct fsg_buffhd	*bh;
1969 	u32			amount;
1970 	int			rc;
1971 
1972 	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1973 			fsg->usb_amount_left > 0) {
1974 
1975 		/* Throw away the data in a filled buffer */
1976 		if (bh->state == BUF_STATE_FULL) {
1977 			smp_rmb();
1978 			bh->state = BUF_STATE_EMPTY;
1979 			fsg->next_buffhd_to_drain = bh->next;
1980 
1981 			/* A short packet or an error ends everything */
1982 			if (bh->outreq->actual < bh->bulk_out_intended_length ||
1983 					bh->outreq->status != 0) {
1984 				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1985 				return -EINTR;
1986 			}
1987 			continue;
1988 		}
1989 
1990 		/* Try to submit another request if we need one */
1991 		bh = fsg->next_buffhd_to_fill;
1992 		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1993 			amount = min(fsg->usb_amount_left,
1994 					(u32) mod_data.buflen);
1995 
1996 			/* Except at the end of the transfer, amount will be
1997 			 * equal to the buffer size, which is divisible by
1998 			 * the bulk-out maxpacket size.
1999 			 */
2000 			set_bulk_out_req_length(fsg, bh, amount);
2001 			start_transfer(fsg, fsg->bulk_out, bh->outreq,
2002 					&bh->outreq_busy, &bh->state);
2003 			fsg->next_buffhd_to_fill = bh->next;
2004 			fsg->usb_amount_left -= amount;
2005 			continue;
2006 		}
2007 
2008 		/* Otherwise wait for something to happen */
2009 		rc = sleep_thread(fsg);
2010 		if (rc)
2011 			return rc;
2012 	}
2013 	return 0;
2014 }
2015 
2016 
2017 static int finish_reply(struct fsg_dev *fsg)
2018 {
2019 	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
2020 	int			rc = 0;
2021 
2022 	switch (fsg->data_dir) {
2023 	case DATA_DIR_NONE:
2024 		break;			// Nothing to send
2025 
2026 	/* If we don't know whether the host wants to read or write,
2027 	 * this must be CB or CBI with an unknown command.  We mustn't
2028 	 * try to send or receive any data.  So stall both bulk pipes
2029 	 * if we can and wait for a reset. */
2030 	case DATA_DIR_UNKNOWN:
2031 		if (mod_data.can_stall) {
2032 			fsg_set_halt(fsg, fsg->bulk_out);
2033 			rc = halt_bulk_in_endpoint(fsg);
2034 		}
2035 		break;
2036 
2037 	/* All but the last buffer of data must have already been sent */
2038 	case DATA_DIR_TO_HOST:
2039 		if (fsg->data_size == 0)
2040 			;		// Nothing to send
2041 
2042 		/* If there's no residue, simply send the last buffer */
2043 		else if (fsg->residue == 0) {
2044 			bh->inreq->zero = 0;
2045 			start_transfer(fsg, fsg->bulk_in, bh->inreq,
2046 					&bh->inreq_busy, &bh->state);
2047 			fsg->next_buffhd_to_fill = bh->next;
2048 		}
2049 
2050 		/* There is a residue.  For CB and CBI, simply mark the end
2051 		 * of the data with a short packet.  However, if we are
2052 		 * allowed to stall, there was no data at all (residue ==
2053 		 * data_size), and the command failed (invalid LUN or
2054 		 * sense data is set), then halt the bulk-in endpoint
2055 		 * instead. */
2056 		else if (!transport_is_bbb()) {
2057 			if (mod_data.can_stall &&
2058 					fsg->residue == fsg->data_size &&
2059 	(!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
2060 				bh->state = BUF_STATE_EMPTY;
2061 				rc = halt_bulk_in_endpoint(fsg);
2062 			} else {
2063 				bh->inreq->zero = 1;
2064 				start_transfer(fsg, fsg->bulk_in, bh->inreq,
2065 						&bh->inreq_busy, &bh->state);
2066 				fsg->next_buffhd_to_fill = bh->next;
2067 			}
2068 		}
2069 
2070 		/*
2071 		 * For Bulk-only, mark the end of the data with a short
2072 		 * packet.  If we are allowed to stall, halt the bulk-in
2073 		 * endpoint.  (Note: This violates the Bulk-Only Transport
2074 		 * specification, which requires us to pad the data if we
2075 		 * don't halt the endpoint.  Presumably nobody will mind.)
2076 		 */
2077 		else {
2078 			bh->inreq->zero = 1;
2079 			start_transfer(fsg, fsg->bulk_in, bh->inreq,
2080 					&bh->inreq_busy, &bh->state);
2081 			fsg->next_buffhd_to_fill = bh->next;
2082 			if (mod_data.can_stall)
2083 				rc = halt_bulk_in_endpoint(fsg);
2084 		}
2085 		break;
2086 
2087 	/* We have processed all we want from the data the host has sent.
2088 	 * There may still be outstanding bulk-out requests. */
2089 	case DATA_DIR_FROM_HOST:
2090 		if (fsg->residue == 0)
2091 			;		// Nothing to receive
2092 
2093 		/* Did the host stop sending unexpectedly early? */
2094 		else if (fsg->short_packet_received) {
2095 			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2096 			rc = -EINTR;
2097 		}
2098 
2099 		/* We haven't processed all the incoming data.  Even though
2100 		 * we may be allowed to stall, doing so would cause a race.
2101 		 * The controller may already have ACK'ed all the remaining
2102 		 * bulk-out packets, in which case the host wouldn't see a
2103 		 * STALL.  Not realizing the endpoint was halted, it wouldn't
2104 		 * clear the halt -- leading to problems later on. */
2105 #if 0
2106 		else if (mod_data.can_stall) {
2107 			fsg_set_halt(fsg, fsg->bulk_out);
2108 			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2109 			rc = -EINTR;
2110 		}
2111 #endif
2112 
2113 		/* We can't stall.  Read in the excess data and throw it
2114 		 * all away. */
2115 		else
2116 			rc = throw_away_data(fsg);
2117 		break;
2118 	}
2119 	return rc;
2120 }
2121 
2122 
2123 static int send_status(struct fsg_dev *fsg)
2124 {
2125 	struct fsg_lun		*curlun = fsg->curlun;
2126 	struct fsg_buffhd	*bh;
2127 	int			rc;
2128 	u8			status = USB_STATUS_PASS;
2129 	u32			sd, sdinfo = 0;
2130 
2131 	/* Wait for the next buffer to become available */
2132 	bh = fsg->next_buffhd_to_fill;
2133 	while (bh->state != BUF_STATE_EMPTY) {
2134 		rc = sleep_thread(fsg);
2135 		if (rc)
2136 			return rc;
2137 	}
2138 
2139 	if (curlun) {
2140 		sd = curlun->sense_data;
2141 		sdinfo = curlun->sense_data_info;
2142 	} else if (fsg->bad_lun_okay)
2143 		sd = SS_NO_SENSE;
2144 	else
2145 		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2146 
2147 	if (fsg->phase_error) {
2148 		DBG(fsg, "sending phase-error status\n");
2149 		status = USB_STATUS_PHASE_ERROR;
2150 		sd = SS_INVALID_COMMAND;
2151 	} else if (sd != SS_NO_SENSE) {
2152 		DBG(fsg, "sending command-failure status\n");
2153 		status = USB_STATUS_FAIL;
2154 		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2155 				"  info x%x\n",
2156 				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
2157 	}
2158 
2159 	if (transport_is_bbb()) {
2160 		struct bulk_cs_wrap	*csw = bh->buf;
2161 
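		/* The CSW is 13 bytes: signature, the tag echoed from the
		 * CBW, the data residue, and a one-byte status (0 = pass,
		 * 1 = fail, 2 = phase error). */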
2162 		/* Store and send the Bulk-only CSW */
2163 		csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
2164 		csw->Tag = fsg->tag;
2165 		csw->Residue = cpu_to_le32(fsg->residue);
2166 		csw->Status = status;
2167 
2168 		bh->inreq->length = USB_BULK_CS_WRAP_LEN;
2169 		bh->inreq->zero = 0;
2170 		start_transfer(fsg, fsg->bulk_in, bh->inreq,
2171 				&bh->inreq_busy, &bh->state);
2172 
2173 	} else if (mod_data.transport_type == USB_PR_CB) {
2174 
2175 		/* Control-Bulk transport has no status phase! */
2176 		return 0;
2177 
2178 	} else {			// USB_PR_CBI
2179 		struct interrupt_data	*buf = bh->buf;
2180 
2181 		/* Store and send the Interrupt data.  UFI sends the ASC
2182 		 * and ASCQ bytes.  Everything else sends a Type (which
2183 		 * is always 0) and the status Value. */
2184 		if (mod_data.protocol_type == USB_SC_UFI) {
2185 			buf->bType = ASC(sd);
2186 			buf->bValue = ASCQ(sd);
2187 		} else {
2188 			buf->bType = 0;
2189 			buf->bValue = status;
2190 		}
2191 		fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
2192 
2193 		fsg->intr_buffhd = bh;		// Point to the right buffhd
2194 		fsg->intreq->buf = bh->inreq->buf;
2195 		fsg->intreq->context = bh;
2196 		start_transfer(fsg, fsg->intr_in, fsg->intreq,
2197 				&fsg->intreq_busy, &bh->state);
2198 	}
2199 
2200 	fsg->next_buffhd_to_fill = bh->next;
2201 	return 0;
2202 }
2203 
2204 
2205 /*-------------------------------------------------------------------------*/
2206 
2207 /* Check whether the command is properly formed and whether its data size
2208  * and direction agree with the values we already have. */
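/* The "mask" argument has bit i set for each CDB byte i that is allowed
 * to be non-zero (the opcode byte is never checked, and the LUN bits in
 * byte 1 are masked off first).  For example, READ(10) passes
 * (1<<1) | (0xf<<2) | (3<<7): byte 1 (flags), bytes 2-5 (the LBA), and
 * bytes 7-8 (the transfer length) may be non-zero. */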
2209 static int check_command(struct fsg_dev *fsg, int cmnd_size,
2210 		enum data_direction data_dir, unsigned int mask,
2211 		int needs_medium, const char *name)
2212 {
2213 	int			i;
2214 	int			lun = fsg->cmnd[1] >> 5;
2215 	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
2216 	char			hdlen[20];
2217 	struct fsg_lun		*curlun;
2218 
2219 	/* Adjust the expected cmnd_size for protocol encapsulation padding.
2220 	 * Transparent SCSI doesn't pad. */
2221 	if (protocol_is_scsi())
2222 		;
2223 
2224 	/* There's some disagreement as to whether RBC pads commands or not.
2225 	 * We'll play it safe and accept either form. */
2226 	else if (mod_data.protocol_type == USB_SC_RBC) {
2227 		if (fsg->cmnd_size == 12)
2228 			cmnd_size = 12;
2229 
2230 	/* All the other protocols pad to 12 bytes */
2231 	} else
2232 		cmnd_size = 12;
2233 
2234 	hdlen[0] = 0;
2235 	if (fsg->data_dir != DATA_DIR_UNKNOWN)
2236 		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
2237 				fsg->data_size);
2238 	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
2239 			name, cmnd_size, dirletter[(int) data_dir],
2240 			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
2241 
2242 	/* We can't reply at all until we know the correct data direction
2243 	 * and size. */
2244 	if (fsg->data_size_from_cmnd == 0)
2245 		data_dir = DATA_DIR_NONE;
2246 	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	// CB or CBI
2247 		fsg->data_dir = data_dir;
2248 		fsg->data_size = fsg->data_size_from_cmnd;
2249 
2250 	} else {					// Bulk-only
2251 		if (fsg->data_size < fsg->data_size_from_cmnd) {
2252 
2253 			/* Host data size < Device data size is a phase error.
2254 			 * Carry out the command, but only transfer as much
2255 			 * as we are allowed. */
2256 			fsg->data_size_from_cmnd = fsg->data_size;
2257 			fsg->phase_error = 1;
2258 		}
2259 	}
2260 	fsg->residue = fsg->usb_amount_left = fsg->data_size;
2261 
2262 	/* Conflicting data directions is a phase error */
2263 	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
2264 		fsg->phase_error = 1;
2265 		return -EINVAL;
2266 	}
2267 
2268 	/* Verify the length of the command itself */
2269 	if (cmnd_size != fsg->cmnd_size) {
2270 
2271 		/* Special case workaround: There are plenty of buggy SCSI
2272 		 * implementations. Many have issues with cbw->Length
2273 		 * field passing a wrong command size. For those cases we
2274 		 * always try to work around the problem by using the length
2275 		 * sent by the host side provided it is at least as large
2276 		 * as the correct command length.
2277 		 * Examples of such cases would be MS-Windows, which issues
2278 		 * REQUEST SENSE with cbw->Length == 12 where it should
2279 		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
2280 		 * REQUEST SENSE with cbw->Length == 10 where it should
2281 		 * be 6 as well.
2282 		 */
2283 		if (cmnd_size <= fsg->cmnd_size) {
2284 			DBG(fsg, "%s is buggy! Expected length %d "
2285 					"but we got %d\n", name,
2286 					cmnd_size, fsg->cmnd_size);
2287 			cmnd_size = fsg->cmnd_size;
2288 		} else {
2289 			fsg->phase_error = 1;
2290 			return -EINVAL;
2291 		}
2292 	}
2293 
2294 	/* Check that the LUN values are consistent */
2295 	if (transport_is_bbb()) {
2296 		if (fsg->lun != lun)
2297 			DBG(fsg, "using LUN %d from CBW, "
2298 					"not LUN %d from CDB\n",
2299 					fsg->lun, lun);
2300 	}
2301 
2302 	/* Check the LUN */
2303 	curlun = fsg->curlun;
2304 	if (curlun) {
2305 		if (fsg->cmnd[0] != REQUEST_SENSE) {
2306 			curlun->sense_data = SS_NO_SENSE;
2307 			curlun->sense_data_info = 0;
2308 			curlun->info_valid = 0;
2309 		}
2310 	} else {
2311 		fsg->bad_lun_okay = 0;
2312 
2313 		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
2314 		 * to use unsupported LUNs; all others may not. */
2315 		if (fsg->cmnd[0] != INQUIRY &&
2316 				fsg->cmnd[0] != REQUEST_SENSE) {
2317 			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2318 			return -EINVAL;
2319 		}
2320 	}
2321 
2322 	/* If a unit attention condition exists, only INQUIRY and
2323 	 * REQUEST SENSE commands are allowed; anything else must fail. */
2324 	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2325 			fsg->cmnd[0] != INQUIRY &&
2326 			fsg->cmnd[0] != REQUEST_SENSE) {
2327 		curlun->sense_data = curlun->unit_attention_data;
2328 		curlun->unit_attention_data = SS_NO_SENSE;
2329 		return -EINVAL;
2330 	}
2331 
2332 	/* Check that only command bytes listed in the mask are non-zero */
2333 	fsg->cmnd[1] &= 0x1f;			// Mask away the LUN
2334 	for (i = 1; i < cmnd_size; ++i) {
2335 		if (fsg->cmnd[i] && !(mask & (1 << i))) {
2336 			if (curlun)
2337 				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2338 			return -EINVAL;
2339 		}
2340 	}
2341 
2342 	/* If the medium isn't mounted and the command needs to access
2343 	 * it, return an error. */
2344 	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
2345 		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2346 		return -EINVAL;
2347 	}
2348 
2349 	return 0;
2350 }
2351 
2352 /* Wrapper around check_command() for commands whose transfer length is given in blocks */
2353 static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
2354 		enum data_direction data_dir, unsigned int mask,
2355 		int needs_medium, const char *name)
2356 {
2357 	if (fsg->curlun)
2358 		fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
2359 	return check_command(fsg, cmnd_size, data_dir,
2360 			mask, needs_medium, name);
2361 }
2362 
2363 static int do_scsi_command(struct fsg_dev *fsg)
2364 {
2365 	struct fsg_buffhd	*bh;
2366 	int			rc;
2367 	int			reply = -EINVAL;
2368 	int			i;
2369 	static char		unknown[16];
2370 
2371 	dump_cdb(fsg);
2372 
2373 	/* Wait for the next buffer to become available for data or status */
2374 	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2375 	while (bh->state != BUF_STATE_EMPTY) {
2376 		rc = sleep_thread(fsg);
2377 		if (rc)
2378 			return rc;
2379 	}
2380 	fsg->phase_error = 0;
2381 	fsg->short_packet_received = 0;
2382 
2383 	down_read(&fsg->filesem);	// We're using the backing file
2384 	switch (fsg->cmnd[0]) {
2385 
2386 	case INQUIRY:
2387 		fsg->data_size_from_cmnd = fsg->cmnd[4];
2388 		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2389 				(1<<4), 0,
2390 				"INQUIRY")) == 0)
2391 			reply = do_inquiry(fsg, bh);
2392 		break;
2393 
2394 	case MODE_SELECT:
2395 		fsg->data_size_from_cmnd = fsg->cmnd[4];
2396 		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2397 				(1<<1) | (1<<4), 0,
2398 				"MODE SELECT(6)")) == 0)
2399 			reply = do_mode_select(fsg, bh);
2400 		break;
2401 
2402 	case MODE_SELECT_10:
2403 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2404 		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2405 				(1<<1) | (3<<7), 0,
2406 				"MODE SELECT(10)")) == 0)
2407 			reply = do_mode_select(fsg, bh);
2408 		break;
2409 
2410 	case MODE_SENSE:
2411 		fsg->data_size_from_cmnd = fsg->cmnd[4];
2412 		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2413 				(1<<1) | (1<<2) | (1<<4), 0,
2414 				"MODE SENSE(6)")) == 0)
2415 			reply = do_mode_sense(fsg, bh);
2416 		break;
2417 
2418 	case MODE_SENSE_10:
2419 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2420 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2421 				(1<<1) | (1<<2) | (3<<7), 0,
2422 				"MODE SENSE(10)")) == 0)
2423 			reply = do_mode_sense(fsg, bh);
2424 		break;
2425 
2426 	case ALLOW_MEDIUM_REMOVAL:
2427 		fsg->data_size_from_cmnd = 0;
2428 		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2429 				(1<<4), 0,
2430 				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2431 			reply = do_prevent_allow(fsg);
2432 		break;
2433 
2434 	case READ_6:
2435 		i = fsg->cmnd[4];
2436 		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
2437 		if ((reply = check_command_size_in_blocks(fsg, 6,
2438 				DATA_DIR_TO_HOST,
2439 				(7<<1) | (1<<4), 1,
2440 				"READ(6)")) == 0)
2441 			reply = do_read(fsg);
2442 		break;
2443 
2444 	case READ_10:
2445 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2446 		if ((reply = check_command_size_in_blocks(fsg, 10,
2447 				DATA_DIR_TO_HOST,
2448 				(1<<1) | (0xf<<2) | (3<<7), 1,
2449 				"READ(10)")) == 0)
2450 			reply = do_read(fsg);
2451 		break;
2452 
2453 	case READ_12:
2454 		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
2455 		if ((reply = check_command_size_in_blocks(fsg, 12,
2456 				DATA_DIR_TO_HOST,
2457 				(1<<1) | (0xf<<2) | (0xf<<6), 1,
2458 				"READ(12)")) == 0)
2459 			reply = do_read(fsg);
2460 		break;
2461 
2462 	case READ_CAPACITY:
2463 		fsg->data_size_from_cmnd = 8;
2464 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2465 				(0xf<<2) | (1<<8), 1,
2466 				"READ CAPACITY")) == 0)
2467 			reply = do_read_capacity(fsg, bh);
2468 		break;
2469 
2470 	case READ_HEADER:
2471 		if (!mod_data.cdrom)
2472 			goto unknown_cmnd;
2473 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2474 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2475 				(3<<7) | (0x1f<<1), 1,
2476 				"READ HEADER")) == 0)
2477 			reply = do_read_header(fsg, bh);
2478 		break;
2479 
2480 	case READ_TOC:
2481 		if (!mod_data.cdrom)
2482 			goto unknown_cmnd;
2483 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2484 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2485 				(7<<6) | (1<<1), 1,
2486 				"READ TOC")) == 0)
2487 			reply = do_read_toc(fsg, bh);
2488 		break;
2489 
2490 	case READ_FORMAT_CAPACITIES:
2491 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2492 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2493 				(3<<7), 1,
2494 				"READ FORMAT CAPACITIES")) == 0)
2495 			reply = do_read_format_capacities(fsg, bh);
2496 		break;
2497 
2498 	case REQUEST_SENSE:
2499 		fsg->data_size_from_cmnd = fsg->cmnd[4];
2500 		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2501 				(1<<4), 0,
2502 				"REQUEST SENSE")) == 0)
2503 			reply = do_request_sense(fsg, bh);
2504 		break;
2505 
2506 	case START_STOP:
2507 		fsg->data_size_from_cmnd = 0;
2508 		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2509 				(1<<1) | (1<<4), 0,
2510 				"START-STOP UNIT")) == 0)
2511 			reply = do_start_stop(fsg);
2512 		break;
2513 
2514 	case SYNCHRONIZE_CACHE:
2515 		fsg->data_size_from_cmnd = 0;
2516 		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2517 				(0xf<<2) | (3<<7), 1,
2518 				"SYNCHRONIZE CACHE")) == 0)
2519 			reply = do_synchronize_cache(fsg);
2520 		break;
2521 
2522 	case TEST_UNIT_READY:
2523 		fsg->data_size_from_cmnd = 0;
2524 		reply = check_command(fsg, 6, DATA_DIR_NONE,
2525 				0, 1,
2526 				"TEST UNIT READY");
2527 		break;
2528 
2529 	/* Although optional, this command is used by MS-Windows.  We
2530 	 * support a minimal version: BytChk must be 0. */
2531 	case VERIFY:
2532 		fsg->data_size_from_cmnd = 0;
2533 		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2534 				(1<<1) | (0xf<<2) | (3<<7), 1,
2535 				"VERIFY")) == 0)
2536 			reply = do_verify(fsg);
2537 		break;
2538 
2539 	case WRITE_6:
2540 		i = fsg->cmnd[4];
2541 		fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
2542 		if ((reply = check_command_size_in_blocks(fsg, 6,
2543 				DATA_DIR_FROM_HOST,
2544 				(7<<1) | (1<<4), 1,
2545 				"WRITE(6)")) == 0)
2546 			reply = do_write(fsg);
2547 		break;
2548 
2549 	case WRITE_10:
2550 		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2551 		if ((reply = check_command_size_in_blocks(fsg, 10,
2552 				DATA_DIR_FROM_HOST,
2553 				(1<<1) | (0xf<<2) | (3<<7), 1,
2554 				"WRITE(10)")) == 0)
2555 			reply = do_write(fsg);
2556 		break;
2557 
2558 	case WRITE_12:
2559 		fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
2560 		if ((reply = check_command_size_in_blocks(fsg, 12,
2561 				DATA_DIR_FROM_HOST,
2562 				(1<<1) | (0xf<<2) | (0xf<<6), 1,
2563 				"WRITE(12)")) == 0)
2564 			reply = do_write(fsg);
2565 		break;
2566 
2567 	/* Some mandatory commands that we recognize but don't implement.
2568 	 * They don't mean much in this setting.  It's left as an exercise
2569 	 * for anyone interested to implement RESERVE and RELEASE in terms
2570 	 * of Posix locks. */
2571 	case FORMAT_UNIT:
2572 	case RELEASE:
2573 	case RESERVE:
2574 	case SEND_DIAGNOSTIC:
2575 		// Fall through
2576 
2577 	default:
2578  unknown_cmnd:
2579 		fsg->data_size_from_cmnd = 0;
2580 		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2581 		if ((reply = check_command(fsg, fsg->cmnd_size,
2582 				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2583 			fsg->curlun->sense_data = SS_INVALID_COMMAND;
2584 			reply = -EINVAL;
2585 		}
2586 		break;
2587 	}
2588 	up_read(&fsg->filesem);
2589 
2590 	if (reply == -EINTR || signal_pending(current))
2591 		return -EINTR;
2592 
2593 	/* Set up the single reply buffer for finish_reply() */
2594 	if (reply == -EINVAL)
2595 		reply = 0;		// Error reply length
2596 	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2597 		reply = min((u32) reply, fsg->data_size_from_cmnd);
2598 		bh->inreq->length = reply;
2599 		bh->state = BUF_STATE_FULL;
2600 		fsg->residue -= reply;
2601 	}				// Otherwise it's already set
2602 
2603 	return 0;
2604 }
2605 
2606 
2607 /*-------------------------------------------------------------------------*/
2608 
2609 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2610 {
2611 	struct usb_request		*req = bh->outreq;
2612 	struct fsg_bulk_cb_wrap	*cbw = req->buf;
2613 
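	/* A Bulk-only CBW is 31 bytes: a 'USBC' signature, a tag to be
	 * echoed in the CSW, the expected data transfer length, a flags
	 * byte (bit 7 = direction), the LUN, the CDB length, and up to
	 * 16 bytes of CDB. */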
2614 	/* Was this a real packet?  Should it be ignored? */
2615 	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2616 		return -EINVAL;
2617 
2618 	/* Is the CBW valid? */
2619 	if (req->actual != USB_BULK_CB_WRAP_LEN ||
2620 			cbw->Signature != cpu_to_le32(
2621 				USB_BULK_CB_SIG)) {
2622 		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2623 				req->actual,
2624 				le32_to_cpu(cbw->Signature));
2625 
2626 		/* The Bulk-only spec says we MUST stall the IN endpoint
2627 		 * (6.6.1), so it's unavoidable.  It also says we must
2628 		 * retain this state until the next reset, but there's
2629 		 * no way to tell the controller driver it should ignore
2630 		 * Clear-Feature(HALT) requests.
2631 		 *
2632 		 * We aren't required to halt the OUT endpoint; instead
2633 		 * we can simply accept and discard any data received
2634 		 * until the next reset. */
2635 		wedge_bulk_in_endpoint(fsg);
2636 		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2637 		return -EINVAL;
2638 	}
2639 
2640 	/* Is the CBW meaningful? */
2641 	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2642 			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2643 		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2644 				"cmdlen %u\n",
2645 				cbw->Lun, cbw->Flags, cbw->Length);
2646 
2647 		/* We can do anything we want here, so let's stall the
2648 		 * bulk pipes if we are allowed to. */
2649 		if (mod_data.can_stall) {
2650 			fsg_set_halt(fsg, fsg->bulk_out);
2651 			halt_bulk_in_endpoint(fsg);
2652 		}
2653 		return -EINVAL;
2654 	}
2655 
2656 	/* Save the command for later */
2657 	fsg->cmnd_size = cbw->Length;
2658 	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2659 	if (cbw->Flags & USB_BULK_IN_FLAG)
2660 		fsg->data_dir = DATA_DIR_TO_HOST;
2661 	else
2662 		fsg->data_dir = DATA_DIR_FROM_HOST;
2663 	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2664 	if (fsg->data_size == 0)
2665 		fsg->data_dir = DATA_DIR_NONE;
2666 	fsg->lun = cbw->Lun;
2667 	fsg->tag = cbw->Tag;
2668 	return 0;
2669 }
2670 
2671 
2672 static int get_next_command(struct fsg_dev *fsg)
2673 {
2674 	struct fsg_buffhd	*bh;
2675 	int			rc = 0;
2676 
2677 	if (transport_is_bbb()) {
2678 
2679 		/* Wait for the next buffer to become available */
2680 		bh = fsg->next_buffhd_to_fill;
2681 		while (bh->state != BUF_STATE_EMPTY) {
2682 			rc = sleep_thread(fsg);
2683 			if (rc)
2684 				return rc;
2685 		}
2686 
2687 		/* Queue a request to read a Bulk-only CBW */
2688 		set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2689 		start_transfer(fsg, fsg->bulk_out, bh->outreq,
2690 				&bh->outreq_busy, &bh->state);
2691 
2692 		/* We will drain the buffer in software, which means we
2693 		 * can reuse it for the next filling.  No need to advance
2694 		 * next_buffhd_to_fill. */
2695 
2696 		/* Wait for the CBW to arrive */
2697 		while (bh->state != BUF_STATE_FULL) {
2698 			rc = sleep_thread(fsg);
2699 			if (rc)
2700 				return rc;
2701 		}
2702 		smp_rmb();
2703 		rc = received_cbw(fsg, bh);
2704 		bh->state = BUF_STATE_EMPTY;
2705 
2706 	} else {		// USB_PR_CB or USB_PR_CBI
2707 
2708 		/* Wait for the next command to arrive */
2709 		while (fsg->cbbuf_cmnd_size == 0) {
2710 			rc = sleep_thread(fsg);
2711 			if (rc)
2712 				return rc;
2713 		}
2714 
2715 		/* Is the previous status interrupt request still busy?
2716 		 * The host is allowed to skip reading the status,
2717 		 * so we must cancel it. */
2718 		if (fsg->intreq_busy)
2719 			usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2720 
2721 		/* Copy the command and mark the buffer empty */
2722 		fsg->data_dir = DATA_DIR_UNKNOWN;
2723 		spin_lock_irq(&fsg->lock);
2724 		fsg->cmnd_size = fsg->cbbuf_cmnd_size;
2725 		memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
2726 		fsg->cbbuf_cmnd_size = 0;
2727 		spin_unlock_irq(&fsg->lock);
2728 
2729 		/* Use LUN from the command */
2730 		fsg->lun = fsg->cmnd[1] >> 5;
2731 	}
2732 
2733 	/* Update current lun */
2734 	if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
2735 		fsg->curlun = &fsg->luns[fsg->lun];
2736 	else
2737 		fsg->curlun = NULL;
2738 
2739 	return rc;
2740 }
2741 
2742 
2743 /*-------------------------------------------------------------------------*/
2744 
2745 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2746 		const struct usb_endpoint_descriptor *d)
2747 {
2748 	int	rc;
2749 
2750 	ep->driver_data = fsg;
2751 	ep->desc = d;
2752 	rc = usb_ep_enable(ep);
2753 	if (rc)
2754 		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2755 	return rc;
2756 }
2757 
2758 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2759 		struct usb_request **preq)
2760 {
2761 	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2762 	if (*preq)
2763 		return 0;
2764 	ERROR(fsg, "can't allocate request for %s\n", ep->name);
2765 	return -ENOMEM;
2766 }
2767 
2768 /*
2769  * Reset interface setting and re-init endpoint state (toggle etc).
2770  * Call with altsetting < 0 to disable the interface.  The only other
2771  * available altsetting is 0, which enables the interface.
2772  */
2773 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2774 {
2775 	int	rc = 0;
2776 	int	i;
2777 	const struct usb_endpoint_descriptor	*d;
2778 
2779 	if (fsg->running)
2780 		DBG(fsg, "reset interface\n");
2781 
2782 reset:
2783 	/* Deallocate the requests */
2784 	for (i = 0; i < fsg_num_buffers; ++i) {
2785 		struct fsg_buffhd *bh = &fsg->buffhds[i];
2786 
2787 		if (bh->inreq) {
2788 			usb_ep_free_request(fsg->bulk_in, bh->inreq);
2789 			bh->inreq = NULL;
2790 		}
2791 		if (bh->outreq) {
2792 			usb_ep_free_request(fsg->bulk_out, bh->outreq);
2793 			bh->outreq = NULL;
2794 		}
2795 	}
2796 	if (fsg->intreq) {
2797 		usb_ep_free_request(fsg->intr_in, fsg->intreq);
2798 		fsg->intreq = NULL;
2799 	}
2800 
2801 	/* Disable the endpoints */
2802 	if (fsg->bulk_in_enabled) {
2803 		usb_ep_disable(fsg->bulk_in);
2804 		fsg->bulk_in_enabled = 0;
2805 	}
2806 	if (fsg->bulk_out_enabled) {
2807 		usb_ep_disable(fsg->bulk_out);
2808 		fsg->bulk_out_enabled = 0;
2809 	}
2810 	if (fsg->intr_in_enabled) {
2811 		usb_ep_disable(fsg->intr_in);
2812 		fsg->intr_in_enabled = 0;
2813 	}
2814 
2815 	fsg->running = 0;
2816 	if (altsetting < 0 || rc != 0)
2817 		return rc;
2818 
2819 	DBG(fsg, "set interface %d\n", altsetting);
2820 
2821 	/* Enable the endpoints */
2822 	d = fsg_ep_desc(fsg->gadget,
2823 			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
2824 			&fsg_ss_bulk_in_desc);
2825 	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2826 		goto reset;
2827 	fsg->bulk_in_enabled = 1;
2828 
2829 	d = fsg_ep_desc(fsg->gadget,
2830 			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
2831 			&fsg_ss_bulk_out_desc);
2832 	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2833 		goto reset;
2834 	fsg->bulk_out_enabled = 1;
2835 	fsg->bulk_out_maxpacket = usb_endpoint_maxp(d);
2836 	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2837 
2838 	if (transport_is_cbi()) {
2839 		d = fsg_ep_desc(fsg->gadget,
2840 				&fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc,
2841 				&fsg_ss_intr_in_desc);
2842 		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
2843 			goto reset;
2844 		fsg->intr_in_enabled = 1;
2845 	}
2846 
2847 	/* Allocate the requests */
2848 	for (i = 0; i < fsg_num_buffers; ++i) {
2849 		struct fsg_buffhd	*bh = &fsg->buffhds[i];
2850 
2851 		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2852 			goto reset;
2853 		if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2854 			goto reset;
2855 		bh->inreq->buf = bh->outreq->buf = bh->buf;
2856 		bh->inreq->context = bh->outreq->context = bh;
2857 		bh->inreq->complete = bulk_in_complete;
2858 		bh->outreq->complete = bulk_out_complete;
2859 	}
2860 	if (transport_is_cbi()) {
2861 		if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
2862 			goto reset;
2863 		fsg->intreq->complete = intr_in_complete;
2864 	}
2865 
2866 	fsg->running = 1;
2867 	for (i = 0; i < fsg->nluns; ++i)
2868 		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2869 	return rc;
2870 }
2871 
2872 
2873 /*
2874  * Change our operational configuration.  This code must agree with the code
2875  * that returns config descriptors, and with interface altsetting code.
2876  *
2877  * It's also responsible for power management interactions.  Some
2878  * configurations might not work with our current power sources.
2879  * For now we just assume the gadget is always self-powered.
2880  */
2881 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2882 {
2883 	int	rc = 0;
2884 
2885 	/* Disable the single interface */
2886 	if (fsg->config != 0) {
2887 		DBG(fsg, "reset config\n");
2888 		fsg->config = 0;
2889 		rc = do_set_interface(fsg, -1);
2890 	}
2891 
2892 	/* Enable the interface */
2893 	if (new_config != 0) {
2894 		fsg->config = new_config;
2895 		if ((rc = do_set_interface(fsg, 0)) != 0)
2896 			fsg->config = 0;	// Reset on errors
2897 		else
2898 			INFO(fsg, "%s config #%d\n",
2899 			     usb_speed_string(fsg->gadget->speed),
2900 			     fsg->config);
2901 	}
2902 	return rc;
2903 }
2904 
2905 
2906 /*-------------------------------------------------------------------------*/
2907 
2908 static void handle_exception(struct fsg_dev *fsg)
2909 {
2910 	siginfo_t		info;
2911 	int			sig;
2912 	int			i;
2913 	int			num_active;
2914 	struct fsg_buffhd	*bh;
2915 	enum fsg_state		old_state;
2916 	u8			new_config;
2917 	struct fsg_lun		*curlun;
2918 	unsigned int		exception_req_tag;
2919 	int			rc;
2920 
2921 	/* Clear the existing signals.  Anything but SIGUSR1 is converted
2922 	 * into a high-priority EXIT exception. */
2923 	for (;;) {
2924 		sig = dequeue_signal_lock(current, &current->blocked, &info);
2925 		if (!sig)
2926 			break;
2927 		if (sig != SIGUSR1) {
2928 			if (fsg->state < FSG_STATE_EXIT)
2929 				DBG(fsg, "Main thread exiting on signal\n");
2930 			raise_exception(fsg, FSG_STATE_EXIT);
2931 		}
2932 	}
2933 
2934 	/* Cancel all the pending transfers */
2935 	if (fsg->intreq_busy)
2936 		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2937 	for (i = 0; i < fsg_num_buffers; ++i) {
2938 		bh = &fsg->buffhds[i];
2939 		if (bh->inreq_busy)
2940 			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2941 		if (bh->outreq_busy)
2942 			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2943 	}
2944 
2945 	/* Wait until everything is idle */
2946 	for (;;) {
2947 		num_active = fsg->intreq_busy;
2948 		for (i = 0; i < fsg_num_buffers; ++i) {
2949 			bh = &fsg->buffhds[i];
2950 			num_active += bh->inreq_busy + bh->outreq_busy;
2951 		}
2952 		if (num_active == 0)
2953 			break;
2954 		if (sleep_thread(fsg))
2955 			return;
2956 	}
2957 
2958 	/* Clear out the controller's fifos */
2959 	if (fsg->bulk_in_enabled)
2960 		usb_ep_fifo_flush(fsg->bulk_in);
2961 	if (fsg->bulk_out_enabled)
2962 		usb_ep_fifo_flush(fsg->bulk_out);
2963 	if (fsg->intr_in_enabled)
2964 		usb_ep_fifo_flush(fsg->intr_in);
2965 
2966 	/* Reset the I/O buffer states and pointers, the SCSI
2967 	 * state, and the exception.  Then invoke the handler. */
2968 	spin_lock_irq(&fsg->lock);
2969 
2970 	for (i = 0; i < fsg_num_buffers; ++i) {
2971 		bh = &fsg->buffhds[i];
2972 		bh->state = BUF_STATE_EMPTY;
2973 	}
2974 	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2975 			&fsg->buffhds[0];
2976 
2977 	exception_req_tag = fsg->exception_req_tag;
2978 	new_config = fsg->new_config;
2979 	old_state = fsg->state;
2980 
2981 	if (old_state == FSG_STATE_ABORT_BULK_OUT)
2982 		fsg->state = FSG_STATE_STATUS_PHASE;
2983 	else {
2984 		for (i = 0; i < fsg->nluns; ++i) {
2985 			curlun = &fsg->luns[i];
2986 			curlun->prevent_medium_removal = 0;
2987 			curlun->sense_data = curlun->unit_attention_data =
2988 					SS_NO_SENSE;
2989 			curlun->sense_data_info = 0;
2990 			curlun->info_valid = 0;
2991 		}
2992 		fsg->state = FSG_STATE_IDLE;
2993 	}
2994 	spin_unlock_irq(&fsg->lock);
2995 
2996 	/* Carry out any extra actions required for the exception */
2997 	switch (old_state) {
2998 	default:
2999 		break;
3000 
3001 	case FSG_STATE_ABORT_BULK_OUT:
3002 		send_status(fsg);
3003 		spin_lock_irq(&fsg->lock);
3004 		if (fsg->state == FSG_STATE_STATUS_PHASE)
3005 			fsg->state = FSG_STATE_IDLE;
3006 		spin_unlock_irq(&fsg->lock);
3007 		break;
3008 
3009 	case FSG_STATE_RESET:
3010 		/* In case we were forced against our will to halt a
3011 		 * bulk endpoint, clear the halt now.  (The SuperH UDC
3012 		 * requires this.) */
3013 		if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
3014 			usb_ep_clear_halt(fsg->bulk_in);
3015 
3016 		if (transport_is_bbb()) {
3017 			if (fsg->ep0_req_tag == exception_req_tag)
3018 				ep0_queue(fsg);	// Complete the status stage
3019 
3020 		} else if (transport_is_cbi())
3021 			send_status(fsg);	// Status by interrupt pipe
3022 
3023 		/* Technically this should go here, but it would only be
3024 		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
3025 		 * CONFIG_CHANGE cases. */
3026 		// for (i = 0; i < fsg->nluns; ++i)
3027 		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3028 		break;
3029 
3030 	case FSG_STATE_INTERFACE_CHANGE:
3031 		rc = do_set_interface(fsg, 0);
3032 		if (fsg->ep0_req_tag != exception_req_tag)
3033 			break;
3034 		if (rc != 0)			// STALL on errors
3035 			fsg_set_halt(fsg, fsg->ep0);
3036 		else				// Complete the status stage
3037 			ep0_queue(fsg);
3038 		break;
3039 
3040 	case FSG_STATE_CONFIG_CHANGE:
3041 		rc = do_set_config(fsg, new_config);
3042 		if (fsg->ep0_req_tag != exception_req_tag)
3043 			break;
3044 		if (rc != 0)			// STALL on errors
3045 			fsg_set_halt(fsg, fsg->ep0);
3046 		else				// Complete the status stage
3047 			ep0_queue(fsg);
3048 		break;
3049 
3050 	case FSG_STATE_DISCONNECT:
3051 		for (i = 0; i < fsg->nluns; ++i)
3052 			fsg_lun_fsync_sub(fsg->luns + i);
3053 		do_set_config(fsg, 0);		// Unconfigured state
3054 		break;
3055 
3056 	case FSG_STATE_EXIT:
3057 	case FSG_STATE_TERMINATED:
3058 		do_set_config(fsg, 0);			// Free resources
3059 		spin_lock_irq(&fsg->lock);
3060 		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
3061 		spin_unlock_irq(&fsg->lock);
3062 		break;
3063 	}
3064 }
3065 
3066 
3067 /*-------------------------------------------------------------------------*/
3068 
3069 static int fsg_main_thread(void *fsg_)
3070 {
3071 	struct fsg_dev		*fsg = fsg_;
3072 
3073 	/* Allow the thread to be killed by a signal, but set the signal mask
3074 	 * to block everything but INT, TERM, KILL, and USR1. */
3075 	allow_signal(SIGINT);
3076 	allow_signal(SIGTERM);
3077 	allow_signal(SIGKILL);
3078 	allow_signal(SIGUSR1);
3079 
3080 	/* Allow the thread to be frozen */
3081 	set_freezable();
3082 
3083 	/* Arrange for userspace references to be interpreted as kernel
3084 	 * pointers.  That way we can pass a kernel pointer to a routine
3085 	 * that expects a __user pointer and it will work okay. */
3086 	set_fs(get_ds());
3087 
3088 	/* The main loop */
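	/* Each pass handles one command: get_next_command() reads the
	 * CBW (or CB/CBI command block), do_scsi_command() runs the
	 * handler and fills the first reply buffer, finish_reply()
	 * completes the data phase, and send_status() sends the CSW or
	 * interrupt status.  The state moves IDLE -> DATA_PHASE ->
	 * STATUS_PHASE -> IDLE unless an exception intervenes. */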
3089 	while (fsg->state != FSG_STATE_TERMINATED) {
3090 		if (exception_in_progress(fsg) || signal_pending(current)) {
3091 			handle_exception(fsg);
3092 			continue;
3093 		}
3094 
3095 		if (!fsg->running) {
3096 			sleep_thread(fsg);
3097 			continue;
3098 		}
3099 
3100 		if (get_next_command(fsg))
3101 			continue;
3102 
3103 		spin_lock_irq(&fsg->lock);
3104 		if (!exception_in_progress(fsg))
3105 			fsg->state = FSG_STATE_DATA_PHASE;
3106 		spin_unlock_irq(&fsg->lock);
3107 
3108 		if (do_scsi_command(fsg) || finish_reply(fsg))
3109 			continue;
3110 
3111 		spin_lock_irq(&fsg->lock);
3112 		if (!exception_in_progress(fsg))
3113 			fsg->state = FSG_STATE_STATUS_PHASE;
3114 		spin_unlock_irq(&fsg->lock);
3115 
3116 		if (send_status(fsg))
3117 			continue;
3118 
3119 		spin_lock_irq(&fsg->lock);
3120 		if (!exception_in_progress(fsg))
3121 			fsg->state = FSG_STATE_IDLE;
3122 		spin_unlock_irq(&fsg->lock);
3123 	}
3124 
3125 	spin_lock_irq(&fsg->lock);
3126 	fsg->thread_task = NULL;
3127 	spin_unlock_irq(&fsg->lock);
3128 
3129 	/* If we are exiting because of a signal, unregister the
3130 	 * gadget driver. */
3131 	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3132 		usb_gadget_unregister_driver(&fsg_driver);
3133 
3134 	/* Let the unbind and cleanup routines know the thread has exited */
3135 	complete_and_exit(&fsg->thread_notifier, 0);
3136 }
3137 
3138 
3139 /*-------------------------------------------------------------------------*/
3140 
3141 
3142 /* The write permissions and store_xxx pointers are set in fsg_bind() */
3143 static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
3144 static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
3145 static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
3146 
3147 
3148 /*-------------------------------------------------------------------------*/
3149 
3150 static void fsg_release(struct kref *ref)
3151 {
3152 	struct fsg_dev	*fsg = container_of(ref, struct fsg_dev, ref);
3153 
3154 	kfree(fsg->luns);
3155 	kfree(fsg);
3156 }
3157 
3158 static void lun_release(struct device *dev)
3159 {
3160 	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
3161 	struct fsg_dev		*fsg =
3162 		container_of(filesem, struct fsg_dev, filesem);
3163 
3164 	kref_put(&fsg->ref, fsg_release);
3165 }
3166 
3167 static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3168 {
3169 	struct fsg_dev		*fsg = get_gadget_data(gadget);
3170 	int			i;
3171 	struct fsg_lun		*curlun;
3172 	struct usb_request	*req = fsg->ep0req;
3173 
3174 	DBG(fsg, "unbind\n");
3175 	clear_bit(REGISTERED, &fsg->atomic_bitflags);
3176 
3177 	/* If the thread isn't already dead, tell it to exit now */
3178 	if (fsg->state != FSG_STATE_TERMINATED) {
3179 		raise_exception(fsg, FSG_STATE_EXIT);
3180 		wait_for_completion(&fsg->thread_notifier);
3181 
3182 		/* The cleanup routine waits for this completion also */
3183 		complete(&fsg->thread_notifier);
3184 	}
3185 
3186 	/* Unregister the sysfs attribute files and the LUNs */
3187 	for (i = 0; i < fsg->nluns; ++i) {
3188 		curlun = &fsg->luns[i];
3189 		if (curlun->registered) {
3190 			device_remove_file(&curlun->dev, &dev_attr_nofua);
3191 			device_remove_file(&curlun->dev, &dev_attr_ro);
3192 			device_remove_file(&curlun->dev, &dev_attr_file);
3193 			fsg_lun_close(curlun);
3194 			device_unregister(&curlun->dev);
3195 			curlun->registered = 0;
3196 		}
3197 	}
3198 
3199 	/* Free the data buffers */
3200 	for (i = 0; i < fsg_num_buffers; ++i)
3201 		kfree(fsg->buffhds[i].buf);
3202 
3203 	/* Free the request and buffer for endpoint 0 */
3204 	if (req) {
3205 		kfree(req->buf);
3206 		usb_ep_free_request(fsg->ep0, req);
3207 	}
3208 
3209 	set_gadget_data(gadget, NULL);
3210 }
3211 
3212 
3213 static int __init check_parameters(struct fsg_dev *fsg)
3214 {
3215 	int	prot;
3216 	int	gcnum;
3217 
3218 	/* Store the default values */
3219 	mod_data.transport_type = USB_PR_BULK;
3220 	mod_data.transport_name = "Bulk-only";
3221 	mod_data.protocol_type = USB_SC_SCSI;
3222 	mod_data.protocol_name = "Transparent SCSI";
3223 
3224 	/* Some peripheral controllers are known not to be able to
3225 	 * halt bulk endpoints correctly.  If one of them is present,
3226 	 * disable stalls.
3227 	 */
3228 	if (gadget_is_at91(fsg->gadget))
3229 		mod_data.can_stall = 0;
3230 
3231 	if (mod_data.release == 0xffff) {	// Parameter wasn't set
3232 		gcnum = usb_gadget_controller_number(fsg->gadget);
3233 		if (gcnum >= 0)
3234 			mod_data.release = 0x0300 + gcnum;
3235 		else {
3236 			WARNING(fsg, "controller '%s' not recognized\n",
3237 				fsg->gadget->name);
3238 			mod_data.release = 0x0399;
3239 		}
3240 	}
3241 
	prot = simple_strtol(mod_data.protocol_parm, NULL, 0);

#ifdef CONFIG_USB_FILE_STORAGE_TEST
	if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
		;		// Use default setting
	} else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
		mod_data.transport_type = USB_PR_CB;
		mod_data.transport_name = "Control-Bulk";
	} else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
		mod_data.transport_type = USB_PR_CBI;
		mod_data.transport_name = "Control-Bulk-Interrupt";
	} else {
		ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
		return -EINVAL;
	}

	if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
			prot == USB_SC_SCSI) {
		;		// Use default setting
	} else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
			prot == USB_SC_RBC) {
		mod_data.protocol_type = USB_SC_RBC;
		mod_data.protocol_name = "RBC";
	} else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
			strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
			prot == USB_SC_8020) {
		mod_data.protocol_type = USB_SC_8020;
		mod_data.protocol_name = "8020i (ATAPI)";
	} else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
			prot == USB_SC_QIC) {
		mod_data.protocol_type = USB_SC_QIC;
		mod_data.protocol_name = "QIC-157";
	} else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
			prot == USB_SC_UFI) {
		mod_data.protocol_type = USB_SC_UFI;
		mod_data.protocol_name = "UFI";
	} else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
			prot == USB_SC_8070) {
		mod_data.protocol_type = USB_SC_8070;
		mod_data.protocol_name = "8070i";
	} else {
		ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
		return -EINVAL;
	}

	mod_data.buflen &= PAGE_CACHE_MASK;
	if (mod_data.buflen <= 0) {
		ERROR(fsg, "invalid buflen\n");
		return -ETOOSMALL;
	}

#endif /* CONFIG_USB_FILE_STORAGE_TEST */

	/* Serial string handling.
	 * On a real device, the serial string would be loaded
	 * from permanent storage. */
	if (mod_data.serial) {
		const char *ch;
		unsigned len = 0;

		/* Sanity check:
		 * The CB[I] specification limits the serial string to
		 * 12 uppercase hexadecimal characters.
		 * BBB needs at least 12 uppercase hexadecimal characters,
		 * with a maximum of 126. */
		for (ch = mod_data.serial; *ch; ++ch) {
			++len;
			if ((*ch < '0' || *ch > '9') &&
			    (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
				WARNING(fsg,
					"Invalid serial string character: %c\n",
					*ch);
				goto no_serial;
			}
		}
		if (len > 126 ||
		    (mod_data.transport_type == USB_PR_BULK && len < 12) ||
		    (mod_data.transport_type != USB_PR_BULK && len > 12)) {
			WARNING(fsg, "Invalid serial string length!\n");
			goto no_serial;
		}
		fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
	} else {
		WARNING(fsg, "No serial-number string provided!\n");
 no_serial:
		device_desc.iSerialNumber = 0;
	}

	return 0;
}


static int __init fsg_bind(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = the_fsg;
	int			rc;
	int			i;
	struct fsg_lun		*curlun;
	struct usb_ep		*ep;
	struct usb_request	*req;
	char			*pathbuf, *p;

	fsg->gadget = gadget;
	set_gadget_data(gadget, fsg);
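	/* Remember endpoint 0 and point its driver_data back at our
	 * device structure. */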
	fsg->ep0 = gadget->ep0;
	fsg->ep0->driver_data = fsg;

	if ((rc = check_parameters(fsg)) != 0)
		goto out;

	if (mod_data.removable) {	// Enable the store_xxx attributes
		dev_attr_file.attr.mode = 0644;
		dev_attr_file.store = fsg_store_file;
		if (!mod_data.cdrom) {
			dev_attr_ro.attr.mode = 0644;
			dev_attr_ro.store = fsg_store_ro;
		}
	}

	/* Only for removable media? */
	dev_attr_nofua.attr.mode = 0644;
	dev_attr_nofua.store = fsg_store_nofua;

	/* Find out how many LUNs there should be */
	i = mod_data.nluns;
	if (i == 0)
		i = max(mod_data.num_filenames, 1u);
	if (i > FSG_MAX_LUNS) {
		ERROR(fsg, "invalid number of LUNs: %d\n", i);
		rc = -EINVAL;
		goto out;
	}

	/* Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs. */
	fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
	if (!fsg->luns) {
		rc = -ENOMEM;
		goto out;
	}
	fsg->nluns = i;

	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		curlun->cdrom = !!mod_data.cdrom;
		curlun->ro = mod_data.cdrom || mod_data.ro[i];
		curlun->initially_ro = curlun->ro;
		curlun->removable = mod_data.removable;
		curlun->nofua = mod_data.nofua[i];
		curlun->dev.release = lun_release;
		curlun->dev.parent = &gadget->dev;
		curlun->dev.driver = &fsg_driver.driver;
		dev_set_drvdata(&curlun->dev, &fsg->filesem);
		dev_set_name(&curlun->dev,"%s-lun%d",
			     dev_name(&gadget->dev), i);

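		/* Each registered LUN device holds a reference on the fsg
		 * structure; the matching put happens in lun_release()
		 * when the LUN's device is finally released. */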
		kref_get(&fsg->ref);
		rc = device_register(&curlun->dev);
		if (rc) {
			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
			put_device(&curlun->dev);
			goto out;
		}
		curlun->registered = 1;

		rc = device_create_file(&curlun->dev, &dev_attr_ro);
		if (rc)
			goto out;
		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
		if (rc)
			goto out;
		rc = device_create_file(&curlun->dev, &dev_attr_file);
		if (rc)
			goto out;

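		/* Open the backing file now if one was given; a removable
		 * LUN is allowed to start out with no medium present. */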
		if (mod_data.file[i] && *mod_data.file[i]) {
			rc = fsg_lun_open(curlun, mod_data.file[i]);
			if (rc)
				goto out;
		} else if (!mod_data.removable) {
			ERROR(fsg, "no file given for LUN%d\n", i);
			rc = -EINVAL;
			goto out;
		}
	}

	/* Find all the endpoints we will use */
	usb_ep_autoconfig_reset(gadget);
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_out = ep;

	if (transport_is_cbi()) {
		ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
		if (!ep)
			goto autoconf_fail;
		ep->driver_data = fsg;		// claim the endpoint
		fsg->intr_in = ep;
	}

	/* Fix up the descriptors */
	device_desc.idVendor = cpu_to_le16(mod_data.vendor);
	device_desc.idProduct = cpu_to_le16(mod_data.product);
	device_desc.bcdDevice = cpu_to_le16(mod_data.release);

	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
	fsg_intf_desc.bNumEndpoints = i;
	fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
	fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
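	/* Terminate the full-speed descriptor list just past the endpoint
	 * entries actually in use. */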
	fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;

	if (gadget_is_dualspeed(gadget)) {
		fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;

		/* Assume endpoint addresses are the same for both speeds */
		fsg_hs_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_hs_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		fsg_hs_intr_in_desc.bEndpointAddress =
			fsg_fs_intr_in_desc.bEndpointAddress;
	}

	if (gadget_is_superspeed(gadget)) {
		unsigned		max_burst;

		fsg_ss_function[i + FSG_SS_FUNCTION_PRE_EP_ENTRIES] = NULL;

		/* Calculate bMaxBurst, we know packet size is 1024 */
		max_burst = min_t(unsigned, mod_data.buflen / 1024, 15);

		/* Assume endpoint addresses are the same for both speeds */
		fsg_ss_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;

		fsg_ss_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
	}

	if (gadget_is_otg(gadget))
		fsg_otg_desc.bmAttributes |= USB_OTG_HNP;

	rc = -ENOMEM;

	/* Allocate the request and buffer for endpoint 0 */
	fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
	if (!req)
		goto out;
	req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
	if (!req->buf)
		goto out;
	req->complete = ep0_complete;

	/* Allocate the data buffers */
	for (i = 0; i < fsg_num_buffers; ++i) {
		struct fsg_buffhd	*bh = &fsg->buffhds[i];

		/* Allocate for the bulk-in endpoint.  We assume that
		 * the buffer will also work with the bulk-out (and
		 * interrupt-in) endpoint. */
		bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
		if (!bh->buf)
			goto out;
		bh->next = bh + 1;
	}
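	/* Close the circular list of buffer heads used to double-buffer
	 * the bulk transfers. */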
	fsg->buffhds[fsg_num_buffers - 1].next = &fsg->buffhds[0];

	/* This should reflect the actual gadget power source */
	usb_gadget_set_selfpowered(gadget);

	snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
			"%s %s with %s",
			init_utsname()->sysname, init_utsname()->release,
			gadget->name);

	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
			"file-storage-gadget");
	if (IS_ERR(fsg->thread_task)) {
		rc = PTR_ERR(fsg->thread_task);
		goto out;
	}

	INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
	INFO(fsg, "NOTE: This driver is deprecated.  "
			"Consider using g_mass_storage instead.\n");
	INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (fsg_lun_is_open(curlun)) {
			p = NULL;
			if (pathbuf) {
				p = d_path(&curlun->filp->f_path,
					   pathbuf, PATH_MAX);
				if (IS_ERR(p))
					p = NULL;
			}
			LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
			      curlun->ro, curlun->nofua, (p ? p : "(error)"));
		}
	}
	kfree(pathbuf);

	DBG(fsg, "transport=%s (x%02x)\n",
			mod_data.transport_name, mod_data.transport_type);
	DBG(fsg, "protocol=%s (x%02x)\n",
			mod_data.protocol_name, mod_data.protocol_type);
	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
			mod_data.vendor, mod_data.product, mod_data.release);
	DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
			mod_data.removable, mod_data.can_stall,
			mod_data.cdrom, mod_data.buflen);
	DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));

	set_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Tell the thread to start working */
	wake_up_process(fsg->thread_task);
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	rc = -ENOTSUPP;
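	/* Fall through to the common cleanup path below */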

out:
	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
	fsg_unbind(gadget);
	complete(&fsg->thread_notifier);
	return rc;
}


/*-------------------------------------------------------------------------*/

static void fsg_suspend(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);

	DBG(fsg, "suspend\n");
	set_bit(SUSPENDED, &fsg->atomic_bitflags);
}

static void fsg_resume(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);

	DBG(fsg, "resume\n");
	clear_bit(SUSPENDED, &fsg->atomic_bitflags);
}


/*-------------------------------------------------------------------------*/

static struct usb_gadget_driver		fsg_driver = {
	.max_speed	= USB_SPEED_SUPER,
	.function	= (char *) fsg_string_product,
	.unbind		= fsg_unbind,
	.disconnect	= fsg_disconnect,
	.setup		= fsg_setup,
	.suspend	= fsg_suspend,
	.resume		= fsg_resume,

	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
		// .release = ...
		// .suspend = ...
		// .resume = ...
	},
};


static int __init fsg_alloc(void)
{
	struct fsg_dev		*fsg;

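	/* The buffer heads are allocated in a single block along with the
	 * fsg_dev structure itself. */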
	fsg = kzalloc(sizeof *fsg +
		      fsg_num_buffers * sizeof *(fsg->buffhds), GFP_KERNEL);

	if (!fsg)
		return -ENOMEM;
	spin_lock_init(&fsg->lock);
	init_rwsem(&fsg->filesem);
	kref_init(&fsg->ref);
	init_completion(&fsg->thread_notifier);

	the_fsg = fsg;
	return 0;
}


static int __init fsg_init(void)
{
	int		rc;
	struct fsg_dev	*fsg;

	rc = fsg_num_buffers_validate();
	if (rc != 0)
		return rc;

	if ((rc = fsg_alloc()) != 0)
		return rc;
	fsg = the_fsg;
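	/* On probe failure, drop the initial reference set up by
	 * fsg_alloc()'s kref_init() */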
	if ((rc = usb_gadget_probe_driver(&fsg_driver, fsg_bind)) != 0)
		kref_put(&fsg->ref, fsg_release);
	return rc;
}
module_init(fsg_init);


static void __exit fsg_cleanup(void)
{
	struct fsg_dev	*fsg = the_fsg;

	/* Unregister the driver iff the thread hasn't already done so */
	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
		usb_gadget_unregister_driver(&fsg_driver);

	/* Wait for the thread to finish up */
	wait_for_completion(&fsg->thread_notifier);

	kref_put(&fsg->ref, fsg_release);
}
module_exit(fsg_cleanup);