/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>


#define STORVSC_MIN_BUF_NR				64
#define STORVSC_RING_BUFFER_SIZE			(20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
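
/*
 * Usage sketch: with 4 KiB pages the default ring buffer is
 * 20 * 4096 = 81920 bytes.  Assuming the module is built as hv_storvsc,
 * the size can be overridden at load time (and is read-only thereafter,
 * per S_IRUGO), e.g.:
 *
 *	modprobe hv_storvsc storvsc_ringbuffer_size=131072
 */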

/*
 * The revision number is used to alert the user that structure sizes may be
 * mismatched even though the protocol versions match.
 */

#define REVISION_STRING(REVISION_) #REVISION_
#define FILL_VMSTOR_REVISION(RESULT_LVALUE_)				\
	do {								\
		char *revision_string					\
			= REVISION_STRING($Rev : 6 $) + 6;		\
		RESULT_LVALUE_ = 0;					\
		while (*revision_string >= '0'				\
			&& *revision_string <= '9') {			\
			RESULT_LVALUE_ *= 10;				\
			RESULT_LVALUE_ += *revision_string - '0';	\
			revision_string++;				\
		}							\
	} while (0)
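
/*
 * Usage sketch (hypothetical variable name): REVISION_STRING stringizes
 * the source-control keyword, the "+ 6" skips its first six characters
 * ("$Rev :"), and the loop accumulates any decimal digits found there:
 *
 *	unsigned short rev;
 *	FILL_VMSTOR_REVISION(rev);
 */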

/*
 * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
 * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
 */
#define VMSTOR_PROTOCOL_MAJOR(VERSION_)		(((VERSION_) >> 8) & 0xff)
#define VMSTOR_PROTOCOL_MINOR(VERSION_)		(((VERSION_))      & 0xff)
#define VMSTOR_PROTOCOL_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
						 (((MINOR_) & 0xff)))
#define VMSTOR_INVALID_PROTOCOL_VERSION		(-1)
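
/*
 * Worked example: VMSTOR_PROTOCOL_VERSION(4, 2) encodes to 0x0402, and
 * decoding it gives VMSTOR_PROTOCOL_MAJOR(0x0402) == 4 and
 * VMSTOR_PROTOCOL_MINOR(0x0402) == 2.
 */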

/*
 * Version history:
 * V1 Beta                    0.1
 * V1 RC < 2008/1/31          1.0
 * V1 RC > 2008/1/31          2.0
 * Win7                       4.2
 */
#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)


/*
 * This will get replaced with the max transfer length that is possible on
 * the host adapter.
 * The max transfer length will be published when we offer a vmbus channel.
 */
#define MAX_TRANSFER_LENGTH	0x40000
#define DEFAULT_PACKET_SIZE (sizeof(struct vmdata_gpa_direct) +	\
			sizeof(struct vstor_packet) +		\
			(sizeof(u64) * (MAX_TRANSFER_LENGTH / PAGE_SIZE)))


/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
	VSTOR_OPERATION_COMPLETE_IO		= 1,
	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
	VSTOR_OPERATION_EXECUTE_SRB		= 3,
	VSTOR_OPERATION_RESET_LUN		= 4,
	VSTOR_OPERATION_RESET_ADAPTER		= 5,
	VSTOR_OPERATION_RESET_BUS		= 6,
	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
	VSTOR_OPERATION_END_INITIALIZATION	= 8,
	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
	VSTOR_OPERATION_MAXIMUM			= 11
};
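
/*
 * For reference, storvsc_channel_init() below drives these operations in
 * the order BEGIN_INITIALIZATION, QUERY_PROTOCOL_VERSION,
 * QUERY_PROPERTIES, END_INITIALIZATION; each step is acknowledged by a
 * COMPLETE_IO packet carrying a zero status.
 */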

/*
 * Platform neutral description of a scsi request -
 * this remains the same across the write regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define CDB16GENERIC_LENGTH			0x10

#ifndef SENSE_BUFFER_SIZE
#define SENSE_BUFFER_SIZE			0x12
#endif

#define MAX_DATA_BUF_LEN_WITH_PADDING		0x14

struct vmscsi_request {
	unsigned short length;
	unsigned char srb_status;
	unsigned char scsi_status;

	unsigned char port_number;
	unsigned char path_id;
	unsigned char target_id;
	unsigned char lun;

	unsigned char cdb_length;
	unsigned char sense_info_length;
	unsigned char data_in;
	unsigned char reserved;

	unsigned int data_transfer_length;

	union {
		unsigned char cdb[CDB16GENERIC_LENGTH];
		unsigned char sense_data[SENSE_BUFFER_SIZE];
		unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
	};
} __packed;


/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */
struct vmstorage_channel_properties {
	unsigned short protocol_version;
	unsigned char path_id;
	unsigned char target_id;

	/* Note: port number is only really known on the client side */
	unsigned int port_number;
	unsigned int flags;
	unsigned int max_transfer_bytes;

	/*
	 * This id is unique for each channel and will correspond with
	 * vendor specific data in the inquiry data.
	 */
	unsigned long long unique_id;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	unsigned short major_minor;

	/*
	 * Revision number is auto-incremented whenever this file is changed
	 * (See FILL_VMSTOR_REVISION macro above).  Mismatch does not
	 * definitely indicate incompatibility--but it does indicate mismatched
	 * builds.
	 */
	unsigned short revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG		0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG	0x2

struct vstor_packet {
	/* Requested operation type */
	enum vstor_packet_operation operation;

	/* Flags - see below for values */
	unsigned int flags;

	/* Status of the request returned from the server side. */
	unsigned int status;

	/* Data payload area */
	union {
		/*
		 * Structure used to forward SCSI commands from the
		 * client to the server.
		 */
		struct vmscsi_request vm_srb;

		/* Structure used to query channel properties. */
		struct vmstorage_channel_properties storage_channel_properties;

		/* Used during version negotiations. */
		struct vmstorage_protocol_version version;
	};
} __packed;

/* Packet flags */
/*
 * This flag indicates that the server should send back a completion for this
 * packet.
 */
#define REQUEST_COMPLETION_FLAG	0x1

/* This is the set of flags that the vsc can set in any packets it sends */
#define VSC_LEGAL_FLAGS		(REQUEST_COMPLETION_FLAG)


/* Defines */

#define STORVSC_MAX_IO_REQUESTS				128

/*
 * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
 * reality, the path/target is not used (i.e., always set to 0) so our
 * scsi host adapter essentially has 1 bus with 1 target that contains
 * up to STORVSC_MAX_LUNS_PER_TARGET (currently 64) LUNs.
 */
#define STORVSC_MAX_LUNS_PER_TARGET			64
#define STORVSC_MAX_TARGETS				1
#define STORVSC_MAX_CHANNELS				1
#define STORVSC_MAX_CMD_LEN				16

/* Matches the Windows end */
enum storvsc_request_type {
	WRITE_TYPE,
	READ_TYPE,
	UNKNOWN_TYPE,
};


struct hv_storvsc_request {
	struct hv_device *device;

	/* Synchronize the request/response if needed */
	struct completion wait_event;

	unsigned char *sense_buffer;
	void *context;
	void (*on_io_completion)(struct hv_storvsc_request *request);
	struct hv_multipage_buffer data_buffer;

	struct vstor_packet vstor_packet;
};


/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
	struct hv_device *device;

	bool	 destroy;
	bool	 drain_notify;
	atomic_t num_outstanding_req;
	struct Scsi_Host *host;

	wait_queue_head_t waiting_to_drain;

	/*
	 * Each unique Port/Path/Target represents 1 channel, i.e. one SCSI
	 * controller.  In reality, the path ID and target ID are always 0
	 * and the port is set by us.
	 */
	unsigned int port_number;
	unsigned char path_id;
	unsigned char target_id;

	/* Used for vsc/vsp channel reset process */
	struct hv_storvsc_request init_request;
	struct hv_storvsc_request reset_request;
};

struct stor_mem_pools {
	struct kmem_cache *request_pool;
	mempool_t *request_mempool;
};

struct hv_host_device {
	struct hv_device *dev;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_storvsc_request request;
};

struct storvsc_scan_work {
	struct work_struct work;
	struct Scsi_Host *host;
	uint lun;
};

static void storvsc_bus_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	int id, order_id;

	wrk = container_of(work, struct storvsc_scan_work, work);
	for (id = 0; id < wrk->host->max_id; ++id) {
		if (wrk->host->reverse_ordering)
			order_id = wrk->host->max_id - id - 1;
		else
			order_id = id;

		scsi_scan_target(&wrk->host->shost_gendev, 0,
				order_id, SCAN_WILD_CARD, 1);
	}
	kfree(wrk);
}

static void storvsc_remove_lun(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	if (!scsi_host_get(wrk->host))
		goto done;

	sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}

static inline struct storvsc_device *get_out_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (stor_device && stor_device->destroy)
		stor_device = NULL;

	return stor_device;
}


static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	dev->drain_notify = true;
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}
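
/*
 * Note: this waits on the counterpart in storvsc_on_io_completion(),
 * which decrements num_outstanding_req and, while drain_notify is set,
 * wakes waiting_to_drain once the count reaches zero.
 */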

static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed, allow incoming
	 * traffic only to clean up outstanding requests.
	 */

	if (stor_device->destroy &&
		(atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	return stor_device;

}

static int storvsc_channel_init(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct hv_storvsc_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


	/* reuse the packet for the version range supported */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
	FILL_VMSTOR_REVISION(vstor_packet->version.revision);

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->storage_channel_properties.port_number =
					stor_device->port_number;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;

	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
	stor_device->target_id
		= vstor_packet->storage_channel_properties.target_id;

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		goto cleanup;


cleanup:
	return ret;
}

static void storvsc_on_io_completion(struct hv_device *device,
				  struct vstor_packet *vstor_packet,
				  struct hv_storvsc_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *stor_pkt;

	stor_device = hv_get_drvdata(device);
	stor_pkt = &request->vstor_packet;

	/*
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 *
	 * Set up the srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and off-line the device in that case.
	 */

	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
		(stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
		vstor_packet->vm_srb.scsi_status = 0;
		vstor_packet->vm_srb.srb_status = 0x1;
	}


	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
	stor_pkt->vm_srb.sense_info_length =
	vstor_packet->vm_srb.sense_info_length;

	if (vstor_packet->vm_srb.scsi_status != 0 ||
		vstor_packet->vm_srb.srb_status != 1) {
		dev_warn(&device->device,
			 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
			 stor_pkt->vm_srb.cdb[0],
			 vstor_packet->vm_srb.scsi_status,
			 vstor_packet->vm_srb.srb_status);
	}

	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
		/* CHECK_CONDITION */
		if (vstor_packet->vm_srb.srb_status & 0x80) {
			/* autosense data available */
			dev_warn(&device->device,
				 "stor pkt %p autosense data valid - len %d\n",
				 request,
				 vstor_packet->vm_srb.sense_info_length);

			memcpy(request->sense_buffer,
			       vstor_packet->vm_srb.sense_data,
			       vstor_packet->vm_srb.sense_info_length);

		}
	}

	stor_pkt->vm_srb.data_transfer_length =
	vstor_packet->vm_srb.data_transfer_length;

	request->on_io_completion(request);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
		stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);


}

static void storvsc_on_receive(struct hv_device *device,
			     struct vstor_packet *vstor_packet,
			     struct hv_storvsc_request *request)
{
	struct storvsc_scan_work *work;
	struct storvsc_device *stor_device;

	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(device, vstor_packet, request);
		break;

	case VSTOR_OPERATION_REMOVE_DEVICE:
	case VSTOR_OPERATION_ENUMERATE_BUS:
		stor_device = get_in_stor_device(device);
		work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
		if (!work)
			return;

		INIT_WORK(&work->work, storvsc_bus_scan);
		work->host = stor_device->host;
		schedule_work(&work->work);
		break;

	default:
		break;
	}
}

static void storvsc_on_channel_callback(void *context)
{
	struct hv_device *device = (struct hv_device *)context;
	struct storvsc_device *stor_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
	struct hv_storvsc_request *request;
	int ret;


	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	do {
		ret = vmbus_recvpacket(device->channel, packet,
				       ALIGN(sizeof(struct vstor_packet), 8),
				       &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd > 0) {

			request = (struct hv_storvsc_request *)
					(unsigned long)request_id;

			if ((request == &stor_device->init_request) ||
			    (request == &stor_device->reset_request)) {

				memcpy(&request->vstor_packet, packet,
				       sizeof(struct vstor_packet));
				complete(&request->wait_event);
			} else {
				storvsc_on_receive(device,
						(struct vstor_packet *)packet,
						request);
			}
		} else {
			break;
		}
	} while (1);

	return;
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	/* Open the channel */
	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device);

	return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	stor_device = hv_get_drvdata(device);

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device);
	return 0;
}

static int storvsc_do_io(struct hv_device *device,
			      struct hv_storvsc_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	int ret = 0;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;


	request->device  = device;


	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);


	vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;


	vstor_packet->vm_srb.data_transfer_length =
	request->data_buffer.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->data_buffer.len) {
		ret = vmbus_sendpacket_multipagebuffer(device->channel,
				&request->data_buffer,
				vstor_packet,
				sizeof(struct vstor_packet),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}

static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
{
	*target =
		dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];

	*path =
		dev->dev_instance.b[3] << 24 |
		dev->dev_instance.b[2] << 16 |
		dev->dev_instance.b[1] << 8  | dev->dev_instance.b[0];
}


static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp;
	int number = STORVSC_MIN_BUF_NR;

	memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
	if (!memp)
		return -ENOMEM;

	memp->request_pool =
		kmem_cache_create(dev_name(&sdevice->sdev_dev),
				sizeof(struct storvsc_cmd_request), 0,
				SLAB_HWCACHE_ALIGN, NULL);

	if (!memp->request_pool)
		goto err0;

	memp->request_mempool = mempool_create(number, mempool_alloc_slab,
						mempool_free_slab,
						memp->request_pool);

	if (!memp->request_mempool)
		goto err1;

	sdevice->hostdata = memp;

	return 0;

err1:
	kmem_cache_destroy(memp->request_pool);

err0:
	kfree(memp);
	return -ENOMEM;
}

static void storvsc_device_destroy(struct scsi_device *sdevice)
{
	struct stor_mem_pools *memp = sdevice->hostdata;

	mempool_destroy(memp->request_mempool);
	kmem_cache_destroy(memp->request_pool);
	kfree(memp);
	sdevice->hostdata = NULL;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	return 0;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure the 1st one does not have a hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure the last one does not have a hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure there is no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}
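
/*
 * Worked example: a two-entry list whose first segment is 512 bytes at
 * offset 0 fails the first check (0 + 512 != PAGE_SIZE) and returns 0,
 * so the I/O must be bounced; a list whose first entry ends on a page
 * boundary, whose middle entries are full pages, and whose last entry
 * starts at offset 0 returns -1 and can be sent directly.
 */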

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len,
						int write)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;
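	/*
	 * For a write the bounce entries start at length 0 and grow as
	 * copy_to_bounce_buffer() fills them; for a read each entry is
	 * published at full page length so the host can fill it.
	 */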
	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}


/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count,
					    unsigned int bounce_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
							KM_IRQ0);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/*
				 * It is possible that the number of elements
				 * in the bounce buffer may not be equal to
				 * the number of elements in the original
				 * scatter list. Handle this correctly.
				 */

				if (j == bounce_sgl_count) {
					/*
					 * We are done; cleanup and return.
					 */
					kunmap_atomic((void *)(dest_addr -
							orig_sgl[i].offset),
							KM_IRQ0);
					local_irq_restore(flags);
					return total_copied;
				}

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
			      KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}


/* Assume bounce_sgl has enough room, i.e. it was built by create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
				KM_IRQ0) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						KM_IRQ0);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}


static int storvsc_remove(struct hv_device *dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(dev);
	struct Scsi_Host *host = stor_device->host;

	scsi_remove_host(host);

	scsi_host_put(host);

	storvsc_dev_remove(dev);

	return 0;
}


static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}
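
/*
 * Worked example: for a 1 GiB disk (2097152 512-byte sectors),
 * heads = 0xff (255) and sectors_pt = 0x3f (63), so
 * cylinders = 2097152 / (255 * 63) = 130 (truncated); since
 * 131 * 255 * 63 = 2104515 >= 2097152, the 0xffff clamp is not taken.
 */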

static int storvsc_host_reset(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;


	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0)
		return TIMEOUT_ERROR;


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 */

	return SUCCESS;
}


/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *dev = host_dev->dev;

	return storvsc_host_reset(dev);
}


/*
 * storvsc_command_completion - Command completion processing
 */
static void storvsc_command_completion(struct hv_storvsc_request *request)
{
	struct storvsc_cmd_request *cmd_request =
		(struct storvsc_cmd_request *)request->context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	struct storvsc_scan_work *wrk;
	struct stor_mem_pools *memp = scmnd->device->hostdata;

	vm_srb = &request->vstor_packet.vm_srb;
	if (cmd_request->bounce_sgl_count) {
		if (vm_srb->data_in == READ_TYPE)
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd),
					cmd_request->bounce_sgl_count);
		destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
	}

	/*
	 * If there is an error, offline the device since all
	 * error recovery strategies would have already been
	 * deployed on the host side.
	 */
	if (vm_srb->srb_status == 0x4)
		scmnd->result = DID_TARGET_FAILURE << 16;
	else
		scmnd->result = vm_srb->scsi_status;

	/*
	 * If the LUN is invalid, remove the device.
	 */
	if (vm_srb->srb_status == 0x20) {
		struct storvsc_device *stor_dev;
		struct hv_device *dev = host_dev->dev;
		struct Scsi_Host *host;

		stor_dev = get_in_stor_device(dev);
		host = stor_dev->host;

		wrk = kmalloc(sizeof(struct storvsc_scan_work),
				GFP_ATOMIC);
		if (!wrk) {
			scmnd->result = DID_TARGET_FAILURE << 16;
		} else {
			wrk->host = host;
			wrk->lun = vm_srb->lun;
			INIT_WORK(&wrk->work, storvsc_remove_lun);
			schedule_work(&wrk->work);
		}
	}

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	scsi_set_resid(scmnd,
		request->data_buffer.len -
		vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	scsi_done_fn(scmnd);

	mempool_free(cmd_request, memp->request_mempool);
}

static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/* smartd sends this command, which will offline the device */
	case SET_WINDOW:
		scmnd->result = ILLEGAL_REQUEST << 16;
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct hv_storvsc_request *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size = 0;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;
	struct stor_mem_pools *memp = scmnd->device->hostdata;

	if (storvsc_check_scsi_cmd(scmnd) == false) {
		scmnd->scsi_done(scmnd);
		return 0;
	}

	/* If retrying, no need to prep the cmd */
	if (scmnd->host_scribble) {

		cmd_request =
			(struct storvsc_cmd_request *)scmnd->host_scribble;

		goto retry_request;
	}

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = mempool_alloc(memp->request_mempool,
				       GFP_ATOMIC);
	if (!cmd_request)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));

	/* Setup the cmd request */
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	request = &cmd_request->request;
	vm_srb = &request->vstor_packet.vm_srb;


	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}

	request->on_io_completion = storvsc_command_completion;
	request->context = cmd_request;/* scmnd; */

	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	request->sense_buffer = scmnd->sense_buffer;


	request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd),
						     vm_srb->data_in);
			if (!cmd_request->bounce_sgl) {
				scmnd->host_scribble = NULL;
				mempool_free(cmd_request,
						memp->request_mempool);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			if (vm_srb->data_in == WRITE_TYPE)
				copy_to_bounce_buffer(sgl,
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

retry_request:
	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, &cmd_request->request);

	if (ret == -EAGAIN) {
		/* no more space */

		if (cmd_request->bounce_sgl_count)
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);

		mempool_free(cmd_request, memp->request_mempool);

		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	return ret;
}

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
	.module	=		THIS_MODULE,
	.name =			"storvsc_host_t",
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler =	storvsc_host_reset_handler,
	.slave_alloc =		storvsc_device_alloc,
	.slave_destroy =	storvsc_device_destroy,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		1,
	/* 128 max_queue * 1 target */
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw resets it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	.use_clustering =	DISABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
};

enum {
	SCSI_GUID,
	IDE_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
	  .driver_data = SCSI_GUID },
	/* IDE guid */
	{ VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
	  .driver_data = IDE_GUID },
	{ },
};
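
/*
 * In canonical GUID form the byte arrays above decode (first three
 * fields little-endian) to ba6163d9-04a1-4d29-b605-72e2ffb1dc7f for the
 * SCSI class and 32412632-86cb-44a2-9b5c-50d1417354f5 for IDE.
 */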

MODULE_DEVICE_TABLE(vmbus, id_table);


/*
 * storvsc_probe - Add a new device for this driver
 */

static int storvsc_probe(struct hv_device *device,
			const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
	int path = 0;
	int target = 0;
	struct storvsc_device *stor_device;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	host_dev = shost_priv(host);
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;


	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device) {
		ret = -ENOMEM;
		goto err_out0;
	}

	stor_device->destroy = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	stor_device->host = host;
	hv_set_drvdata(device, stor_device);

	stor_device->port_number = host->host_no;
	ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
	if (ret)
		goto err_out1;

	if (dev_is_ide)
		storvsc_get_ide_info(device, &target, &path);

	host_dev->path = stor_device->path_id;
	host_dev->target = stor_device->target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out2;

	if (!dev_is_ide) {
		scsi_scan_host(host);
		return 0;
	}
	ret = scsi_add_device(host, 0, target, 0);
	if (ret) {
		scsi_remove_host(host);
		goto err_out2;
	}
	return 0;

err_out2:
	/*
	 * Once we have connected with the host, we need to invoke
	 * storvsc_dev_remove() to roll back this state; that call also
	 * frees up the stor_device, hence the jump around the
	 * err_out1 label.
	 */
	storvsc_dev_remove(device);
	goto err_out0;

err_out1:
	kfree(stor_device);

err_out0:
	scsi_host_put(host);
	return ret;
}

/* The one and only one */

static struct hv_driver storvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64),
		sizeof(u64)));

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -EINVAL;
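
	/*
	 * Sanity-check example (assuming 4 KiB pages): with the default
	 * 20-page ring the usable data area is 19 * 4096 = 77824 bytes,
	 * so fitting STORVSC_MAX_IO_REQUESTS (128) outstanding requests
	 * requires the aligned per-request footprint computed above to
	 * be at most 77824 / 128 = 608 bytes.
	 */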

	return vmbus_driver_register(&storvsc_drv);
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);