xref: /src/sys/dev/smartpqi/smartpqi_ioctl.c (revision 7f54c65abc67f50363bbd2a68a980d23e69c9ef0)
1 /*-
2  * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 
27 /*
28  * Management interface for smartpqi driver
29  */
30 
31 #include "smartpqi_includes.h"
32 
33 /*
34  * Wrapper function to copy to user from kernel
35  */
int
os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	/*
	 * Thin OS-abstraction shim around copyout(9); softs and mode exist
	 * only to keep the signature uniform across supported platforms.
	 */
	int rval;

	rval = copyout(src_buf, dest_buf, size);
	return (rval);
}
42 
43 /*
44  * Wrapper function to copy from user to kernel
45  */
int
os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	/*
	 * Thin OS-abstraction shim around copyin(9); softs and mode exist
	 * only to keep the signature uniform across supported platforms.
	 */
	int rval;

	rval = copyin(src_buf, dest_buf, size);
	return (rval);
}
52 
53 /*
54  * Device open function for ioctl entry
55  */
56 static int
smartpqi_open(struct cdev * cdev,int flags,int devtype,struct thread * td)57 smartpqi_open(struct cdev *cdev, int flags, int devtype,
58 		struct thread *td)
59 {
60 	return BSD_SUCCESS;
61 }
62 
63 /*
64  * Device close function for ioctl entry
65  */
66 static int
smartpqi_close(struct cdev * cdev,int flags,int devtype,struct thread * td)67 smartpqi_close(struct cdev *cdev, int flags, int devtype,
68 		struct thread *td)
69 {
70 	return BSD_SUCCESS;
71 }
72 
73 /*
74  * ioctl for getting driver info
75  */
76 static void
smartpqi_get_driver_info_ioctl(caddr_t udata,struct cdev const * cdev)77 smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev const *cdev)
78 {
79 	struct pqisrc_softstate *softs = cdev->si_drv1;
80 	pdriver_info driver_info = (pdriver_info)udata;
81 
82 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
83 
84 	driver_info->major_version = PQISRC_DRIVER_MAJOR;
85 #if __FreeBSD__ <= 14
86 	driver_info->minor_version = (unsigned char) ((PQISRC_DRIVER_MINOR >> 4) & 0xFF);
87 #else
88 	driver_info->minor_version = PQISRC_DRIVER_MINOR;
89 #endif
90 	driver_info->release_version = PQISRC_DRIVER_RELEASE;
91 	driver_info->build_revision = PQISRC_DRIVER_REVISION;
92 	driver_info->max_targets = PQI_MAX_DEVICES - 1;
93 	driver_info->max_io = softs->max_io_for_scsi_ml;
94 	driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
95 
96 	DBG_FUNC("OUT\n");
97 }
98 
99 /*
100  * ioctl for getting controller info
101  */
102 static void
smartpqi_get_pci_info_ioctl(caddr_t udata,struct cdev const * cdev)103 smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev const *cdev)
104 {
105 	struct pqisrc_softstate *softs = cdev->si_drv1;
106 	device_t dev = softs->os_specific.pqi_dev;
107 	pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
108 	uint32_t sub_vendor = 0;
109 	uint32_t sub_device = 0;
110 	uint32_t vendor = 0;
111 	uint32_t device = 0;
112 
113 	DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
114 
115 	pci_info->bus = pci_get_bus(dev);
116 	pci_info->dev_fn = pci_get_function(dev);
117 	pci_info->domain = pci_get_domain(dev);
118 	sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
119 	sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
120 	pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
121 	vendor = pci_get_vendor(dev);
122 	device =  pci_get_device(dev);
123 	pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
124 
125 	DBG_FUNC("OUT\n");
126 }
127 
128 static inline int
pqi_status_to_bsd_ioctl_status(int pqi_status)129 pqi_status_to_bsd_ioctl_status(int pqi_status)
130 {
131 	if (PQI_STATUS_SUCCESS == pqi_status)
132 		return BSD_SUCCESS;
133 	else
134 		return EIO;
135 }
136 
137 /*
138  * ioctl entry point for user
139  */
140 static int
smartpqi_ioctl(struct cdev * cdev,u_long cmd,caddr_t udata,int flags,struct thread * td)141 smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
142 		int flags, struct thread *td)
143 {
144 	int bsd_status, pqi_status;
145 	struct pqisrc_softstate *softs = cdev->si_drv1;
146 
147 	DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
148 
149 	if (!udata) {
150 		DBG_ERR("udata is null !!\n");
151 		return EINVAL;
152 	}
153 
154 	if (pqisrc_ctrl_offline(softs)){
155 		return ENOTTY;
156 	}
157 
158 	switch (cmd) {
159 		case CCISS_GETDRIVVER:
160 			smartpqi_get_driver_info_ioctl(udata, cdev);
161 			bsd_status = BSD_SUCCESS;
162 			break;
163 		case CCISS_GETPCIINFO:
164 			smartpqi_get_pci_info_ioctl(udata, cdev);
165 			bsd_status = BSD_SUCCESS;
166 			break;
167 		case SMARTPQI_PASS_THRU:
168 		case CCISS_PASSTHRU:
169 			pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
170 			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
171 			break;
172 		case SMARTPQI_BIG_PASS_THRU:
173 			pqi_status = pqisrc_big_passthru_ioctl(softs, udata, 0);
174 			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
175 			break;
176 		case SMARTPQI_BIG_PASSTHRU_SUPPORTED:
177 			bsd_status = BSD_SUCCESS;
178 			break;
179 		case CCISS_REGNEWD:
180 			pqi_status = pqisrc_scan_devices(softs);
181 			bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
182 			break;
183 		default:
184 			DBG_WARN( "!IOCTL cmd 0x%lx not supported\n", cmd);
185 			bsd_status = ENOTTY;
186 			break;
187 	}
188 
189 	DBG_FUNC("OUT error = %d\n", bsd_status);
190 
191 	return bsd_status;
192 }
193 
194 static struct cdevsw smartpqi_cdevsw =
195 {
196 	.d_version = D_VERSION,
197 	.d_open    = smartpqi_open,
198 	.d_close   = smartpqi_close,
199 	.d_ioctl   = smartpqi_ioctl,
200 	.d_name    = "smartpqi",
201 };
202 
203 /*
204  * Function to create device node for ioctl
205  */
206 int
create_char_dev(struct pqisrc_softstate * softs,int card_index)207 create_char_dev(struct pqisrc_softstate *softs, int card_index)
208 {
209 	int error = BSD_SUCCESS;
210 
211 	DBG_FUNC("IN idx = %d\n", card_index);
212 
213 	softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
214 				UID_ROOT, GID_OPERATOR, 0640,
215 				"smartpqi%u", card_index);
216 	if(softs->os_specific.cdev) {
217 		softs->os_specific.cdev->si_drv1 = softs;
218 	} else {
219 		error = ENXIO;
220 	}
221 
222 	DBG_FUNC("OUT error = %d\n", error);
223 
224 	return error;
225 }
226 
227 /*
228  * Function to destroy device node for ioctl
229  */
230 void
destroy_char_dev(struct pqisrc_softstate * softs)231 destroy_char_dev(struct pqisrc_softstate *softs)
232 {
233 	DBG_FUNC("IN\n");
234 	if (softs->os_specific.cdev) {
235 		destroy_dev(softs->os_specific.cdev);
236 		softs->os_specific.cdev = NULL;
237 	}
238 	DBG_FUNC("OUT\n");
239 }
240 
241 /*
242  * Function used to send passthru commands to adapter
243  * to support management tools. For eg. ssacli, sscon.
244  */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret;
	char *drv_buf = NULL;		/* kernel-side DMA staging buffer */
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;	/* only valid when buf_size > 0 */
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return PQI_STATUS_FAILURE;

	/* A data-bearing direction must come with a non-empty buffer. */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	/* CDB must fit the fixed-size field in the request. */
	if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb))
		return PQI_STATUS_FAILURE;

	/* Reject any direction value other than the four known ones. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	/*
	 * Allocate a DMA-able bounce buffer for the data phase and, for
	 * host-to-controller transfers, fill it from the user's buffer.
	 */
	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer", sizeof(ioctl_dma_buf.tag));
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			goto out;
		}

		DBG_IO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_IO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode);
			if (ret != 0) {
				goto free_mem;
			}
		}
	}

	/* Build the RAID-path IU: header, LUN, CDB, direction, one SG entry. */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	/* IU length covers everything after the header through sg_descriptors[0]. */
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/*
	 * NOTE(review): WRITE maps to SOP_DATA_DIR_FROM_DEVICE and READ to
	 * SOP_DATA_DIR_TO_DEVICE; the SOP constants are presumably named
	 * from the device's perspective — confirm against the SOP spec
	 * before "correcting" this apparent inversion.
	 */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	if (iocommand->buf_size > 0) {
		/* Single SG element describing the whole bounce buffer. */
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a command tag; it doubles as the rcb/error-table index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	if (softs->timeout_in_passthrough) {
		/* Let firmware enforce the caller-supplied timeout. */
		request.timeout_in_sec = iocommand->Request.Timeout;
	}

	/* Arm the request control block before the IU is posted. */
	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the completion callback fires or the wait times out. */
	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));


	/*
	 * On a non-success completion, relay SCSI status and sense/response
	 * data to the caller, clamped to both the firmware's and the
	 * user structure's capacity.
	 */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data was returned. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* An underflow is treated as success; partial data is still copied out. */
		if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
			rcb->status = PQI_STATUS_SUCCESS;
		}
	}

	/* Copy controller-to-host data back to the user buffer on success. */
	if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode);
		if (ret != 0) {
			DBG_ERR("Failed to copy the response\n");
			goto err_out;
		}
	}

	/* Normal completion: release rcb, tag, and the bounce buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;

	/* Error unwinding: each label releases what was acquired before it. */
err_out:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}
432 
433 /*
434  * Function used to send big passthru commands to adapter
435  * to support management tools. For eg. ssacli, sscon.
436  */
int
pqisrc_big_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret;
	char *drv_buf = NULL;		/* kernel-side DMA staging buffer */
	uint32_t tag = 0;
	BIG_IOCTL_Command_struct *iocommand = (BIG_IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;	/* only valid when buf_size > 0 */
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t const *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return PQI_STATUS_FAILURE;

	/* A data-bearing direction must come with a non-empty buffer. */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	/* CDB must fit the fixed-size field in the request. */
	if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb))
		return PQI_STATUS_FAILURE;

	/* Reject any direction value other than the four known ones. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	/*
	 * Allocate a DMA-able bounce buffer for the data phase and, for
	 * host-to-controller transfers, fill it from the user's buffer.
	 */
	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer", sizeof(ioctl_dma_buf.tag));
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			goto out;
		}

		DBG_IO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_IO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode);
			if (ret != 0) {
				goto free_mem;
			}
		}
	}

	/* Build the RAID-path IU: header, LUN, CDB, direction, one SG entry. */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	/* IU length covers everything after the header through sg_descriptors[0]. */
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/*
	 * NOTE(review): WRITE maps to SOP_DATA_DIR_FROM_DEVICE and READ to
	 * SOP_DATA_DIR_TO_DEVICE; the SOP constants are presumably named
	 * from the device's perspective — confirm against the SOP spec
	 * before "correcting" this apparent inversion.
	 */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	if (iocommand->buf_size > 0) {
		/* Single SG element describing the whole bounce buffer. */
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a command tag; it doubles as the rcb/error-table index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	if (softs->timeout_in_passthrough) {
		/* Let firmware enforce the caller-supplied timeout. */
		request.timeout_in_sec = iocommand->Request.Timeout;
	}

	/* Arm the request control block before the IU is posted. */
	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the completion callback fires or the wait times out. */
	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));


	/*
	 * On a non-success completion, relay SCSI status and sense/response
	 * data to the caller, clamped to both the firmware's and the
	 * user structure's capacity.
	 */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data was returned. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* An underflow is treated as success; partial data is still copied out. */
		if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
			rcb->status = PQI_STATUS_SUCCESS;
		}
	}

	/* Copy controller-to-host data back to the user buffer on success. */
	if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode);
		if (ret != 0) {
			DBG_ERR("Failed to copy the response\n");
			goto err_out;
		}
	}

	/* Normal completion: release rcb, tag, and the bounce buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;

	/* Error unwinding: each label releases what was acquired before it. */
err_out:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}