1 /*-
2  * Copyright 2016-2025 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 /*
27  * CAM interface for smartpqi driver
28  */
29 
30 #include "smartpqi_includes.h"
31 
32 /*
 * Set the CAM SIM properties of the smartpqi adapter.
34  */
35 static void
36 update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
37 {
38 
39 	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
40 					cam_sim_softc(sim);
41 
42 	device_t dev = softs->os_specific.pqi_dev;
43 
44 	DBG_FUNC("IN\n");
45 
46 	cpi->version_num = 1;
47 	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
48 	cpi->target_sprt = 0;
49 	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
50 	cpi->hba_eng_cnt = 0;
51 	cpi->max_lun = PQI_MAX_MULTILUN;
52 	cpi->max_target = MAX_TARGET_DEVICES;
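	/*
	 * Largest I/O we advertise: with one S/G element held in reserve,
	 * even a buffer that is not page aligned still fits within
	 * max_sg_elem scatter/gather segments.
	 */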
53 	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
54 	cpi->initiator_id = 255;
55 	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN-1);
56 	cpi->sim_vid[sizeof(cpi->sim_vid)-1] = '\0';
57 	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN-1);
58 	cpi->hba_vid[sizeof(cpi->hba_vid)-1] = '\0';
59 	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN-1);
60 	cpi->dev_name[sizeof(cpi->dev_name)-1] = '\0';
61 	cpi->unit_number = cam_sim_unit(sim);
62 	cpi->bus_id = cam_sim_bus(sim);
63 	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
64 	cpi->protocol = PROTO_SCSI;
65 	cpi->protocol_version = SCSI_REV_SPC4;
66 	cpi->transport = XPORT_SPI;
67 	cpi->transport_version = 2;
68 	cpi->ccb_h.status = CAM_REQ_CMP;
69 	cpi->hba_vendor = pci_get_vendor(dev);
70 	cpi->hba_device = pci_get_device(dev);
71 	cpi->hba_subvendor = pci_get_subvendor(dev);
72 	cpi->hba_subdevice = pci_get_subdevice(dev);
73 
74 	DBG_FUNC("OUT\n");
75 }
76 
77 /*
78  * Get transport settings of the smartpqi adapter.
79  */
80 static void
81 get_transport_settings(struct pqisrc_softstate *softs,
82 		struct ccb_trans_settings *cts)
83 {
84 	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
85 	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
86 	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;
87 
88 	DBG_FUNC("IN\n");
89 
90 	cts->protocol = PROTO_SCSI;
91 	cts->protocol_version = SCSI_REV_SPC4;
92 	cts->transport = XPORT_SPI;
93 	cts->transport_version = 2;
94 	spi->valid = CTS_SPI_VALID_DISC;
95 	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
96 	scsi->valid = CTS_SCSI_VALID_TQ;
97 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
98 	sas->valid = CTS_SAS_VALID_SPEED;
99 	cts->ccb_h.status = CAM_REQ_CMP;
100 
101 	DBG_FUNC("OUT\n");
102 }
103 
104 /*
 * Add the target to the CAM layer and rescan when a new device is found.
106  */
107 void
108 os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
109 {
110 	union ccb *ccb;
111 	uint64_t lun;
112 
113 	DBG_FUNC("IN\n");
114 
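	/* For multi-LUN devices rescan every LUN under the target; otherwise rescan only this LUN. */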
115 	lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
116 	if(softs->os_specific.sim_registered) {
117 		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
118 			DBG_ERR("rescan failed (can't allocate CCB)\n");
119 			return;
120 		}
121 
122 		if (xpt_create_path(&ccb->ccb_h.path, NULL,
123 			cam_sim_path(softs->os_specific.sim),
124 			device->target, lun) != CAM_REQ_CMP) {
125 			DBG_ERR("rescan failed (can't create path)\n");
126 			xpt_free_ccb(ccb);
127 			return;
128 		}
129 		xpt_rescan(ccb);
130 	}
131 
132 	DBG_FUNC("OUT\n");
133 }
134 
135 /*
 * Remove the device from the CAM layer when it is deleted or hot removed.
137  */
138 void
139 os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
140 {
141 	struct cam_path *tmppath = NULL;
142 	uint64_t lun;
143 
144 	DBG_FUNC("IN\n");
145 
146 	lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
147 	if(softs->os_specific.sim_registered) {
148 		if (xpt_create_path(&tmppath, NULL,
149 			cam_sim_path(softs->os_specific.sim),
150 			device->target, lun) != CAM_REQ_CMP) {
151 			DBG_ERR("unable to create path for async event\n");
152 			return;
153 		}
154 		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
155 		xpt_free_path(tmppath);
156 		pqisrc_free_device(softs, device);
157 	}
158 
159 	DBG_FUNC("OUT\n");
160 
161 }
162 
163 /*
164  * Function to release the frozen simq
165  */
166 static void
167 pqi_release_camq(rcb_t *rcb)
168 {
169 	pqisrc_softstate_t *softs;
170 	struct ccb_scsiio *csio;
171 
172 	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
173 	softs = rcb->softs;
174 
175 	DBG_FUNC("IN\n");
176 
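	/*
	 * If the SIM queue was frozen because we ran out of resources
	 * (PQI_FLAG_BUSY), release it now: either directly, or by flagging
	 * the CCB so that CAM releases the queue when it completes.
	 */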
177 	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
178 		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
179 		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
180 			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
181 		else
182 			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
183 	}
184 
185 	DBG_FUNC("OUT\n");
186 }
187 
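/*
 * Sync and unload the DMA mappings of a completed request and free its
 * scatter/gather table.
 */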
188 static void
189 pqi_synch_request(rcb_t *rcb)
190 {
191 	pqisrc_softstate_t *softs = rcb->softs;
192 
193 	DBG_IO("IN rcb = %p\n", rcb);
194 
195 	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
196 		return;
197 
	if (rcb->bcount != 0) {
		if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap, BUS_DMASYNC_POSTREAD);
		if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap);
	}
208 	rcb->cm_flags &= ~PQI_CMD_MAPPED;
209 
210 	if(rcb->sgt && rcb->nseg)
211 		os_mem_free(rcb->softs, (void*)rcb->sgt,
212 			rcb->nseg*sizeof(sgt_t));
213 
214 	DBG_IO("OUT\n");
215 }
216 
217 /*
218  * Function to dma-unmap the completed request
219  */
220 static inline void
221 pqi_unmap_request(rcb_t *rcb)
222 {
223 	DBG_IO("IN rcb = %p\n", rcb);
224 
225 	pqi_synch_request(rcb);
226 	pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);
227 
228 	DBG_IO("OUT\n");
229 }
230 
231 /*
232  * Construct meaningful LD name for volume here.
233  */
234 static void
235 smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
236 {
237 	struct scsi_inquiry_data *inq = NULL;
238 	uint8_t *cdb = NULL;
239 	pqi_scsi_dev_t *device = NULL;
240 
241 	DBG_FUNC("IN\n");
242 
243 	if (pqisrc_ctrl_offline(softs))
244 		return;
245 
	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
247 		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
248 
249 	if(cdb[0] == INQUIRY &&
250 		(cdb[1] & SI_EVPD) == 0 &&
251 		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
252 		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
253 
254 		inq = (struct scsi_inquiry_data *)csio->data_ptr;
255 
256 		/* device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; */
257 		int target = csio->ccb_h.target_id;
258 		int lun = csio->ccb_h.target_lun;
259 		int index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
260 		if (index != INVALID_ELEM)
261 			device = softs->dev_list[index];
262 
		/*
		 * Let physical disks be probed and handled by CAM; only for
		 * logical volumes (LDs) do we fall through and tweak the
		 * inquiry data.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}
270 
271 		strncpy(inq->vendor, device->vendor,
272 				SID_VENDOR_SIZE-1);
273 		inq->vendor[sizeof(inq->vendor)-1] = '\0';
274 		strncpy(inq->product,
275 				pqisrc_raidlevel_to_string(device->raid_level),
276 				SID_PRODUCT_SIZE-1);
277 		inq->product[sizeof(inq->product)-1] = '\0';
278 		strncpy(inq->revision, device->volume_offline?"OFF":"OK",
279 				SID_REVISION_SIZE-1);
280 		inq->revision[sizeof(inq->revision)-1] = '\0';
	}
282 
283 	DBG_FUNC("OUT\n");
284 }
285 
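/*
 * Common completion path for SCSI I/O: sync/unmap the request, fix up the
 * LD inquiry data, release the SIM queue if needed, return the tag and
 * hand the CCB back to CAM.
 */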
286 static void
287 pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
288 {
289 	uint32_t release_tag;
290 	pqisrc_softstate_t *softs = rcb->softs;
291 
292 	DBG_IO("IN scsi io = %p\n", csio);
293 
294 	pqi_synch_request(rcb);
295 	smartpqi_fix_ld_inquiry(rcb->softs, csio);
296 	pqi_release_camq(rcb);
297 	release_tag = rcb->tag;
298 	os_reset_rcb(rcb);
299 	pqisrc_put_tag(&softs->taglist, release_tag);
300 	xpt_done((union ccb *)csio);
301 
	DBG_IO("OUT\n");
303 }
304 
305 /*
306  * Handle completion of a command - pass results back through the CCB
307  */
308 void
309 os_io_response_success(rcb_t *rcb)
310 {
311 	struct ccb_scsiio *csio;
312 
313 	DBG_IO("IN rcb = %p\n", rcb);
314 
315 	if (rcb == NULL)
316 		panic("rcb is null");
317 
318 	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
319 
320 	if (csio == NULL)
321 		panic("csio is null");
322 
323 	rcb->status = PQI_STATUS_SUCCESS;
324 	csio->ccb_h.status = CAM_REQ_CMP;
325 
326 	pqi_complete_scsi_io(csio, rcb);
327 
328 	DBG_IO("OUT\n");
329 }
330 
331 static void
332 copy_sense_data_to_csio(struct ccb_scsiio *csio,
333 		uint8_t const *sense_data, uint16_t sense_data_len)
334 {
335 	DBG_IO("IN csio = %p\n", csio);
336 
337 	memset(&csio->sense_data, 0, csio->sense_len);
338 
339 	sense_data_len = (sense_data_len > csio->sense_len) ?
340 		csio->sense_len : sense_data_len;
341 
342 	if (sense_data)
343 		memcpy(&csio->sense_data, sense_data, sense_data_len);
344 
345 	if (csio->sense_len > sense_data_len)
346 		csio->sense_resid = csio->sense_len - sense_data_len;
347 	else
348 		csio->sense_resid = 0;
349 
350 	DBG_IO("OUT\n");
351 }
352 
353 /*
354  * Error response handling for raid IO
355  */
356 void
357 os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
358 {
359 	struct ccb_scsiio *csio;
360 	pqisrc_softstate_t *softs;
361 
362 	DBG_IO("IN\n");
363 
364 	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
365 
366 	if (csio == NULL)
367 		panic("csio is null");
368 
369 	softs = rcb->softs;
370 
371 	csio->ccb_h.status = CAM_REQ_CMP_ERR;
372 
373 	if (!err_info || !rcb->dvp) {
374 		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
375 				err_info, rcb->dvp);
376 		goto error_out;
377 	}
378 
379 	csio->scsi_status = err_info->status;
380 
381 	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
382 		/*
383 		 * Handle specific SCSI status values.
384 		 */
385 		switch(csio->scsi_status) {
386 			case PQI_RAID_STATUS_QUEUE_FULL:
387 				csio->ccb_h.status = CAM_REQ_CMP;
388 				DBG_ERR("Queue Full error\n");
389 				break;
			/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
392 				{
393 					uint16_t sense_data_len =
394 						LE_16(err_info->sense_data_len);
395 					uint8_t *sense_data = NULL;
396 					if (sense_data_len)
397 						sense_data = err_info->data;
398 
399 					copy_sense_data_to_csio(csio, sense_data, sense_data_len);
400 					csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
401 						| CAM_AUTOSNS_VALID
402 						| CAM_REQ_CMP_ERR;
403 
404 				}
405 				break;
406 
407 			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
408 				{
409 					uint32_t resid = 0;
					resid = rcb->bcount - err_info->data_out_transferred;
					csio->resid = resid;
412 					csio->ccb_h.status = CAM_REQ_CMP;
413 				}
414 				break;
415 			default:
416 				csio->ccb_h.status = CAM_REQ_CMP;
417 				break;
418 		}
419 	}
420 
421 error_out:
422 	pqi_complete_scsi_io(csio, rcb);
423 
424 	DBG_IO("OUT\n");
425 }
426 
427 /*
428  * Error response handling for aio.
429  */
430 void
431 os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
432 {
433 	struct ccb_scsiio *csio;
434 	pqisrc_softstate_t *softs;
435 
436 	DBG_IO("IN\n");
437 
438 	if (rcb == NULL)
439 		panic("rcb is null");
440 
441 	rcb->status = PQI_STATUS_SUCCESS;
442 	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
443 	if (csio == NULL)
		panic("csio is null");
445 
446 	softs = rcb->softs;
447 
448 	if (!err_info || !rcb->dvp) {
449 		csio->ccb_h.status = CAM_REQ_CMP_ERR;
450 		DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
451 				err_info, rcb->dvp);
452 		goto error_out;
453 	}
454 
455 	switch (err_info->service_resp) {
456 		case PQI_AIO_SERV_RESPONSE_COMPLETE:
457 			csio->ccb_h.status = err_info->status;
458 			break;
459 		case PQI_AIO_SERV_RESPONSE_FAILURE:
460 			switch(err_info->status) {
461 				case PQI_AIO_STATUS_IO_ABORTED:
462 					csio->ccb_h.status = CAM_REQ_ABORTED;
463 					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
464 					break;
465 				case PQI_AIO_STATUS_UNDERRUN:
466 					csio->ccb_h.status = CAM_REQ_CMP;
467 					csio->resid =
468 						LE_32(err_info->resd_count);
469 					break;
470 				case PQI_AIO_STATUS_OVERRUN:
471 					csio->ccb_h.status = CAM_REQ_CMP;
472 					break;
473 				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
474 					DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
475 					/* Timed out TMF response comes here */
476 					if (rcb->tm_req) {
477 						rcb->req_pending = false;
478 						rcb->status = PQI_STATUS_SUCCESS;
479 						DBG_ERR("AIO Disabled for TMF\n");
480 						return;
481 					}
482 					rcb->dvp->aio_enabled = false;
483 					rcb->dvp->offload_enabled = false;
484 					csio->ccb_h.status |= CAM_REQUEUE_REQ;
485 					break;
486 				case PQI_AIO_STATUS_IO_ERROR:
487 				case PQI_AIO_STATUS_IO_NO_DEVICE:
488 				case PQI_AIO_STATUS_INVALID_DEVICE:
489 				default:
490 					DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
491 					csio->ccb_h.status |=
492 						CAM_SCSI_STATUS_ERROR;
493 					break;
494 			}
495 			break;
496 		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
497 		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
498 			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
499 				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
500 			rcb->status = PQI_STATUS_SUCCESS;
501 			rcb->req_pending = false;
502 			return;
503 		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
504 		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
505 			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
506 				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
507 			rcb->status = PQI_STATUS_TIMEOUT;
508 			rcb->req_pending = false;
509 			return;
510 		default:
511 			DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
512 			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
513 			break;
514 	}
515 
516 	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
517 		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
518 		uint8_t *sense_data = NULL;
519 		unsigned sense_data_len = LE_16(err_info->data_len);
520 		if (sense_data_len)
521 			sense_data = err_info->data;
522 		DBG_INFO("SCSI_STATUS_CHECK_COND  sense size %u\n",
523 			sense_data_len);
524 		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
525 		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
526 	}
527 
528 error_out:
529 	pqi_complete_scsi_io(csio, rcb);
530 	DBG_IO("OUT\n");
531 }
532 
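/*
 * Freeze the device queue for this CCB, if it is not frozen already.
 */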
533 static void
534 pqi_freeze_ccb(union ccb *ccb)
535 {
536 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
537 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
538 		xpt_freeze_devq(ccb->ccb_h.path, 1);
539 	}
540 }
541 
542 /*
543  * Command-mapping helper function - populate this command's s/g table.
544  */
545 static void
546 pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
547 {
548 	rcb_t *rcb = (rcb_t *)arg;
549 	pqisrc_softstate_t *softs = rcb->softs;
550 	union ccb *ccb;
551 
	if (error || nseg > softs->pqi_cap.max_sg_elem) {
554 		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%u)\n",
555 			error, nseg, softs->pqi_cap.max_sg_elem);
556 		goto error_io;
557 	}
558 
559 	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
560 
561 	if (!rcb->sgt) {
562 		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
563 		goto error_io;
564 	}
565 
566 	rcb->nseg = nseg;
567 	for (int i = 0; i < nseg; i++) {
568 		rcb->sgt[i].addr = segs[i].ds_addr;
569 		rcb->sgt[i].len = segs[i].ds_len;
570 		rcb->sgt[i].flags = 0;
571 	}
572 
	if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
				rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
				rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
579 
580 	/* Call IO functions depending on pd or ld */
581 	rcb->status = PQI_STATUS_FAILURE;
582 
583 	error = pqisrc_build_send_io(softs, rcb);
584 
585 	if (error) {
586 		rcb->req_pending = false;
587 		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
588 	} else {
		/* The I/O was successfully submitted to the device. */
590 		return;
591 	}
592 
593 error_io:
594 	ccb = rcb->cm_ccb;
595 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
596 	pqi_freeze_ccb(ccb);
597 	pqi_unmap_request(rcb);
598 	xpt_done(ccb);
599 	return;
600 }
601 
602 /*
603  * Function to dma-map the request buffer
604  */
605 static int
606 pqi_map_request(rcb_t *rcb)
607 {
608 	pqisrc_softstate_t *softs = rcb->softs;
609 	int bsd_status = BSD_SUCCESS;
610 	union ccb *ccb = rcb->cm_ccb;
611 
612 	DBG_FUNC("IN\n");
613 
	/* Nothing to do if the request is already mapped. */
615 	if (rcb->cm_flags & PQI_CMD_MAPPED)
616 		return BSD_SUCCESS;
617 
618 	rcb->cm_flags |= PQI_CMD_MAPPED;
619 
620 	if (rcb->bcount) {
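		/*
		 * bus_dmamap_load_ccb() may defer the mapping; on EINPROGRESS
		 * the callback (pqi_request_map_helper) will submit the I/O
		 * once DMA resources become available.
		 */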
621 		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
622 			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
623 		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
624 			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %u\n",
625 					bsd_status, rcb->bcount);
626 			return bsd_status;
627 		}
628 	} else {
629 		/*
630 		 * Set up the command to go to the controller.  If there are no
631 		 * data buffers associated with the command then it can bypass
632 		 * busdma.
633 		 */
634 		/* Call IO functions depending on pd or ld */
635 		rcb->status = PQI_STATUS_FAILURE;
636 
637 		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
638 			bsd_status = EIO;
639 		}
640 	}
641 
642 	DBG_FUNC("OUT error = %d\n", bsd_status);
643 
644 	return bsd_status;
645 }
646 
647 /*
648  * Function to clear the request control block
649  */
650 void
651 os_reset_rcb(rcb_t *rcb)
652 {
653 	rcb->error_info = NULL;
654 	rcb->req = NULL;
655 	rcb->status = -1;
656 	rcb->tag = INVALID_ELEM;
657 	rcb->dvp = NULL;
658 	rcb->cdbp = NULL;
659 	rcb->softs = NULL;
660 	rcb->cm_flags = 0;
661 	rcb->cm_data = NULL;
662 	rcb->bcount = 0;
663 	rcb->nseg = 0;
664 	rcb->sgt = NULL;
665 	rcb->cm_ccb = NULL;
666 	rcb->encrypt_enable = false;
667 	rcb->ioaccel_handle = 0;
668 	rcb->resp_qid = 0;
669 	rcb->req_pending = false;
670 	rcb->tm_req = false;
671 }
672 
673 /*
674  * Callback function for the lun rescan
675  */
676 static void
677 smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
678 {
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
681 }
682 
684 /*
685  * Function to rescan the lun
686  */
687 static void
688 smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
689 			int lun)
690 {
691 	union ccb *ccb = NULL;
692 	cam_status status = 0;
693 	struct cam_path *path = NULL;
694 
695 	DBG_FUNC("IN\n");
696 
697 	ccb = xpt_alloc_ccb_nowait();
698 	if (ccb == NULL) {
699 		DBG_ERR("Unable to alloc ccb for lun rescan\n");
700 		return;
701 	}
702 
703 	status = xpt_create_path(&path, NULL,
704 				cam_sim_path(softs->os_specific.sim), target, lun);
705 	if (status != CAM_REQ_CMP) {
706 		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
707 				 status);
708 		xpt_free_ccb(ccb);
709 		return;
710 	}
711 
712 	memset(ccb, 0, sizeof(union ccb));
713 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
714 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
715 	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
716 	ccb->crcn.flags = CAM_FLAG_NONE;
717 
718 	xpt_action(ccb);
719 
720 	DBG_FUNC("OUT\n");
721 }
722 
723 /*
724  * Function to rescan the lun under each target
725  */
726 void
727 smartpqi_target_rescan(struct pqisrc_softstate *softs)
728 {
729 	pqi_scsi_dev_t *device;
730 	int index;
731 
732 	DBG_FUNC("IN\n");
733 
734 	for(index = 0; index < PQI_MAX_DEVICES; index++){
735 		/* if(softs->device_list[target][lun]){ */
736 		if(softs->dev_list[index] != NULL) {
737 			device = softs->dev_list[index];
738 			DBG_INFO("calling smartpqi_lun_rescan with T%d:L%d\n",device->target,device->lun);
739 			smartpqi_lun_rescan(softs, device->target, device->lun);
740 		}
741 	}
742 
743 	DBG_FUNC("OUT\n");
744 }
745 
746 /*
 * Return the SOP task attribute corresponding to the CCB's tagged-queueing action.
748  */
749 uint8_t
750 os_get_task_attr(rcb_t *rcb)
751 {
752 	union ccb *ccb = rcb->cm_ccb;
753 	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
754 
755 	switch(ccb->csio.tag_action) {
756 	case MSG_HEAD_OF_Q_TAG:
757 		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
758 		break;
759 	case MSG_ORDERED_Q_TAG:
760 		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
761 		break;
762 	case MSG_SIMPLE_Q_TAG:
763 	default:
764 		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
765 		break;
766 	}
767 	return tag_action;
768 }
769 
770 /*
771  * Complete all outstanding commands
772  */
773 void
774 os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
775 {
776 	int tag = 0;
777 	pqi_scsi_dev_t	*dvp = NULL;
778 
779 	DBG_FUNC("IN\n");
780 
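	/* Walk every possible tag and fail any still-pending request back to CAM as aborted. */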
781 	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
782 		rcb_t *prcb = &softs->rcb[tag];
783 		dvp = prcb->dvp;
784 		if(prcb->req_pending && prcb->cm_ccb ) {
785 			prcb->req_pending = false;
786 			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
787 			pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
788 			if (dvp)
789 				pqisrc_decrement_device_active_io(softs, dvp);
790 		}
791 	}
792 
793 	DBG_FUNC("OUT\n");
794 }
795 
796 /*
797  * IO handling functionality entry point
798  */
799 static int
800 pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
801 {
802 	rcb_t *rcb;
803 	uint32_t tag;
804 	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
805 					cam_sim_softc(sim);
806 	int32_t error;
807 	pqi_scsi_dev_t *dvp;
808 	int target, lun, index;
809 
810 	DBG_FUNC("IN\n");
811 
812 	/* if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { */
813 	target = ccb->ccb_h.target_id;
814 	lun = ccb->ccb_h.target_lun;
815 	index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun);
816 
817 	if (index == INVALID_ELEM) {
818 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
819 		return ENXIO;
820 	}
821 
822 	if( softs->dev_list[index] == NULL ) {
823 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
824 		DBG_INFO("Device  = %d not there\n", ccb->ccb_h.target_id);
825 		return ENXIO;
826 	}
827 
828 	/* DBG_INFO("starting IO on BTL = %d:%d:%d index = %d\n",softs->bus_id,target,lun,index); */
829 
830 	/* dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
831 	dvp = softs->dev_list[index];
832 	/* Check  controller state */
833 	if (IN_PQI_RESET(softs)) {
834 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
835 					| CAM_BUSY | CAM_REQ_INPROG;
836 		DBG_WARN("Device  = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
837 		return ENXIO;
838 	}
839 	/* Check device state */
840 	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
841 		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
842 		DBG_WARN("Device  = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
843 		return ENXIO;
844 	}
845 	/* Check device reset */
846 	if (DEVICE_RESET(dvp)) {
847 		ccb->ccb_h.status = CAM_BUSY;
848 		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
849 		return EBUSY;
850 	}
851 
852 	if (dvp->expose_device == false) {
853 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
854 		DBG_INFO("Device  = %d not exposed\n", ccb->ccb_h.target_id);
855 		return ENXIO;
856 	}
857 
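	/*
	 * Reserve a command tag. If the pool is exhausted, freeze the SIM
	 * queue and have CAM requeue the request later.
	 */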
858 	tag = pqisrc_get_tag(&softs->taglist);
859 	if( tag == INVALID_ELEM ) {
860 		DBG_ERR("Get Tag failed\n");
861 		xpt_freeze_simq(softs->os_specific.sim, 1);
862 		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
863 		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
864 		return EIO;
865 	}
866 
867 	DBG_IO("tag = %u &softs->taglist : %p\n", tag, &softs->taglist);
868 
869 	rcb = &softs->rcb[tag];
870 	os_reset_rcb(rcb);
871 	rcb->tag = tag;
872 	rcb->softs = softs;
873 	rcb->cmdlen = ccb->csio.cdb_len;
874 	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
875 
876 	rcb->cm_ccb = ccb;
877 	/* rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
878 	rcb->dvp = softs->dev_list[index];
879 
880 	rcb->cm_data = (void *)ccb->csio.data_ptr;
881 	rcb->bcount = ccb->csio.dxfer_len;
882 
883 	/*
884 	 * Submit the request to the adapter.
885 	 *
	 * Note that this may fail if we're unable to map the request (and,
	 * should we ever support a transport layer other than simple, it may
	 * also fail if the adapter rejects the command).
889 	 */
890 	if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
891 		xpt_freeze_simq(softs->os_specific.sim, 1);
892 		if (error == EINPROGRESS) {
893 			/* Release simq in the completion */
894 			softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
895 			error = BSD_SUCCESS;
896 		} else {
897 			rcb->req_pending = false;
898 			ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
899 			DBG_WARN("Requeue req error = %d target = %d\n", error,
900 				ccb->ccb_h.target_id);
901 			pqi_unmap_request(rcb);
902 			error = EIO;
903 		}
904 	}
905 
906 	DBG_FUNC("OUT error = %d\n", error);
907 
908 	return error;
909 }
910 
911 static inline int
912 pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t const *rcb)
913 {
914 	if (PQI_STATUS_SUCCESS == pqi_status &&
915 			PQI_STATUS_SUCCESS == rcb->status)
916 		return BSD_SUCCESS;
917 	else
918 		return EIO;
919 }
920 
921 /*
922  * Abort a task, task management functionality
923  */
924 static int
925 pqisrc_scsi_abort_task(pqisrc_softstate_t *softs,  union ccb *ccb)
926 {
927 	rcb_t *rcb = NULL;
928 	struct ccb_hdr const *ccb_h = &ccb->ccb_h;
929 	rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
930 	uint32_t tag;
931 	int rval;
932 
933 	DBG_FUNC("IN\n");
934 
935 	tag = pqisrc_get_tag(&softs->taglist);
936 	rcb = &softs->rcb[tag];
937 	rcb->tag = tag;
938 
939 	if (rcb->dvp == NULL) {
940 		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
941 		rval = ENXIO;
942 		goto error_tmf;
943 	}
944 
945 	rcb->tm_req = true;
946 
947 	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
948 		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
949 
950 	if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
951 		ccb->ccb_h.status = CAM_REQ_ABORTED;
952 
953 error_tmf:
954 	os_reset_rcb(rcb);
955 	pqisrc_put_tag(&softs->taglist, tag);
956 
957 	DBG_FUNC("OUT rval = %d\n", rval);
958 
959 	return rval;
960 }
961 
962 /*
963  * Abort a taskset, task management functionality
964  */
965 static int
966 pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
967 {
968 	struct ccb_hdr const *ccb_h = &ccb->ccb_h;
969 	rcb_t *rcb = NULL;
970 	uint32_t tag;
971 	int rval;
972 
973 	DBG_FUNC("IN\n");
974 
975 	tag = pqisrc_get_tag(&softs->taglist);
976 	rcb = &softs->rcb[tag];
977 	rcb->tag = tag;
978 	rcb->cm_ccb = ccb;
979 
980 	if (rcb->dvp == NULL) {
981 		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
982 		rval = ENXIO;
983 		goto error_tmf;
984 	}
985 
986 	rcb->tm_req = true;
987 
988 	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
989 			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
990 
991 	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
992 
993 error_tmf:
994 	os_reset_rcb(rcb);
995 	pqisrc_put_tag(&softs->taglist, tag);
996 
997 	DBG_FUNC("OUT rval = %d\n", rval);
998 
999 	return rval;
1000 }
1001 
1002 /*
1003  * Target reset task management functionality
1004  */
1005 static int
1006 pqisrc_target_reset( pqisrc_softstate_t *softs,  union ccb *ccb)
1007 {
1008 
1009 	/* pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
1010 	struct ccb_hdr const *ccb_h = &ccb->ccb_h;
1011 	rcb_t *rcb = NULL;
1012 	uint32_t tag;
1013 	int rval;
1014 
1015 	int bus, target, lun;
1016 	int index;
1017 
1018 	DBG_FUNC("IN\n");
1019 
1020 	bus = softs->bus_id;
1021 	target = ccb->ccb_h.target_id;
1022 	lun = ccb->ccb_h.target_lun;
1023 
1024 	index = pqisrc_find_btl_list_index(softs,bus,target,lun);
1025 	if (index == INVALID_ELEM) {
1026 		DBG_ERR("device not found at BTL %d:%d:%d\n",bus,target,lun);
1027 		return (-1);
1028 	}
1029 
1030 	pqi_scsi_dev_t *devp = softs->dev_list[index];
1031 	if (devp == NULL) {
1032 		DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
1033 		return (-1);
1034 	}
1035 
1036 	tag = pqisrc_get_tag(&softs->taglist);
1037 	rcb = &softs->rcb[tag];
1038 	rcb->tag = tag;
1039 	rcb->cm_ccb = ccb;
1040 
1041 	rcb->tm_req = true;
1042 
1043 	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
1044 		SOP_TASK_MANAGEMENT_LUN_RESET);
1045 
1046 	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
1047 
1048 	devp->reset_in_progress = false;
1049 
1050 	os_reset_rcb(rcb);
1051 	pqisrc_put_tag(&softs->taglist, tag);
1052 
1053 	DBG_FUNC("OUT rval = %d\n", rval);
1054 
1055 	return rval;
1056 
1057 }
1058 
1059 /*
1060  * cam entry point of the smartpqi module.
1061  */
1062 static void
1063 smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
1064 {
1065 	struct pqisrc_softstate *softs = cam_sim_softc(sim);
1066 	struct ccb_hdr const *ccb_h = &ccb->ccb_h;
1067 
1068 	DBG_FUNC("IN\n");
1069 
1070 	switch (ccb_h->func_code) {
1071 		case XPT_SCSI_IO:
1072 		{
1073 			if(!pqisrc_io_start(sim, ccb)) {
1074 				return;
1075 			}
1076 			break;
1077 		}
1078 		case XPT_CALC_GEOMETRY:
1079 		{
1080 			struct ccb_calc_geometry *ccg;
1081 			ccg = &ccb->ccg;
1082 			if (ccg->block_size == 0) {
1083 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1084 				ccb->ccb_h.status |= CAM_REQ_INVALID;
1085 				break;
1086 			}
1087 			cam_calc_geometry(ccg, /* extended */ 1);
1088 			ccb->ccb_h.status = CAM_REQ_CMP;
1089 			break;
1090 		}
1091 		case XPT_PATH_INQ:
1092 		{
1093 			update_sim_properties(sim, &ccb->cpi);
1094 			ccb->ccb_h.status = CAM_REQ_CMP;
1095 			break;
1096 		}
1097 		case XPT_GET_TRAN_SETTINGS:
1098 			get_transport_settings(softs, &ccb->cts);
1099 			ccb->ccb_h.status = CAM_REQ_CMP;
1100 			break;
1101 		case XPT_ABORT:
1102 			if(pqisrc_scsi_abort_task(softs,  ccb)) {
1103 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1104 				xpt_done(ccb);
1105 				DBG_ERR("Abort task failed on %d\n",
1106 					ccb->ccb_h.target_id);
1107 				return;
1108 			}
1109 			break;
1110 		case XPT_TERM_IO:
1111 			if (pqisrc_scsi_abort_task_set(softs,  ccb)) {
1112 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1113 				DBG_ERR("Abort task set failed on %d\n",
1114 					ccb->ccb_h.target_id);
1115 				xpt_done(ccb);
1116 				return;
1117 			}
1118 			break;
1119 		case XPT_RESET_DEV:
1120 			if(pqisrc_target_reset(softs,  ccb)) {
1121 				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1122 				DBG_ERR("Target reset failed on %d\n",
1123 					ccb->ccb_h.target_id);
1124 				xpt_done(ccb);
1125 				return;
1126 			} else {
1127 				ccb->ccb_h.status = CAM_REQ_CMP;
1128 			}
1129 			break;
1130 		case XPT_RESET_BUS:
1131 			ccb->ccb_h.status = CAM_REQ_CMP;
1132 			break;
1133 		case XPT_SET_TRAN_SETTINGS:
1134 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1135 			return;
1136 		default:
1137 			DBG_WARN("UNSUPPORTED FUNC CODE\n");
1138 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1139 			break;
1140 	}
1141 	xpt_done(ccb);
1142 
1143 	DBG_FUNC("OUT\n");
1144 }
1145 
1146 /*
 * Function to poll for responses when interrupts are unavailable.
 * This also supports taking crash dumps.
1149  */
1150 static void
1151 smartpqi_poll(struct cam_sim *sim)
1152 {
1153 	struct pqisrc_softstate *softs = cam_sim_softc(sim);
1154 	int i;
1155 
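	/* Queue 0 services events rather than I/O completions, so start polling at 1. */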
1156 	for (i = 1; i < softs->intr_count; i++ )
1157 		pqisrc_process_response_queue(softs, i);
1158 }
1159 
1160 /*
1161  * Function to adjust the queue depth of a device
1162  */
1163 void
1164 smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
1165 {
1166 	struct ccb_relsim crs;
1167 
1168 	DBG_FUNC("IN\n");
1169 
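	/*
	 * Issue a frozen XPT_REL_SIMQ request with RELSIM_ADJUST_OPENINGS to
	 * change the number of commands CAM keeps outstanding to this device.
	 */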
1170 	memset(&crs, 0, sizeof(struct ccb_relsim));
1171 	xpt_setup_ccb(&crs.ccb_h, path, 5);
1172 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1173 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1174 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1175 	crs.openings = queue_depth;
1176 	xpt_action((union ccb *)&crs);
1177 	if(crs.ccb_h.status != CAM_REQ_CMP) {
1178 		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
1179 	}
1180 
1181 	DBG_FUNC("OUT\n");
1182 }
1183 
1184 /*
1185  * Function to register async callback for setting queue depth
1186  */
1187 static void
1188 smartpqi_async(void *callback_arg, u_int32_t code,
1189 		struct cam_path *path, void *arg)
1190 {
1191 	struct pqisrc_softstate *softs;
1192 	softs = (struct pqisrc_softstate*)callback_arg;
1193 
1194 	DBG_FUNC("IN\n");
1195 
1196 	switch (code) {
1197 		case AC_FOUND_DEVICE:
1198 		{
1199 			struct ccb_getdev *cgd;
1200 			cgd = (struct ccb_getdev *)arg;
1201 			if (cgd == NULL) {
1202 				break;
1203 			}
1204 			uint32_t t_id = cgd->ccb_h.target_id;
1205 
			if (softs != NULL) {
				/* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
				int lun = cgd->ccb_h.target_lun;
				int index = pqisrc_find_btl_list_index(softs, softs->bus_id, t_id, lun);
				if (index != INVALID_ELEM) {
					pqi_scsi_dev_t const *dvp = softs->dev_list[index];
					if (dvp == NULL) {
						DBG_ERR("Target is null, target id=%u\n", t_id);
						break;
					}
					smartpqi_adjust_queue_depth(path, dvp->queue_depth);
				}
			}
1219 			break;
1220 		}
1221 		default:
1222 			break;
1223 	}
1224 
1225 	DBG_FUNC("OUT\n");
1226 }
1227 
1228 /*
1229  * Function to register sim with CAM layer for smartpqi driver
1230  */
1231 int
1232 register_sim(struct pqisrc_softstate *softs, int card_index)
1233 {
1234 	int max_transactions;
1235 	union ccb   *ccb = NULL;
1236 	cam_status status = 0;
1237 	struct ccb_setasync csa;
1238 	struct cam_sim *sim;
1239 
1240 	DBG_FUNC("IN\n");
1241 
1242 	max_transactions = softs->max_io_for_scsi_ml;
1243 	softs->os_specific.devq = cam_simq_alloc(max_transactions);
1244 	if (softs->os_specific.devq == NULL) {
1245 		DBG_ERR("cam_simq_alloc failed txns = %d\n",
1246 			max_transactions);
1247 		return ENOMEM;
1248 	}
1249 
	sim = cam_sim_alloc(smartpqi_cam_action,
				smartpqi_poll, "smartpqi", softs,
				card_index, &softs->os_specific.cam_lock,
				1, max_transactions, softs->os_specific.devq);
1254 	if (sim == NULL) {
1255 		DBG_ERR("cam_sim_alloc failed txns = %d\n",
1256 			max_transactions);
1257 		cam_simq_free(softs->os_specific.devq);
1258 		return ENOMEM;
1259 	}
1260 
1261 	softs->os_specific.sim = sim;
1262 	mtx_lock(&softs->os_specific.cam_lock);
1263 	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
1264 	if (status != CAM_SUCCESS) {
1265 		DBG_ERR("xpt_bus_register failed status=%d\n", status);
1266 		cam_sim_free(softs->os_specific.sim, FALSE);
1267 		cam_simq_free(softs->os_specific.devq);
1268 		mtx_unlock(&softs->os_specific.cam_lock);
1269 		return ENXIO;
1270 	}
1271 
1272 	softs->os_specific.sim_registered = TRUE;
1273 	ccb = xpt_alloc_ccb_nowait();
1274 	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
1276 		return ENXIO;
1277 	}
1278 
1279 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
1280 			cam_sim_path(softs->os_specific.sim),
1281 			CAM_TARGET_WILDCARD,
1282 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1283 		DBG_ERR("xpt_create_path failed\n");
1284 		xpt_free_ccb(ccb);
1285 		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1286 		cam_sim_free(softs->os_specific.sim, TRUE);
1287 		mtx_unlock(&softs->os_specific.cam_lock);
1288 		return ENXIO;
1289 	}
1290 	/*
1291 	 * Callback to set the queue depth per target which is
1292 	 * derived from the FW.
1293 	 */
1294 	softs->os_specific.path = ccb->ccb_h.path;
1295 	memset(&csa, 0, sizeof(struct ccb_setasync));
1296 	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1297 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1298 	csa.event_enable = AC_FOUND_DEVICE;
1299 	csa.callback = smartpqi_async;
1300 	csa.callback_arg = softs;
1301 	xpt_action((union ccb *)&csa);
1302 	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
1304 			csa.ccb_h.status);
1305 	}
1306 
1307 	mtx_unlock(&softs->os_specific.cam_lock);
1308 	DBG_FUNC("OUT\n");
1309 
1310 	return BSD_SUCCESS;
1311 }
1312 
1313 /*
1314  * Function to deregister smartpqi sim from cam layer
1315  */
1316 void
1317 deregister_sim(struct pqisrc_softstate *softs)
1318 {
1319 	struct ccb_setasync csa;
1320 
1321 	DBG_FUNC("IN\n");
1322 
1323 	if (softs->os_specific.mtx_init) {
1324 		mtx_lock(&softs->os_specific.cam_lock);
1325 	}
1326 
1327 	memset(&csa, 0, sizeof(struct ccb_setasync));
1328 	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1329 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1330 	csa.event_enable = 0;
1331 	csa.callback = smartpqi_async;
1332 	csa.callback_arg = softs;
1333 	xpt_action((union ccb *)&csa);
1334 	xpt_free_path(softs->os_specific.path);
1335 
1336 	if (softs->os_specific.sim) {
1337 		xpt_release_simq(softs->os_specific.sim, 0);
1338 		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1339 		softs->os_specific.sim_registered = FALSE;
1340 		cam_sim_free(softs->os_specific.sim, FALSE);
1341 		softs->os_specific.sim = NULL;
1342 	}
1343 
1344 	if (softs->os_specific.mtx_init) {
1345 		mtx_unlock(&softs->os_specific.cam_lock);
1346 	}
1347 	if (softs->os_specific.devq != NULL) {
1348 		cam_simq_free(softs->os_specific.devq);
1349 	}
1350 	if (softs->os_specific.mtx_init) {
1351 		mtx_destroy(&softs->os_specific.cam_lock);
1352 		softs->os_specific.mtx_init = FALSE;
1353 	}
1354 
1355 	mtx_destroy(&softs->os_specific.map_lock);
1356 
1357 	DBG_FUNC("OUT\n");
1358 }
1359 
1360 void
1361 os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1362 {
1363 	struct cam_path *tmppath = NULL;
1364 
1365 	DBG_FUNC("IN\n");
1366 
1367 	if(softs->os_specific.sim_registered) {
1368 		if (xpt_create_path(&tmppath, NULL,
1369 			cam_sim_path(softs->os_specific.sim),
1370 			device->target, device->lun) != CAM_REQ_CMP) {
1371 			DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
1372 				device->bus, device->target, device->lun);
1373 			return;
1374 		}
1375 		xpt_async(AC_INQ_CHANGED, tmppath, NULL);
1376 		xpt_free_path(tmppath);
1377 	}
1378 
1379 	device->scsi_rescan = false;
1380 
1381 	DBG_FUNC("OUT\n");
1382 }
1383