/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
			    struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);

/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request
 * @sld: Level information
 *
 * Should be called with lock held.
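 *
 * Returns true if @ioreq falls within the scope (LUN, rnode, lnode or
 * all I/Os) described by @sld.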
 *
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Should be called with lock held.
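 *
 * CSIO_LEV_ALL splices the entire active queue onto @dest; for the other
 * levels, matching requests are moved over individually.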
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return true;
	}
	return false;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
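 *
 * For task management requests (scmnd->SCp.Message != 0), only the LUN
 * and the TM flags fields of the FCP_CMND payload are filled in.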
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management */
	if (likely(scmnd->SCp.Message == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge)
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		else
			fcp_cmnd->fc_flags = 0;
	} else {
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
	}
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry)
 *
 * Wrapper for populating fw_scsi_cmd_wr.
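 *
 * The FCP_CMND payload is placed as immediate data just past the WR
 * header; the FCP_RSP buffer is described by a single DMA address/length
 * pair (rsp_dmaaddr/rsp_dmalen).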
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
				     FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
					 sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +		/* WR size */	\
	 ALIGN((_imm), 16))				/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
			(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with SCSI CMD WR.
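 * If the slot wraps around the end of the queue, the WR is first built in
 * a scratch buffer and then copied into the two segments of the WR pair.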
 *
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
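 *
 * The first SGE is carried inline in struct ulptx_sgl (addr0/len0); any
 * further SGEs are packed as ulptx_sge_pair entries that follow it.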
 *
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
			      ULPTX_NSGE_V(req->nsge));
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}

/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
	      sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
	      sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				\
do {									\
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */	\
	       ALIGN((imm), 16) +			/* Immed data */\
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	\
									\
	if (unlikely((req)->nsge > 1))					\
		(sz) += (sizeof(struct ulptx_sge_pair) *		\
				(ALIGN(((req)->nsge - 1), 2) / 2));	\
							/* Data SGE */	\
} while (0)

/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI WRITE WR.
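 * The WR size computed by CSIO_SCSI_DATA_WRSZ() accounts for the FW WR
 * header, the immediate FCP_CMND data, the response SGE and the data SGL.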
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @req: IO req structure.
 *
 * Checks whether the SGLs/data buffers are virtually contiguous, as
 * required for DDP. If they are, the driver posts the SGLs in the WR;
 * otherwise it posts internal DDP buffers for the request.
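 *
 * DDP requires every SGL element except the first to start on a DDP page
 * boundary, and every element except the last to end on one.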
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len	= sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except the 1st buffer, all buffer addrs must be page-aligned */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except the last buffer, all buffers must end on a page boundary */
		if ((i != (req->nsge - 1)) &&
			((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 "(%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGLs are virtually contiguous. HW will DDP to SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, the driver will allocate an internal DDP buffer.
	 * Once the command is completed, data from the DDP buffer is copied
	 * to the SGLs.
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate ddp buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* release dma descs */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}

/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
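 *
 * The same ioreq is reused for the abort/close, so both the WR cookie and
 * t_cookie point back at @req.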
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(
					       DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: We collect the affected I/Os prior to issuing
			 * LUN reset, and not after it. This is to prevent
			 * aborting I/Os that get issued after the LUN reset,
			 * but prior to LUN reset completion (in the event that
			 * the host stack has not blocked I/Os to a LUN that is
			 * being reset).
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * NOTE:
		 * We could get here due to:
		 * - a window in the cleanup path of the SCSI module
		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
		 * - a window in the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completed the request
		 *   itself.
		 * Print a message for now, and return INVAL either way.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, with multiple queues, the SCSI completions
		 * could reach us sooner than the FW events sent to indicate
		 * I-T nexus loss (link down, remote device logo etc.). We
		 * don't want to be returning such I/Os to the upper layer
		 * immediately, since we wouldn't have reported the I-T nexus
		 * loss itself. This forces us to serialize such completions
		 * with the reporting of the I-T nexus loss. Therefore, we
		 * internally queue up such completions in the rnode.
		 * The reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 * Having another state along with another queue helps us take
		 * actions for events such as ABORT received while we are
		 * in this rnode queue.
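		 * (The internal queue used for this purpose is the rnode's
		 * host_cmpl_q.)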
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * FW says remote device is lost, but rnode
			 * doesn't reflect it.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
						csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if original I/O WR completed before the Abort
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				  " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * There are the following possible scenarios:
		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
		 * 2. The completion of an I/O and the receipt of
		 *    abort for that I/O by the FW crossed each other.
		 *    The FW returned FW_EINVAL. The original I/O would have
		 *    returned with FW_SUCCESS or any other SCSI error.
		 * 3. The FW couldn't send the abort out on the wire, as there
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc.). FW sent back an appropriate IT nexus loss
		 *    status for the abort.
		 * 4. FW sent an abort, but abort timed out (remote device
		 *    didn't respond). FW replied back with
		 *    FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW couldn't genuinely abort the request for some reason,
		 *    and sent us an error.
		 *
		 * The first 3 scenarios are treated as successful abort
		 * operations by the host, while the last 2 are failed attempts
		 * to abort. Manipulate the return value of the request
		 * appropriately, so that host can convey these results
		 * back to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * We can receive this event from the module
		 * cleanup paths, if the FW forgot to reply to the ABORT WR
		 * and left this ioreq in this state. For now, just ignore
		 * the event. The CLOSE event is sent to this state, as
		 * the LINK may have already gone down.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other), or any other return value. Once we are in closing
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if original I/O WR completed before the Close
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either close succeeded, or we issued close to FW at the
		 * same time FW completed it to us. Either way, the I/O
		 * is closed.
1023a3667aaeSNaresh Kumar Inna */
1024a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
1025a3667aaeSNaresh Kumar Inna (req->wr_status == FW_EINVAL));
1026a3667aaeSNaresh Kumar Inna req->wr_status = FW_SCSI_CLOSE_REQUESTED;
1027a3667aaeSNaresh Kumar Inna
1028a3667aaeSNaresh Kumar Inna CSIO_DEC_STATS(scm, n_active);
1029a3667aaeSNaresh Kumar Inna list_del_init(&req->sm.sm_list);
1030a3667aaeSNaresh Kumar Inna csio_set_state(&req->sm, csio_scsis_uninit);
1031a3667aaeSNaresh Kumar Inna break;
1032a3667aaeSNaresh Kumar Inna
1033a3667aaeSNaresh Kumar Inna case CSIO_SCSIE_CLOSE:
1034a3667aaeSNaresh Kumar Inna break;
1035a3667aaeSNaresh Kumar Inna
1036a3667aaeSNaresh Kumar Inna case CSIO_SCSIE_DRVCLEANUP:
1037a3667aaeSNaresh Kumar Inna req->wr_status = FW_HOSTERROR;
1038a3667aaeSNaresh Kumar Inna CSIO_DEC_STATS(scm, n_active);
1039a3667aaeSNaresh Kumar Inna csio_set_state(&req->sm, csio_scsis_uninit);
1040a3667aaeSNaresh Kumar Inna break;
1041a3667aaeSNaresh Kumar Inna
1042a3667aaeSNaresh Kumar Inna default:
1043a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
1044a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(0);
1045a3667aaeSNaresh Kumar Inna }
1046a3667aaeSNaresh Kumar Inna }
1047a3667aaeSNaresh Kumar Inna
1048a3667aaeSNaresh Kumar Inna static void
1049a3667aaeSNaresh Kumar Inna csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
1050a3667aaeSNaresh Kumar Inna {
1051a3667aaeSNaresh Kumar Inna switch (evt) {
1052a3667aaeSNaresh Kumar Inna case CSIO_SCSIE_ABORT:
1053a3667aaeSNaresh Kumar Inna case CSIO_SCSIE_CLOSE:
1054a3667aaeSNaresh Kumar Inna /*
1055a3667aaeSNaresh Kumar Inna * Just succeed the abort request, and hope that
1056a3667aaeSNaresh Kumar Inna * the remote device unregister path will clean up
1057a3667aaeSNaresh Kumar Inna * this I/O to the upper layer within a sane
1058a3667aaeSNaresh Kumar Inna * amount of time.
1059a3667aaeSNaresh Kumar Inna */
1060a3667aaeSNaresh Kumar Inna /*
1061a3667aaeSNaresh Kumar Inna * A close can come in during a LINK DOWN. The FW would have
1062a3667aaeSNaresh Kumar Inna * returned us the I/O back, but not the remote device lost
1063a3667aaeSNaresh Kumar Inna * FW event. In this interval, if the I/O times out at the upper
1064a3667aaeSNaresh Kumar Inna * layer, a close can come in. Take the same action as abort:
1065a3667aaeSNaresh Kumar Inna * return success, and hope that the remote device unregister
1066a3667aaeSNaresh Kumar Inna * path will clean up this I/O. If the FW still doesn't send
1067a3667aaeSNaresh Kumar Inna * the msg, the close times out, and the upper layer resorts
1068a3667aaeSNaresh Kumar Inna * to the next level of error recovery.
1069a3667aaeSNaresh Kumar Inna */
1070a3667aaeSNaresh Kumar Inna req->drv_status = 0;
1071a3667aaeSNaresh Kumar Inna break;
1072a3667aaeSNaresh Kumar Inna case CSIO_SCSIE_DRVCLEANUP:
1073a3667aaeSNaresh Kumar Inna csio_set_state(&req->sm, csio_scsis_uninit);
1074a3667aaeSNaresh Kumar Inna break;
1075a3667aaeSNaresh Kumar Inna default:
1076a3667aaeSNaresh Kumar Inna csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
1077a3667aaeSNaresh Kumar Inna evt, req);
1078a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(0);
1079a3667aaeSNaresh Kumar Inna }
1080a3667aaeSNaresh Kumar Inna }
1081a3667aaeSNaresh Kumar Inna
1082a3667aaeSNaresh Kumar Inna /*
1083a3667aaeSNaresh Kumar Inna * csio_scsi_cmpl_handler - WR completion handler for SCSI.
1084a3667aaeSNaresh Kumar Inna * @hw: HW module.
1085a3667aaeSNaresh Kumar Inna * @wr: The completed WR from the ingress queue.
1086a3667aaeSNaresh Kumar Inna * @len: Length of the WR.
1087a3667aaeSNaresh Kumar Inna * @flb: Freelist buffer array.
1088a3667aaeSNaresh Kumar Inna * @priv: Private object
1089a3667aaeSNaresh Kumar Inna * @scsiwr: Pointer to SCSI WR.
1090a3667aaeSNaresh Kumar Inna *
1091a3667aaeSNaresh Kumar Inna * This is the WR completion handler called per completion from the
1092a3667aaeSNaresh Kumar Inna * ISR. It is called with lock held. It walks past the RSS and CPL message
1093a3667aaeSNaresh Kumar Inna * header where the actual WR is present.
1094a3667aaeSNaresh Kumar Inna * It then gets the status, WR handle (ioreq pointer) and the len of
1095a3667aaeSNaresh Kumar Inna * the WR, based on WR opcode. Only on a non-good status is the entire
1096a3667aaeSNaresh Kumar Inna * WR copied into the WR cache (ioreq->fw_wr).
1097a3667aaeSNaresh Kumar Inna * The ioreq corresponding to the WR is returned to the caller.
1098a3667aaeSNaresh Kumar Inna * NOTE: The SCSI queue doesn't allocate a freelist today, hence
1099a3667aaeSNaresh Kumar Inna * no freelist buffer is expected.
1100a3667aaeSNaresh Kumar Inna */
1101a3667aaeSNaresh Kumar Inna struct csio_ioreq *
1102a3667aaeSNaresh Kumar Inna csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
1103a3667aaeSNaresh Kumar Inna struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
1104a3667aaeSNaresh Kumar Inna {
1105a3667aaeSNaresh Kumar Inna struct csio_ioreq *ioreq = NULL;
1106a3667aaeSNaresh Kumar Inna struct cpl_fw6_msg *cpl;
1107a3667aaeSNaresh Kumar Inna uint8_t *tempwr;
1108a3667aaeSNaresh Kumar Inna uint8_t status;
1109a3667aaeSNaresh Kumar Inna struct csio_scsim *scm = csio_hw_to_scsim(hw);
1110a3667aaeSNaresh Kumar Inna
1111a3667aaeSNaresh Kumar Inna /* skip RSS header */
1112a3667aaeSNaresh Kumar Inna cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
1113a3667aaeSNaresh Kumar Inna
1114a3667aaeSNaresh Kumar Inna if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
1115a3667aaeSNaresh Kumar Inna csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
1116a3667aaeSNaresh Kumar Inna cpl->opcode);
1117a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_inval_cplop);
1118a3667aaeSNaresh Kumar Inna return NULL;
1119a3667aaeSNaresh Kumar Inna }
1120a3667aaeSNaresh Kumar Inna
1121a3667aaeSNaresh Kumar Inna tempwr = (uint8_t *)(cpl->data);
1122a3667aaeSNaresh Kumar Inna status = csio_wr_status(tempwr);
1123a3667aaeSNaresh Kumar Inna *scsiwr = tempwr;
1124a3667aaeSNaresh Kumar Inna
1125a3667aaeSNaresh Kumar Inna if (likely((*tempwr == FW_SCSI_READ_WR) ||
1126a3667aaeSNaresh Kumar Inna (*tempwr == FW_SCSI_WRITE_WR) ||
1127a3667aaeSNaresh Kumar Inna (*tempwr == FW_SCSI_CMD_WR))) {
1128a3667aaeSNaresh Kumar Inna ioreq = (struct csio_ioreq *)((uintptr_t)
1129a3667aaeSNaresh Kumar Inna (((struct fw_scsi_read_wr *)tempwr)->cookie));
1130a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1131a3667aaeSNaresh Kumar Inna
1132a3667aaeSNaresh Kumar Inna ioreq->wr_status = status;
1133a3667aaeSNaresh Kumar Inna
1134a3667aaeSNaresh Kumar Inna return ioreq;
1135a3667aaeSNaresh Kumar Inna }
1136a3667aaeSNaresh Kumar Inna
1137a3667aaeSNaresh Kumar Inna if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
1138a3667aaeSNaresh Kumar Inna ioreq = (struct csio_ioreq *)((uintptr_t)
1139a3667aaeSNaresh Kumar Inna (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
1140a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(virt_addr_valid(ioreq));
1141a3667aaeSNaresh Kumar Inna
1142a3667aaeSNaresh
Kumar Inna ioreq->wr_status = status; 1143a3667aaeSNaresh Kumar Inna return ioreq; 1144a3667aaeSNaresh Kumar Inna } 1145a3667aaeSNaresh Kumar Inna 1146a3667aaeSNaresh Kumar Inna csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr); 1147a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_inval_scsiop); 1148a3667aaeSNaresh Kumar Inna return NULL; 1149a3667aaeSNaresh Kumar Inna } 1150a3667aaeSNaresh Kumar Inna 1151a3667aaeSNaresh Kumar Inna /* 1152a3667aaeSNaresh Kumar Inna * csio_scsi_cleanup_io_q - Cleanup the given queue. 1153a3667aaeSNaresh Kumar Inna * @scm: SCSI module. 1154a3667aaeSNaresh Kumar Inna * @q: Queue to be cleaned up. 1155a3667aaeSNaresh Kumar Inna * 1156a3667aaeSNaresh Kumar Inna * Called with lock held. Has to exit with lock held. 1157a3667aaeSNaresh Kumar Inna */ 1158a3667aaeSNaresh Kumar Inna void 1159a3667aaeSNaresh Kumar Inna csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q) 1160a3667aaeSNaresh Kumar Inna { 1161a3667aaeSNaresh Kumar Inna struct csio_hw *hw = scm->hw; 1162a3667aaeSNaresh Kumar Inna struct csio_ioreq *ioreq; 1163a3667aaeSNaresh Kumar Inna struct list_head *tmp, *next; 1164a3667aaeSNaresh Kumar Inna struct scsi_cmnd *scmnd; 1165a3667aaeSNaresh Kumar Inna 1166a3667aaeSNaresh Kumar Inna /* Call back the completion routines of the active_q */ 1167a3667aaeSNaresh Kumar Inna list_for_each_safe(tmp, next, q) { 1168a3667aaeSNaresh Kumar Inna ioreq = (struct csio_ioreq *)tmp; 1169a3667aaeSNaresh Kumar Inna csio_scsi_drvcleanup(ioreq); 1170a3667aaeSNaresh Kumar Inna list_del_init(&ioreq->sm.sm_list); 1171a3667aaeSNaresh Kumar Inna scmnd = csio_scsi_cmnd(ioreq); 1172a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1173a3667aaeSNaresh Kumar Inna 1174a3667aaeSNaresh Kumar Inna /* 1175a3667aaeSNaresh Kumar Inna * Upper layers may have cleared this command, hence this 1176a3667aaeSNaresh Kumar Inna * check to avoid accessing stale references. 1177a3667aaeSNaresh Kumar Inna */ 1178a3667aaeSNaresh Kumar Inna if (scmnd != NULL) 1179a3667aaeSNaresh Kumar Inna ioreq->io_cbfn(hw, ioreq); 1180a3667aaeSNaresh Kumar Inna 1181a3667aaeSNaresh Kumar Inna spin_lock_irq(&scm->freelist_lock); 1182a3667aaeSNaresh Kumar Inna csio_put_scsi_ioreq(scm, ioreq); 1183a3667aaeSNaresh Kumar Inna spin_unlock_irq(&scm->freelist_lock); 1184a3667aaeSNaresh Kumar Inna 1185a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1186a3667aaeSNaresh Kumar Inna } 1187a3667aaeSNaresh Kumar Inna } 1188a3667aaeSNaresh Kumar Inna 1189a3667aaeSNaresh Kumar Inna #define CSIO_SCSI_ABORT_Q_POLL_MS 2000 1190a3667aaeSNaresh Kumar Inna 1191a3667aaeSNaresh Kumar Inna static void 1192a3667aaeSNaresh Kumar Inna csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd) 1193a3667aaeSNaresh Kumar Inna { 1194a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = ioreq->lnode; 1195a3667aaeSNaresh Kumar Inna struct csio_hw *hw = ln->hwp; 1196a3667aaeSNaresh Kumar Inna int ready = 0; 1197a3667aaeSNaresh Kumar Inna struct csio_scsim *scsim = csio_hw_to_scsim(hw); 1198a3667aaeSNaresh Kumar Inna int rv; 1199a3667aaeSNaresh Kumar Inna 1200a3667aaeSNaresh Kumar Inna if (csio_scsi_cmnd(ioreq) != scmnd) { 1201a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_abrt_race_comp); 1202a3667aaeSNaresh Kumar Inna return; 1203a3667aaeSNaresh Kumar Inna } 1204a3667aaeSNaresh Kumar Inna 1205a3667aaeSNaresh Kumar Inna ready = csio_is_lnode_ready(ln); 1206a3667aaeSNaresh Kumar Inna 1207a3667aaeSNaresh Kumar Inna rv = csio_do_abrt_cls(hw, ioreq, (ready ? 
SCSI_ABORT : SCSI_CLOSE)); 1208a3667aaeSNaresh Kumar Inna if (rv != 0) { 1209a3667aaeSNaresh Kumar Inna if (ready) 1210a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_abrt_busy_error); 1211a3667aaeSNaresh Kumar Inna else 1212a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_cls_busy_error); 1213a3667aaeSNaresh Kumar Inna } 1214a3667aaeSNaresh Kumar Inna } 1215a3667aaeSNaresh Kumar Inna 1216a3667aaeSNaresh Kumar Inna /* 1217a3667aaeSNaresh Kumar Inna * csio_scsi_abort_io_q - Abort all I/Os on given queue 1218a3667aaeSNaresh Kumar Inna * @scm: SCSI module. 1219a3667aaeSNaresh Kumar Inna * @q: Queue to abort. 1220a3667aaeSNaresh Kumar Inna * @tmo: Timeout in ms 1221a3667aaeSNaresh Kumar Inna * 1222a3667aaeSNaresh Kumar Inna * Attempt to abort all I/Os on given queue, and wait for a max 1223a3667aaeSNaresh Kumar Inna * of tmo milliseconds for them to complete. Returns success 1224a3667aaeSNaresh Kumar Inna * if all I/Os are aborted. Else returns -ETIMEDOUT. 1225a3667aaeSNaresh Kumar Inna * Should be entered with lock held. Exits with lock held. 1226a3667aaeSNaresh Kumar Inna * NOTE: 1227a3667aaeSNaresh Kumar Inna * Lock has to be held across the loop that aborts I/Os, since dropping the lock 1228a3667aaeSNaresh Kumar Inna * in between can cause the list to be corrupted. As a result, the caller 1229a3667aaeSNaresh Kumar Inna * of this function has to ensure that the number of I/os to be aborted 1230a3667aaeSNaresh Kumar Inna * is finite enough to not cause lock-held-for-too-long issues. 1231a3667aaeSNaresh Kumar Inna */ 1232a3667aaeSNaresh Kumar Inna static int 1233a3667aaeSNaresh Kumar Inna csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo) 1234a3667aaeSNaresh Kumar Inna { 1235a3667aaeSNaresh Kumar Inna struct csio_hw *hw = scm->hw; 1236a3667aaeSNaresh Kumar Inna struct list_head *tmp, *next; 1237a3667aaeSNaresh Kumar Inna int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS); 1238a3667aaeSNaresh Kumar Inna struct scsi_cmnd *scmnd; 1239a3667aaeSNaresh Kumar Inna 1240a3667aaeSNaresh Kumar Inna if (list_empty(q)) 1241a3667aaeSNaresh Kumar Inna return 0; 1242a3667aaeSNaresh Kumar Inna 1243a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Aborting SCSI I/Os\n"); 1244a3667aaeSNaresh Kumar Inna 1245a3667aaeSNaresh Kumar Inna /* Now abort/close I/Os in the queue passed */ 1246a3667aaeSNaresh Kumar Inna list_for_each_safe(tmp, next, q) { 1247a3667aaeSNaresh Kumar Inna scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp); 1248a3667aaeSNaresh Kumar Inna csio_abrt_cls((struct csio_ioreq *)tmp, scmnd); 1249a3667aaeSNaresh Kumar Inna } 1250a3667aaeSNaresh Kumar Inna 1251a3667aaeSNaresh Kumar Inna /* Wait till all active I/Os are completed/aborted/closed */ 1252a3667aaeSNaresh Kumar Inna while (!list_empty(q) && count--) { 1253a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1254a3667aaeSNaresh Kumar Inna msleep(CSIO_SCSI_ABORT_Q_POLL_MS); 1255a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1256a3667aaeSNaresh Kumar Inna } 1257a3667aaeSNaresh Kumar Inna 1258a3667aaeSNaresh Kumar Inna /* all aborts completed */ 1259a3667aaeSNaresh Kumar Inna if (list_empty(q)) 1260a3667aaeSNaresh Kumar Inna return 0; 1261a3667aaeSNaresh Kumar Inna 1262a3667aaeSNaresh Kumar Inna return -ETIMEDOUT; 1263a3667aaeSNaresh Kumar Inna } 1264a3667aaeSNaresh Kumar Inna 1265a3667aaeSNaresh Kumar Inna /* 1266a3667aaeSNaresh Kumar Inna * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module. 1267a3667aaeSNaresh Kumar Inna * @scm: SCSI module. 1268a3667aaeSNaresh Kumar Inna * @abort: abort required. 
1269a3667aaeSNaresh Kumar Inna * Called with lock held, should exit with lock held. 1270a3667aaeSNaresh Kumar Inna * Can sleep when waiting for I/Os to complete. 1271a3667aaeSNaresh Kumar Inna */ 1272a3667aaeSNaresh Kumar Inna int 1273a3667aaeSNaresh Kumar Inna csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort) 1274a3667aaeSNaresh Kumar Inna { 1275a3667aaeSNaresh Kumar Inna struct csio_hw *hw = scm->hw; 1276a3667aaeSNaresh Kumar Inna int rv = 0; 1277a3667aaeSNaresh Kumar Inna int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); 1278a3667aaeSNaresh Kumar Inna 1279a3667aaeSNaresh Kumar Inna /* No I/Os pending */ 1280a3667aaeSNaresh Kumar Inna if (list_empty(&scm->active_q)) 1281a3667aaeSNaresh Kumar Inna return 0; 1282a3667aaeSNaresh Kumar Inna 1283a3667aaeSNaresh Kumar Inna /* Wait until all active I/Os are completed */ 1284a3667aaeSNaresh Kumar Inna while (!list_empty(&scm->active_q) && count--) { 1285a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1286a3667aaeSNaresh Kumar Inna msleep(CSIO_SCSI_ABORT_Q_POLL_MS); 1287a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1288a3667aaeSNaresh Kumar Inna } 1289a3667aaeSNaresh Kumar Inna 1290a3667aaeSNaresh Kumar Inna /* all I/Os completed */ 1291a3667aaeSNaresh Kumar Inna if (list_empty(&scm->active_q)) 1292a3667aaeSNaresh Kumar Inna return 0; 1293a3667aaeSNaresh Kumar Inna 1294a3667aaeSNaresh Kumar Inna /* Else abort */ 1295a3667aaeSNaresh Kumar Inna if (abort) { 1296a3667aaeSNaresh Kumar Inna rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000); 1297a3667aaeSNaresh Kumar Inna if (rv == 0) 1298a3667aaeSNaresh Kumar Inna return rv; 1299a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); 1300a3667aaeSNaresh Kumar Inna } 1301a3667aaeSNaresh Kumar Inna 1302a3667aaeSNaresh Kumar Inna csio_scsi_cleanup_io_q(scm, &scm->active_q); 1303a3667aaeSNaresh Kumar Inna 1304a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(list_empty(&scm->active_q)); 1305a3667aaeSNaresh Kumar Inna 1306a3667aaeSNaresh Kumar Inna return rv; 1307a3667aaeSNaresh Kumar Inna } 1308a3667aaeSNaresh Kumar Inna 1309a3667aaeSNaresh Kumar Inna /* 1310a3667aaeSNaresh Kumar Inna * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode. 1311a3667aaeSNaresh Kumar Inna * @scm: SCSI module. 1312a3667aaeSNaresh Kumar Inna * @lnode: lnode 1313a3667aaeSNaresh Kumar Inna * 1314a3667aaeSNaresh Kumar Inna * Called with lock held, should exit with lock held. 1315a3667aaeSNaresh Kumar Inna * Can sleep (with dropped lock) when waiting for I/Os to complete. 
1316a3667aaeSNaresh Kumar Inna */ 1317a3667aaeSNaresh Kumar Inna int 1318a3667aaeSNaresh Kumar Inna csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln) 1319a3667aaeSNaresh Kumar Inna { 1320a3667aaeSNaresh Kumar Inna struct csio_hw *hw = scm->hw; 1321a3667aaeSNaresh Kumar Inna struct csio_scsi_level_data sld; 1322a3667aaeSNaresh Kumar Inna int rv; 1323a3667aaeSNaresh Kumar Inna int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); 1324a3667aaeSNaresh Kumar Inna 1325a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln); 1326a3667aaeSNaresh Kumar Inna 1327a3667aaeSNaresh Kumar Inna sld.level = CSIO_LEV_LNODE; 1328a3667aaeSNaresh Kumar Inna sld.lnode = ln; 1329a3667aaeSNaresh Kumar Inna INIT_LIST_HEAD(&ln->cmpl_q); 1330a3667aaeSNaresh Kumar Inna csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q); 1331a3667aaeSNaresh Kumar Inna 1332a3667aaeSNaresh Kumar Inna /* No I/Os pending on this lnode */ 1333a3667aaeSNaresh Kumar Inna if (list_empty(&ln->cmpl_q)) 1334a3667aaeSNaresh Kumar Inna return 0; 1335a3667aaeSNaresh Kumar Inna 1336a3667aaeSNaresh Kumar Inna /* Wait until all active I/Os on this lnode are completed */ 1337a3667aaeSNaresh Kumar Inna while (!list_empty(&ln->cmpl_q) && count--) { 1338a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1339a3667aaeSNaresh Kumar Inna msleep(CSIO_SCSI_ABORT_Q_POLL_MS); 1340a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1341a3667aaeSNaresh Kumar Inna } 1342a3667aaeSNaresh Kumar Inna 1343a3667aaeSNaresh Kumar Inna /* all I/Os completed */ 1344a3667aaeSNaresh Kumar Inna if (list_empty(&ln->cmpl_q)) 1345a3667aaeSNaresh Kumar Inna return 0; 1346a3667aaeSNaresh Kumar Inna 1347a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln); 1348a3667aaeSNaresh Kumar Inna 1349a3667aaeSNaresh Kumar Inna /* I/Os are pending, abort them */ 1350a3667aaeSNaresh Kumar Inna rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); 1351a3667aaeSNaresh Kumar Inna if (rv != 0) { 1352a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); 1353a3667aaeSNaresh Kumar Inna csio_scsi_cleanup_io_q(scm, &ln->cmpl_q); 1354a3667aaeSNaresh Kumar Inna } 1355a3667aaeSNaresh Kumar Inna 1356a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(list_empty(&ln->cmpl_q)); 1357a3667aaeSNaresh Kumar Inna 1358a3667aaeSNaresh Kumar Inna return rv; 1359a3667aaeSNaresh Kumar Inna } 1360a3667aaeSNaresh Kumar Inna 1361a3667aaeSNaresh Kumar Inna static ssize_t 1362a3667aaeSNaresh Kumar Inna csio_show_hw_state(struct device *dev, 1363a3667aaeSNaresh Kumar Inna struct device_attribute *attr, char *buf) 1364a3667aaeSNaresh Kumar Inna { 1365a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(class_to_shost(dev)); 1366a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1367a3667aaeSNaresh Kumar Inna 1368a3667aaeSNaresh Kumar Inna if (csio_is_hw_ready(hw)) 1369a3667aaeSNaresh Kumar Inna return snprintf(buf, PAGE_SIZE, "ready\n"); 1370a3667aaeSNaresh Kumar Inna else 1371a3667aaeSNaresh Kumar Inna return snprintf(buf, PAGE_SIZE, "not ready\n"); 1372a3667aaeSNaresh Kumar Inna } 1373a3667aaeSNaresh Kumar Inna 1374a3667aaeSNaresh Kumar Inna /* Device reset */ 1375a3667aaeSNaresh Kumar Inna static ssize_t 1376a3667aaeSNaresh Kumar Inna csio_device_reset(struct device *dev, 1377a3667aaeSNaresh Kumar Inna struct device_attribute *attr, const char *buf, size_t count) 1378a3667aaeSNaresh Kumar Inna { 1379a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = 
shost_priv(class_to_shost(dev)); 1380a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1381a3667aaeSNaresh Kumar Inna 1382a3667aaeSNaresh Kumar Inna if (*buf != '1') 1383a3667aaeSNaresh Kumar Inna return -EINVAL; 1384a3667aaeSNaresh Kumar Inna 1385a3667aaeSNaresh Kumar Inna /* Delete NPIV lnodes */ 1386a3667aaeSNaresh Kumar Inna csio_lnodes_exit(hw, 1); 1387a3667aaeSNaresh Kumar Inna 1388a3667aaeSNaresh Kumar Inna /* Block upper IOs */ 1389a3667aaeSNaresh Kumar Inna csio_lnodes_block_request(hw); 1390a3667aaeSNaresh Kumar Inna 1391a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1392a3667aaeSNaresh Kumar Inna csio_hw_reset(hw); 1393a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1394a3667aaeSNaresh Kumar Inna 1395a3667aaeSNaresh Kumar Inna /* Unblock upper IOs */ 1396a3667aaeSNaresh Kumar Inna csio_lnodes_unblock_request(hw); 1397a3667aaeSNaresh Kumar Inna return count; 1398a3667aaeSNaresh Kumar Inna } 1399a3667aaeSNaresh Kumar Inna 1400a3667aaeSNaresh Kumar Inna /* disable port */ 1401a3667aaeSNaresh Kumar Inna static ssize_t 1402a3667aaeSNaresh Kumar Inna csio_disable_port(struct device *dev, 1403a3667aaeSNaresh Kumar Inna struct device_attribute *attr, const char *buf, size_t count) 1404a3667aaeSNaresh Kumar Inna { 1405a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(class_to_shost(dev)); 1406a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1407a3667aaeSNaresh Kumar Inna bool disable; 1408a3667aaeSNaresh Kumar Inna 1409a3667aaeSNaresh Kumar Inna if (*buf == '1' || *buf == '0') 1410a3667aaeSNaresh Kumar Inna disable = (*buf == '1') ? true : false; 1411a3667aaeSNaresh Kumar Inna else 1412a3667aaeSNaresh Kumar Inna return -EINVAL; 1413a3667aaeSNaresh Kumar Inna 1414a3667aaeSNaresh Kumar Inna /* Block upper IOs */ 1415a3667aaeSNaresh Kumar Inna csio_lnodes_block_by_port(hw, ln->portid); 1416a3667aaeSNaresh Kumar Inna 1417a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1418a3667aaeSNaresh Kumar Inna csio_disable_lnodes(hw, ln->portid, disable); 1419a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock); 1420a3667aaeSNaresh Kumar Inna 1421a3667aaeSNaresh Kumar Inna /* Unblock upper IOs */ 1422a3667aaeSNaresh Kumar Inna csio_lnodes_unblock_by_port(hw, ln->portid); 1423a3667aaeSNaresh Kumar Inna return count; 1424a3667aaeSNaresh Kumar Inna } 1425a3667aaeSNaresh Kumar Inna 1426a3667aaeSNaresh Kumar Inna /* Show debug level */ 1427a3667aaeSNaresh Kumar Inna static ssize_t 1428a3667aaeSNaresh Kumar Inna csio_show_dbg_level(struct device *dev, 1429a3667aaeSNaresh Kumar Inna struct device_attribute *attr, char *buf) 1430a3667aaeSNaresh Kumar Inna { 1431a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(class_to_shost(dev)); 1432a3667aaeSNaresh Kumar Inna 1433a3667aaeSNaresh Kumar Inna return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level); 1434a3667aaeSNaresh Kumar Inna } 1435a3667aaeSNaresh Kumar Inna 1436a3667aaeSNaresh Kumar Inna /* Store debug level */ 1437a3667aaeSNaresh Kumar Inna static ssize_t 1438a3667aaeSNaresh Kumar Inna csio_store_dbg_level(struct device *dev, 1439a3667aaeSNaresh Kumar Inna struct device_attribute *attr, const char *buf, size_t count) 1440a3667aaeSNaresh Kumar Inna { 1441a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(class_to_shost(dev)); 1442a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1443a3667aaeSNaresh Kumar Inna uint32_t dbg_level = 0; 1444a3667aaeSNaresh Kumar Inna 1445a3667aaeSNaresh Kumar Inna if (!isdigit(buf[0])) 1446a3667aaeSNaresh 
Kumar Inna return -EINVAL;
1447a3667aaeSNaresh Kumar Inna
1448a3667aaeSNaresh Kumar Inna if (sscanf(buf, "%i", &dbg_level) != 1)
1449a3667aaeSNaresh Kumar Inna return -EINVAL;
1450a3667aaeSNaresh Kumar Inna
1451a3667aaeSNaresh Kumar Inna ln->params.log_level = dbg_level;
1452a3667aaeSNaresh Kumar Inna hw->params.log_level = dbg_level;
1453a3667aaeSNaresh Kumar Inna
1454a3667aaeSNaresh Kumar Inna return count;
1455a3667aaeSNaresh Kumar Inna }
1456a3667aaeSNaresh Kumar Inna
1457a3667aaeSNaresh Kumar Inna static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
145849c12413SNaresh Kumar Inna static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
145949c12413SNaresh Kumar Inna static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
1460a3667aaeSNaresh Kumar Inna static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
1461a3667aaeSNaresh Kumar Inna csio_store_dbg_level);
1462a3667aaeSNaresh Kumar Inna
1463a3667aaeSNaresh Kumar Inna static struct device_attribute *csio_fcoe_lport_attrs[] = {
1464a3667aaeSNaresh Kumar Inna &dev_attr_hw_state,
1465a3667aaeSNaresh Kumar Inna &dev_attr_device_reset,
1466a3667aaeSNaresh Kumar Inna &dev_attr_disable_port,
1467a3667aaeSNaresh Kumar Inna &dev_attr_dbg_level,
1468a3667aaeSNaresh Kumar Inna NULL,
1469a3667aaeSNaresh Kumar Inna };
1470a3667aaeSNaresh Kumar Inna
1471a3667aaeSNaresh Kumar Inna static ssize_t
1472a3667aaeSNaresh Kumar Inna csio_show_num_reg_rnodes(struct device *dev,
1473a3667aaeSNaresh Kumar Inna struct device_attribute *attr, char *buf)
1474a3667aaeSNaresh Kumar Inna {
1475a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(class_to_shost(dev));
1476a3667aaeSNaresh Kumar Inna
1477a3667aaeSNaresh Kumar Inna return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
1478a3667aaeSNaresh Kumar Inna }
1479a3667aaeSNaresh Kumar Inna
1480a3667aaeSNaresh Kumar Inna static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
1481a3667aaeSNaresh Kumar Inna
1482a3667aaeSNaresh Kumar Inna static struct device_attribute *csio_fcoe_vport_attrs[] = {
1483a3667aaeSNaresh Kumar Inna &dev_attr_num_reg_rnodes,
1484a3667aaeSNaresh Kumar Inna &dev_attr_dbg_level,
1485a3667aaeSNaresh Kumar Inna NULL,
1486a3667aaeSNaresh Kumar Inna };
1487a3667aaeSNaresh Kumar Inna
1488a3667aaeSNaresh Kumar Inna static inline uint32_t
1489a3667aaeSNaresh Kumar Inna csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
1490a3667aaeSNaresh Kumar Inna {
1491a3667aaeSNaresh Kumar Inna struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1492a3667aaeSNaresh Kumar Inna struct scatterlist *sg;
1493a3667aaeSNaresh Kumar Inna uint32_t bytes_left;
1494a3667aaeSNaresh Kumar Inna uint32_t bytes_copy;
1495a3667aaeSNaresh Kumar Inna uint32_t buf_off = 0;
1496a3667aaeSNaresh Kumar Inna uint32_t start_off = 0;
1497a3667aaeSNaresh Kumar Inna uint32_t sg_off = 0;
1498a3667aaeSNaresh Kumar Inna void *sg_addr;
1499a3667aaeSNaresh Kumar Inna void *buf_addr;
1500a3667aaeSNaresh Kumar Inna struct csio_dma_buf *dma_buf;
1501a3667aaeSNaresh Kumar Inna
1502a3667aaeSNaresh Kumar Inna bytes_left = scsi_bufflen(scmnd);
1503a3667aaeSNaresh Kumar Inna sg = scsi_sglist(scmnd);
1504a3667aaeSNaresh Kumar Inna dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
1505a3667aaeSNaresh Kumar Inna
1506a3667aaeSNaresh Kumar Inna /* Copy data from driver buffer to SGs of SCSI CMD */
1507a3667aaeSNaresh Kumar Inna while (bytes_left > 0 && sg && dma_buf) {
1508a3667aaeSNaresh Kumar Inna if (buf_off >= dma_buf->len) {
1509a3667aaeSNaresh Kumar Inna buf_off = 0; 1510a3667aaeSNaresh Kumar Inna dma_buf = (struct csio_dma_buf *) 1511a3667aaeSNaresh Kumar Inna csio_list_next(dma_buf); 1512a3667aaeSNaresh Kumar Inna continue; 1513a3667aaeSNaresh Kumar Inna } 1514a3667aaeSNaresh Kumar Inna 1515a3667aaeSNaresh Kumar Inna if (start_off >= sg->length) { 1516a3667aaeSNaresh Kumar Inna start_off -= sg->length; 1517a3667aaeSNaresh Kumar Inna sg = sg_next(sg); 1518a3667aaeSNaresh Kumar Inna continue; 1519a3667aaeSNaresh Kumar Inna } 1520a3667aaeSNaresh Kumar Inna 1521a3667aaeSNaresh Kumar Inna buf_addr = dma_buf->vaddr + buf_off; 1522a3667aaeSNaresh Kumar Inna sg_off = sg->offset + start_off; 1523a3667aaeSNaresh Kumar Inna bytes_copy = min((dma_buf->len - buf_off), 1524a3667aaeSNaresh Kumar Inna sg->length - start_off); 1525a3667aaeSNaresh Kumar Inna bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)), 1526a3667aaeSNaresh Kumar Inna bytes_copy); 1527a3667aaeSNaresh Kumar Inna 1528a3667aaeSNaresh Kumar Inna sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT)); 1529a3667aaeSNaresh Kumar Inna if (!sg_addr) { 1530a3667aaeSNaresh Kumar Inna csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n", 1531a3667aaeSNaresh Kumar Inna sg, req); 1532a3667aaeSNaresh Kumar Inna break; 1533a3667aaeSNaresh Kumar Inna } 1534a3667aaeSNaresh Kumar Inna 1535a3667aaeSNaresh Kumar Inna csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n", 1536a3667aaeSNaresh Kumar Inna sg_addr, sg_off, buf_addr, bytes_copy); 1537a3667aaeSNaresh Kumar Inna memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy); 1538a3667aaeSNaresh Kumar Inna kunmap_atomic(sg_addr); 1539a3667aaeSNaresh Kumar Inna 1540a3667aaeSNaresh Kumar Inna start_off += bytes_copy; 1541a3667aaeSNaresh Kumar Inna buf_off += bytes_copy; 1542a3667aaeSNaresh Kumar Inna bytes_left -= bytes_copy; 1543a3667aaeSNaresh Kumar Inna } 1544a3667aaeSNaresh Kumar Inna 1545a3667aaeSNaresh Kumar Inna if (bytes_left > 0) 1546a3667aaeSNaresh Kumar Inna return DID_ERROR; 1547a3667aaeSNaresh Kumar Inna else 1548a3667aaeSNaresh Kumar Inna return DID_OK; 1549a3667aaeSNaresh Kumar Inna } 1550a3667aaeSNaresh Kumar Inna 1551a3667aaeSNaresh Kumar Inna /* 1552a3667aaeSNaresh Kumar Inna * csio_scsi_err_handler - SCSI error handler. 1553a3667aaeSNaresh Kumar Inna * @hw: HW module. 1554a3667aaeSNaresh Kumar Inna * @req: IO request. 
1555a3667aaeSNaresh Kumar Inna * 1556a3667aaeSNaresh Kumar Inna */ 1557a3667aaeSNaresh Kumar Inna static inline void 1558a3667aaeSNaresh Kumar Inna csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) 1559a3667aaeSNaresh Kumar Inna { 1560a3667aaeSNaresh Kumar Inna struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); 1561a3667aaeSNaresh Kumar Inna struct csio_scsim *scm = csio_hw_to_scsim(hw); 1562a3667aaeSNaresh Kumar Inna struct fcp_resp_with_ext *fcp_resp; 1563a3667aaeSNaresh Kumar Inna struct fcp_resp_rsp_info *rsp_info; 1564a3667aaeSNaresh Kumar Inna struct csio_dma_buf *dma_buf; 1565a3667aaeSNaresh Kumar Inna uint8_t flags, scsi_status = 0; 1566a3667aaeSNaresh Kumar Inna uint32_t host_status = DID_OK; 1567a3667aaeSNaresh Kumar Inna uint32_t rsp_len = 0, sns_len = 0; 1568a3667aaeSNaresh Kumar Inna struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); 1569a3667aaeSNaresh Kumar Inna 1570a3667aaeSNaresh Kumar Inna 1571a3667aaeSNaresh Kumar Inna switch (req->wr_status) { 1572a3667aaeSNaresh Kumar Inna case FW_HOSTERROR: 1573a3667aaeSNaresh Kumar Inna if (unlikely(!csio_is_hw_ready(hw))) 1574a3667aaeSNaresh Kumar Inna return; 1575a3667aaeSNaresh Kumar Inna 1576a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1577a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_hosterror); 1578a3667aaeSNaresh Kumar Inna 1579a3667aaeSNaresh Kumar Inna break; 1580a3667aaeSNaresh Kumar Inna case FW_SCSI_RSP_ERR: 1581a3667aaeSNaresh Kumar Inna dma_buf = &req->dma_buf; 1582a3667aaeSNaresh Kumar Inna fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; 1583a3667aaeSNaresh Kumar Inna rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); 1584a3667aaeSNaresh Kumar Inna flags = fcp_resp->resp.fr_flags; 1585a3667aaeSNaresh Kumar Inna scsi_status = fcp_resp->resp.fr_status; 1586a3667aaeSNaresh Kumar Inna 1587a3667aaeSNaresh Kumar Inna if (flags & FCP_RSP_LEN_VAL) { 1588a3667aaeSNaresh Kumar Inna rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len); 1589a3667aaeSNaresh Kumar Inna if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) || 1590a3667aaeSNaresh Kumar Inna (rsp_info->rsp_code != FCP_TMF_CMPL)) { 1591a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1592a3667aaeSNaresh Kumar Inna goto out; 1593a3667aaeSNaresh Kumar Inna } 1594a3667aaeSNaresh Kumar Inna } 1595a3667aaeSNaresh Kumar Inna 1596a3667aaeSNaresh Kumar Inna if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) { 1597a3667aaeSNaresh Kumar Inna sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len); 1598a3667aaeSNaresh Kumar Inna if (sns_len > SCSI_SENSE_BUFFERSIZE) 1599a3667aaeSNaresh Kumar Inna sns_len = SCSI_SENSE_BUFFERSIZE; 1600a3667aaeSNaresh Kumar Inna 1601a3667aaeSNaresh Kumar Inna memcpy(cmnd->sense_buffer, 1602a3667aaeSNaresh Kumar Inna &rsp_info->_fr_resvd[0] + rsp_len, sns_len); 1603a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_autosense); 1604a3667aaeSNaresh Kumar Inna } 1605a3667aaeSNaresh Kumar Inna 1606a3667aaeSNaresh Kumar Inna scsi_set_resid(cmnd, 0); 1607a3667aaeSNaresh Kumar Inna 1608a3667aaeSNaresh Kumar Inna /* Under run */ 1609a3667aaeSNaresh Kumar Inna if (flags & FCP_RESID_UNDER) { 1610a3667aaeSNaresh Kumar Inna scsi_set_resid(cmnd, 1611a3667aaeSNaresh Kumar Inna be32_to_cpu(fcp_resp->ext.fr_resid)); 1612a3667aaeSNaresh Kumar Inna 1613a3667aaeSNaresh Kumar Inna if (!(flags & FCP_SNS_LEN_VAL) && 1614a3667aaeSNaresh Kumar Inna (scsi_status == SAM_STAT_GOOD) && 1615a3667aaeSNaresh Kumar Inna ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd)) 1616a3667aaeSNaresh Kumar Inna < cmnd->underflow)) 
1617a3667aaeSNaresh Kumar Inna host_status = DID_ERROR;
1618a3667aaeSNaresh Kumar Inna } else if (flags & FCP_RESID_OVER)
1619a3667aaeSNaresh Kumar Inna host_status = DID_ERROR;
1620a3667aaeSNaresh Kumar Inna
1621a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_rsperror);
1622a3667aaeSNaresh Kumar Inna break;
1623a3667aaeSNaresh Kumar Inna
1624a3667aaeSNaresh Kumar Inna case FW_SCSI_OVER_FLOW_ERR:
1625a3667aaeSNaresh Kumar Inna csio_warn(hw,
1626a3667aaeSNaresh Kumar Inna "Over-flow error,cmnd:0x%x expected len:0x%x"
1627a3667aaeSNaresh Kumar Inna " resid:0x%x\n", cmnd->cmnd[0],
1628a3667aaeSNaresh Kumar Inna scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1629a3667aaeSNaresh Kumar Inna host_status = DID_ERROR;
1630a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_ovflerror);
1631a3667aaeSNaresh Kumar Inna break;
1632a3667aaeSNaresh Kumar Inna
1633a3667aaeSNaresh Kumar Inna case FW_SCSI_UNDER_FLOW_ERR:
1634a3667aaeSNaresh Kumar Inna csio_warn(hw,
1635a3667aaeSNaresh Kumar Inna "Under-flow error,cmnd:0x%x expected"
16369cb78c16SHannes Reinecke " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
1637a3667aaeSNaresh Kumar Inna cmnd->cmnd[0], scsi_bufflen(cmnd),
1638a3667aaeSNaresh Kumar Inna scsi_get_resid(cmnd), cmnd->device->lun,
1639a3667aaeSNaresh Kumar Inna rn->flowid);
1640a3667aaeSNaresh Kumar Inna host_status = DID_ERROR;
1641a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_unflerror);
1642a3667aaeSNaresh Kumar Inna break;
1643a3667aaeSNaresh Kumar Inna
1644a3667aaeSNaresh Kumar Inna case FW_SCSI_ABORT_REQUESTED:
1645a3667aaeSNaresh Kumar Inna case FW_SCSI_ABORTED:
1646a3667aaeSNaresh Kumar Inna case FW_SCSI_CLOSE_REQUESTED:
1647a3667aaeSNaresh Kumar Inna csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
1648a3667aaeSNaresh Kumar Inna cmnd->cmnd[0],
1649a3667aaeSNaresh Kumar Inna (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
1650a3667aaeSNaresh Kumar Inna "closed" : "aborted");
1651a3667aaeSNaresh Kumar Inna /*
1652a3667aaeSNaresh Kumar Inna * csio_eh_abort_handler checks this value to
1653a3667aaeSNaresh Kumar Inna * succeed or fail the abort request.
1654a3667aaeSNaresh Kumar Inna */
1655a3667aaeSNaresh Kumar Inna host_status = DID_REQUEUE;
1656a3667aaeSNaresh Kumar Inna if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
1657a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_closed);
1658a3667aaeSNaresh Kumar Inna else
1659a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_aborted);
1660a3667aaeSNaresh Kumar Inna break;
1661a3667aaeSNaresh Kumar Inna
1662a3667aaeSNaresh Kumar Inna case FW_SCSI_ABORT_TIMEDOUT:
1663a3667aaeSNaresh Kumar Inna /* FW timed out the abort itself */
1664a3667aaeSNaresh Kumar Inna csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
1665a3667aaeSNaresh Kumar Inna req, cmnd, req->wr_status);
1666a3667aaeSNaresh Kumar Inna host_status = DID_ERROR;
1667a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_abrt_timedout);
1668a3667aaeSNaresh Kumar Inna break;
1669a3667aaeSNaresh Kumar Inna
1670a3667aaeSNaresh Kumar Inna case FW_RDEV_NOT_READY:
1671a3667aaeSNaresh Kumar Inna /*
1672a3667aaeSNaresh Kumar Inna * In firmware, a RDEV can get into this state
1673a3667aaeSNaresh Kumar Inna * temporarily, before moving into disappeared/lost
1674a3667aaeSNaresh Kumar Inna * state. So, the driver should complete the request equivalent
1675a3667aaeSNaresh Kumar Inna * to device-disappeared!
1676a3667aaeSNaresh Kumar Inna */ 1677a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_rdev_nr_error); 1678a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1679a3667aaeSNaresh Kumar Inna break; 1680a3667aaeSNaresh Kumar Inna 1681a3667aaeSNaresh Kumar Inna case FW_ERR_RDEV_LOST: 1682a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_rdev_lost_error); 1683a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1684a3667aaeSNaresh Kumar Inna break; 1685a3667aaeSNaresh Kumar Inna 1686a3667aaeSNaresh Kumar Inna case FW_ERR_RDEV_LOGO: 1687a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_rdev_logo_error); 1688a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1689a3667aaeSNaresh Kumar Inna break; 1690a3667aaeSNaresh Kumar Inna 1691a3667aaeSNaresh Kumar Inna case FW_ERR_RDEV_IMPL_LOGO: 1692a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1693a3667aaeSNaresh Kumar Inna break; 1694a3667aaeSNaresh Kumar Inna 1695a3667aaeSNaresh Kumar Inna case FW_ERR_LINK_DOWN: 1696a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_link_down_error); 1697a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1698a3667aaeSNaresh Kumar Inna break; 1699a3667aaeSNaresh Kumar Inna 1700a3667aaeSNaresh Kumar Inna case FW_FCOE_NO_XCHG: 1701a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_no_xchg_error); 1702a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1703a3667aaeSNaresh Kumar Inna break; 1704a3667aaeSNaresh Kumar Inna 1705a3667aaeSNaresh Kumar Inna default: 1706a3667aaeSNaresh Kumar Inna csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", 1707a3667aaeSNaresh Kumar Inna req->wr_status, req, cmnd); 1708a3667aaeSNaresh Kumar Inna CSIO_DB_ASSERT(0); 1709a3667aaeSNaresh Kumar Inna 1710a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scm, n_unknown_error); 1711a3667aaeSNaresh Kumar Inna host_status = DID_ERROR; 1712a3667aaeSNaresh Kumar Inna break; 1713a3667aaeSNaresh Kumar Inna } 1714a3667aaeSNaresh Kumar Inna 1715a3667aaeSNaresh Kumar Inna out: 17165c2442fdSVarun Prakash if (req->nsge > 0) { 1717a3667aaeSNaresh Kumar Inna scsi_dma_unmap(cmnd); 17185c2442fdSVarun Prakash if (req->dcopy && (host_status == DID_OK)) 17195c2442fdSVarun Prakash host_status = csio_scsi_copy_to_sgl(hw, req); 17205c2442fdSVarun Prakash } 1721a3667aaeSNaresh Kumar Inna 1722a3667aaeSNaresh Kumar Inna cmnd->result = (((host_status) << 16) | scsi_status); 1723a3667aaeSNaresh Kumar Inna cmnd->scsi_done(cmnd); 1724a3667aaeSNaresh Kumar Inna 1725a3667aaeSNaresh Kumar Inna /* Wake up waiting threads */ 1726a3667aaeSNaresh Kumar Inna csio_scsi_cmnd(req) = NULL; 17273e3f5a8aSDaniel Wagner complete(&req->cmplobj); 1728a3667aaeSNaresh Kumar Inna } 1729a3667aaeSNaresh Kumar Inna 1730a3667aaeSNaresh Kumar Inna /* 1731a3667aaeSNaresh Kumar Inna * csio_scsi_cbfn - SCSI callback function. 1732a3667aaeSNaresh Kumar Inna * @hw: HW module. 1733a3667aaeSNaresh Kumar Inna * @req: IO request. 
1734a3667aaeSNaresh Kumar Inna * 1735a3667aaeSNaresh Kumar Inna */ 1736a3667aaeSNaresh Kumar Inna static void 1737a3667aaeSNaresh Kumar Inna csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) 1738a3667aaeSNaresh Kumar Inna { 1739a3667aaeSNaresh Kumar Inna struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); 1740a3667aaeSNaresh Kumar Inna uint8_t scsi_status = SAM_STAT_GOOD; 1741a3667aaeSNaresh Kumar Inna uint32_t host_status = DID_OK; 1742a3667aaeSNaresh Kumar Inna 1743a3667aaeSNaresh Kumar Inna if (likely(req->wr_status == FW_SUCCESS)) { 1744a3667aaeSNaresh Kumar Inna if (req->nsge > 0) { 1745a3667aaeSNaresh Kumar Inna scsi_dma_unmap(cmnd); 1746a3667aaeSNaresh Kumar Inna if (req->dcopy) 1747a3667aaeSNaresh Kumar Inna host_status = csio_scsi_copy_to_sgl(hw, req); 1748a3667aaeSNaresh Kumar Inna } 1749a3667aaeSNaresh Kumar Inna 1750a3667aaeSNaresh Kumar Inna cmnd->result = (((host_status) << 16) | scsi_status); 1751a3667aaeSNaresh Kumar Inna cmnd->scsi_done(cmnd); 1752a3667aaeSNaresh Kumar Inna csio_scsi_cmnd(req) = NULL; 1753a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); 1754a3667aaeSNaresh Kumar Inna } else { 1755a3667aaeSNaresh Kumar Inna /* Error handling */ 1756a3667aaeSNaresh Kumar Inna csio_scsi_err_handler(hw, req); 1757a3667aaeSNaresh Kumar Inna } 1758a3667aaeSNaresh Kumar Inna } 1759a3667aaeSNaresh Kumar Inna 1760a3667aaeSNaresh Kumar Inna /** 1761a3667aaeSNaresh Kumar Inna * csio_queuecommand - Entry point to kickstart an I/O request. 1762a3667aaeSNaresh Kumar Inna * @host: The scsi_host pointer. 1763a3667aaeSNaresh Kumar Inna * @cmnd: The I/O request from ML. 1764a3667aaeSNaresh Kumar Inna * 1765a3667aaeSNaresh Kumar Inna * This routine does the following: 1766a3667aaeSNaresh Kumar Inna * - Checks for HW and Rnode module readiness. 1767a3667aaeSNaresh Kumar Inna * - Gets a free ioreq structure (which is already initialized 1768a3667aaeSNaresh Kumar Inna * to uninit during its allocation). 1769a3667aaeSNaresh Kumar Inna * - Maps SG elements. 1770a3667aaeSNaresh Kumar Inna * - Initializes ioreq members. 1771a3667aaeSNaresh Kumar Inna * - Kicks off the SCSI state machine for this IO. 1772a3667aaeSNaresh Kumar Inna * - Returns busy status on error. 
1773a3667aaeSNaresh Kumar Inna */ 1774a3667aaeSNaresh Kumar Inna static int 1775a3667aaeSNaresh Kumar Inna csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) 1776a3667aaeSNaresh Kumar Inna { 1777a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(host); 1778a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1779a3667aaeSNaresh Kumar Inna struct csio_scsim *scsim = csio_hw_to_scsim(hw); 1780a3667aaeSNaresh Kumar Inna struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); 1781a3667aaeSNaresh Kumar Inna struct csio_ioreq *ioreq = NULL; 1782a3667aaeSNaresh Kumar Inna unsigned long flags; 1783a3667aaeSNaresh Kumar Inna int nsge = 0; 1784a3667aaeSNaresh Kumar Inna int rv = SCSI_MLQUEUE_HOST_BUSY, nr; 1785a3667aaeSNaresh Kumar Inna int retval; 1786a3667aaeSNaresh Kumar Inna struct csio_scsi_qset *sqset; 1787a3667aaeSNaresh Kumar Inna struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1788a3667aaeSNaresh Kumar Inna 17899cf2bab6SJens Axboe sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)]; 1790a3667aaeSNaresh Kumar Inna 1791a3667aaeSNaresh Kumar Inna nr = fc_remote_port_chkready(rport); 1792a3667aaeSNaresh Kumar Inna if (nr) { 1793a3667aaeSNaresh Kumar Inna cmnd->result = nr; 1794a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_rn_nr_error); 1795a3667aaeSNaresh Kumar Inna goto err_done; 1796a3667aaeSNaresh Kumar Inna } 1797a3667aaeSNaresh Kumar Inna 1798a3667aaeSNaresh Kumar Inna if (unlikely(!csio_is_hw_ready(hw))) { 1799a3667aaeSNaresh Kumar Inna cmnd->result = (DID_REQUEUE << 16); 1800a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_hw_nr_error); 1801a3667aaeSNaresh Kumar Inna goto err_done; 1802a3667aaeSNaresh Kumar Inna } 1803a3667aaeSNaresh Kumar Inna 1804a3667aaeSNaresh Kumar Inna /* Get req->nsge, if there are SG elements to be mapped */ 1805a3667aaeSNaresh Kumar Inna nsge = scsi_dma_map(cmnd); 1806a3667aaeSNaresh Kumar Inna if (unlikely(nsge < 0)) { 1807a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_dmamap_error); 1808a3667aaeSNaresh Kumar Inna goto err; 1809a3667aaeSNaresh Kumar Inna } 1810a3667aaeSNaresh Kumar Inna 1811a3667aaeSNaresh Kumar Inna /* Do we support so many mappings? */ 1812a3667aaeSNaresh Kumar Inna if (unlikely(nsge > scsim->max_sge)) { 1813a3667aaeSNaresh Kumar Inna csio_warn(hw, 1814a3667aaeSNaresh Kumar Inna "More SGEs than can be supported." 1815a3667aaeSNaresh Kumar Inna " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge); 1816a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_unsupp_sge_error); 1817a3667aaeSNaresh Kumar Inna goto err_dma_unmap; 1818a3667aaeSNaresh Kumar Inna } 1819a3667aaeSNaresh Kumar Inna 1820a3667aaeSNaresh Kumar Inna /* Get a free ioreq structure - SM is already set to uninit */ 1821a3667aaeSNaresh Kumar Inna ioreq = csio_get_scsi_ioreq_lock(hw, scsim); 1822a3667aaeSNaresh Kumar Inna if (!ioreq) { 1823a3667aaeSNaresh Kumar Inna csio_err(hw, "Out of I/O request elements. 
Active #:%d\n", 1824a3667aaeSNaresh Kumar Inna scsim->stats.n_active); 1825a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_no_req_error); 1826a3667aaeSNaresh Kumar Inna goto err_dma_unmap; 1827a3667aaeSNaresh Kumar Inna } 1828a3667aaeSNaresh Kumar Inna 1829a3667aaeSNaresh Kumar Inna ioreq->nsge = nsge; 1830a3667aaeSNaresh Kumar Inna ioreq->lnode = ln; 1831a3667aaeSNaresh Kumar Inna ioreq->rnode = rn; 1832a3667aaeSNaresh Kumar Inna ioreq->iq_idx = sqset->iq_idx; 1833a3667aaeSNaresh Kumar Inna ioreq->eq_idx = sqset->eq_idx; 1834a3667aaeSNaresh Kumar Inna ioreq->wr_status = 0; 1835a3667aaeSNaresh Kumar Inna ioreq->drv_status = 0; 1836a3667aaeSNaresh Kumar Inna csio_scsi_cmnd(ioreq) = (void *)cmnd; 1837a3667aaeSNaresh Kumar Inna ioreq->tmo = 0; 1838a3667aaeSNaresh Kumar Inna ioreq->datadir = cmnd->sc_data_direction; 1839a3667aaeSNaresh Kumar Inna 1840a3667aaeSNaresh Kumar Inna if (cmnd->sc_data_direction == DMA_TO_DEVICE) { 1841a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(ln, n_output_requests); 1842a3667aaeSNaresh Kumar Inna ln->stats.n_output_bytes += scsi_bufflen(cmnd); 1843a3667aaeSNaresh Kumar Inna } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) { 1844a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(ln, n_input_requests); 1845a3667aaeSNaresh Kumar Inna ln->stats.n_input_bytes += scsi_bufflen(cmnd); 1846a3667aaeSNaresh Kumar Inna } else 1847a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(ln, n_control_requests); 1848a3667aaeSNaresh Kumar Inna 1849a3667aaeSNaresh Kumar Inna /* Set cbfn */ 1850a3667aaeSNaresh Kumar Inna ioreq->io_cbfn = csio_scsi_cbfn; 1851a3667aaeSNaresh Kumar Inna 1852a3667aaeSNaresh Kumar Inna /* Needed during abort */ 1853a3667aaeSNaresh Kumar Inna cmnd->host_scribble = (unsigned char *)ioreq; 1854a3667aaeSNaresh Kumar Inna cmnd->SCp.Message = 0; 1855a3667aaeSNaresh Kumar Inna 1856a3667aaeSNaresh Kumar Inna /* Kick off SCSI IO SM on the ioreq */ 1857a3667aaeSNaresh Kumar Inna spin_lock_irqsave(&hw->lock, flags); 1858a3667aaeSNaresh Kumar Inna retval = csio_scsi_start_io(ioreq); 1859a3667aaeSNaresh Kumar Inna spin_unlock_irqrestore(&hw->lock, flags); 1860a3667aaeSNaresh Kumar Inna 1861a3667aaeSNaresh Kumar Inna if (retval != 0) { 186229779a22SColin Ian King csio_err(hw, "ioreq: %p couldn't be started, status:%d\n", 1863a3667aaeSNaresh Kumar Inna ioreq, retval); 1864a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_busy_error); 1865a3667aaeSNaresh Kumar Inna goto err_put_req; 1866a3667aaeSNaresh Kumar Inna } 1867a3667aaeSNaresh Kumar Inna 1868a3667aaeSNaresh Kumar Inna return 0; 1869a3667aaeSNaresh Kumar Inna 1870a3667aaeSNaresh Kumar Inna err_put_req: 1871a3667aaeSNaresh Kumar Inna csio_put_scsi_ioreq_lock(hw, scsim, ioreq); 1872a3667aaeSNaresh Kumar Inna err_dma_unmap: 1873a3667aaeSNaresh Kumar Inna if (nsge > 0) 1874a3667aaeSNaresh Kumar Inna scsi_dma_unmap(cmnd); 1875a3667aaeSNaresh Kumar Inna err: 1876a3667aaeSNaresh Kumar Inna return rv; 1877a3667aaeSNaresh Kumar Inna 1878a3667aaeSNaresh Kumar Inna err_done: 1879a3667aaeSNaresh Kumar Inna cmnd->scsi_done(cmnd); 1880a3667aaeSNaresh Kumar Inna return 0; 1881a3667aaeSNaresh Kumar Inna } 1882a3667aaeSNaresh Kumar Inna 1883a3667aaeSNaresh Kumar Inna static int 1884a3667aaeSNaresh Kumar Inna csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort) 1885a3667aaeSNaresh Kumar Inna { 1886a3667aaeSNaresh Kumar Inna int rv; 1887a3667aaeSNaresh Kumar Inna int cpu = smp_processor_id(); 1888a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = ioreq->lnode; 1889a3667aaeSNaresh Kumar Inna struct csio_scsi_qset *sqset = 
&hw->sqset[ln->portid][cpu]; 1890a3667aaeSNaresh Kumar Inna 1891a3667aaeSNaresh Kumar Inna ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; 1892a3667aaeSNaresh Kumar Inna /* 1893a3667aaeSNaresh Kumar Inna * Use current processor queue for posting the abort/close, but retain 1894a3667aaeSNaresh Kumar Inna * the ingress queue ID of the original I/O being aborted/closed - we 1895a3667aaeSNaresh Kumar Inna * need the abort/close completion to be received on the same queue 1896a3667aaeSNaresh Kumar Inna * as the original I/O. 1897a3667aaeSNaresh Kumar Inna */ 1898a3667aaeSNaresh Kumar Inna ioreq->eq_idx = sqset->eq_idx; 1899a3667aaeSNaresh Kumar Inna 1900a3667aaeSNaresh Kumar Inna if (abort == SCSI_ABORT) 1901a3667aaeSNaresh Kumar Inna rv = csio_scsi_abort(ioreq); 1902a3667aaeSNaresh Kumar Inna else 1903a3667aaeSNaresh Kumar Inna rv = csio_scsi_close(ioreq); 1904a3667aaeSNaresh Kumar Inna 1905a3667aaeSNaresh Kumar Inna return rv; 1906a3667aaeSNaresh Kumar Inna } 1907a3667aaeSNaresh Kumar Inna 1908a3667aaeSNaresh Kumar Inna static int 1909a3667aaeSNaresh Kumar Inna csio_eh_abort_handler(struct scsi_cmnd *cmnd) 1910a3667aaeSNaresh Kumar Inna { 1911a3667aaeSNaresh Kumar Inna struct csio_ioreq *ioreq; 1912a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(cmnd->device->host); 1913a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 1914a3667aaeSNaresh Kumar Inna struct csio_scsim *scsim = csio_hw_to_scsim(hw); 1915a3667aaeSNaresh Kumar Inna int ready = 0, ret; 1916a3667aaeSNaresh Kumar Inna unsigned long tmo = 0; 1917a3667aaeSNaresh Kumar Inna int rv; 1918a3667aaeSNaresh Kumar Inna struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); 1919a3667aaeSNaresh Kumar Inna 1920a3667aaeSNaresh Kumar Inna ret = fc_block_scsi_eh(cmnd); 1921a3667aaeSNaresh Kumar Inna if (ret) 1922a3667aaeSNaresh Kumar Inna return ret; 1923a3667aaeSNaresh Kumar Inna 1924a3667aaeSNaresh Kumar Inna ioreq = (struct csio_ioreq *)cmnd->host_scribble; 1925a3667aaeSNaresh Kumar Inna if (!ioreq) 1926a3667aaeSNaresh Kumar Inna return SUCCESS; 1927a3667aaeSNaresh Kumar Inna 1928a3667aaeSNaresh Kumar Inna if (!rn) 1929a3667aaeSNaresh Kumar Inna return FAILED; 1930a3667aaeSNaresh Kumar Inna 1931a3667aaeSNaresh Kumar Inna csio_dbg(hw, 1932a3667aaeSNaresh Kumar Inna "Request to abort ioreq:%p cmd:%p cdb:%08llx" 19339cb78c16SHannes Reinecke " ssni:0x%x lun:%llu iq:0x%x\n", 1934a3667aaeSNaresh Kumar Inna ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, 1935a3667aaeSNaresh Kumar Inna cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); 1936a3667aaeSNaresh Kumar Inna 1937a3667aaeSNaresh Kumar Inna if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) { 1938a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_abrt_race_comp); 1939a3667aaeSNaresh Kumar Inna return SUCCESS; 1940a3667aaeSNaresh Kumar Inna } 1941a3667aaeSNaresh Kumar Inna 1942a3667aaeSNaresh Kumar Inna ready = csio_is_lnode_ready(ln); 1943a3667aaeSNaresh Kumar Inna tmo = CSIO_SCSI_ABRT_TMO_MS; 1944a3667aaeSNaresh Kumar Inna 19453e3f5a8aSDaniel Wagner reinit_completion(&ioreq->cmplobj); 1946a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock); 1947a3667aaeSNaresh Kumar Inna rv = csio_do_abrt_cls(hw, ioreq, (ready ? 
SCSI_ABORT : SCSI_CLOSE));
1948a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock);
1949a3667aaeSNaresh Kumar Inna
1950a3667aaeSNaresh Kumar Inna if (rv != 0) {
1951a3667aaeSNaresh Kumar Inna if (rv == -EINVAL) {
1952a3667aaeSNaresh Kumar Inna /* Return success, if abort/close request issued on
1953a3667aaeSNaresh Kumar Inna * already completed IO
1954a3667aaeSNaresh Kumar Inna */
1955a3667aaeSNaresh Kumar Inna return SUCCESS;
1956a3667aaeSNaresh Kumar Inna }
1957a3667aaeSNaresh Kumar Inna if (ready)
1958a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_abrt_busy_error);
1959a3667aaeSNaresh Kumar Inna else
1960a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_cls_busy_error);
1961a3667aaeSNaresh Kumar Inna
1962a3667aaeSNaresh Kumar Inna goto inval_scmnd;
1963a3667aaeSNaresh Kumar Inna }
1964a3667aaeSNaresh Kumar Inna
1965a3667aaeSNaresh Kumar Inna wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
1966a3667aaeSNaresh Kumar Inna
1967a3667aaeSNaresh Kumar Inna /* FW didn't respond to abort within our timeout */
1968a3667aaeSNaresh Kumar Inna if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
1969a3667aaeSNaresh Kumar Inna
1970a3667aaeSNaresh Kumar Inna csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
1971a3667aaeSNaresh Kumar Inna CSIO_INC_STATS(scsim, n_abrt_timedout);
1972a3667aaeSNaresh Kumar Inna
1973a3667aaeSNaresh Kumar Inna inval_scmnd:
1974a3667aaeSNaresh Kumar Inna if (ioreq->nsge > 0)
1975a3667aaeSNaresh Kumar Inna scsi_dma_unmap(cmnd);
1976a3667aaeSNaresh Kumar Inna
1977a3667aaeSNaresh Kumar Inna spin_lock_irq(&hw->lock);
1978a3667aaeSNaresh Kumar Inna csio_scsi_cmnd(ioreq) = NULL;
1979a3667aaeSNaresh Kumar Inna spin_unlock_irq(&hw->lock);
1980a3667aaeSNaresh Kumar Inna
1981a3667aaeSNaresh Kumar Inna cmnd->result = (DID_ERROR << 16);
1982a3667aaeSNaresh Kumar Inna cmnd->scsi_done(cmnd);
1983a3667aaeSNaresh Kumar Inna
1984a3667aaeSNaresh Kumar Inna return FAILED;
1985a3667aaeSNaresh Kumar Inna }
1986a3667aaeSNaresh Kumar Inna
1987a3667aaeSNaresh Kumar Inna /* FW successfully aborted the request */
1988a3667aaeSNaresh Kumar Inna if (host_byte(cmnd->result) == DID_REQUEUE) {
1989a3667aaeSNaresh Kumar Inna csio_info(hw,
1990f50e7605SHannes Reinecke "Aborted SCSI command to (%d:%llu) tag %u\n",
1991a3667aaeSNaresh Kumar Inna cmnd->device->id, cmnd->device->lun,
1992f50e7605SHannes Reinecke cmnd->request->tag);
1993a3667aaeSNaresh Kumar Inna return SUCCESS;
1994a3667aaeSNaresh Kumar Inna } else {
1995a3667aaeSNaresh Kumar Inna csio_info(hw,
1996f50e7605SHannes Reinecke "Failed to abort SCSI command, (%d:%llu) tag %u\n",
1997a3667aaeSNaresh Kumar Inna cmnd->device->id, cmnd->device->lun,
1998f50e7605SHannes Reinecke cmnd->request->tag);
1999a3667aaeSNaresh Kumar Inna return FAILED;
2000a3667aaeSNaresh Kumar Inna }
2001a3667aaeSNaresh Kumar Inna }
2002a3667aaeSNaresh Kumar Inna
2003a3667aaeSNaresh Kumar Inna /*
2004a3667aaeSNaresh Kumar Inna * csio_tm_cbfn - TM callback function.
2005a3667aaeSNaresh Kumar Inna * @hw: HW module.
2006a3667aaeSNaresh Kumar Inna * @req: IO request.
2007a3667aaeSNaresh Kumar Inna *
2008a3667aaeSNaresh Kumar Inna * Cache the result in 'cmnd', since ioreq will be freed soon
2009a3667aaeSNaresh Kumar Inna * after we return from here, and the waiting thread shouldn't trust
2010a3667aaeSNaresh Kumar Inna * the ioreq contents.
2011a3667aaeSNaresh Kumar Inna */ 2012a3667aaeSNaresh Kumar Inna static void 2013a3667aaeSNaresh Kumar Inna csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) 2014a3667aaeSNaresh Kumar Inna { 2015a3667aaeSNaresh Kumar Inna struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); 2016a3667aaeSNaresh Kumar Inna struct csio_dma_buf *dma_buf; 2017a3667aaeSNaresh Kumar Inna uint8_t flags = 0; 2018a3667aaeSNaresh Kumar Inna struct fcp_resp_with_ext *fcp_resp; 2019a3667aaeSNaresh Kumar Inna struct fcp_resp_rsp_info *rsp_info; 2020a3667aaeSNaresh Kumar Inna 2021a3667aaeSNaresh Kumar Inna csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", 2022a3667aaeSNaresh Kumar Inna req, req->wr_status); 2023a3667aaeSNaresh Kumar Inna 2024a3667aaeSNaresh Kumar Inna /* Cache FW return status */ 2025a3667aaeSNaresh Kumar Inna cmnd->SCp.Status = req->wr_status; 2026a3667aaeSNaresh Kumar Inna 2027a3667aaeSNaresh Kumar Inna /* Special handling based on FCP response */ 2028a3667aaeSNaresh Kumar Inna 2029a3667aaeSNaresh Kumar Inna /* 2030a3667aaeSNaresh Kumar Inna * FW returns us this error, if flags were set. FCP4 says 2031a3667aaeSNaresh Kumar Inna * FCP_RSP_LEN_VAL in flags shall be set for TM completions. 2032a3667aaeSNaresh Kumar Inna * So if a target were to set this bit, we expect that the 2033a3667aaeSNaresh Kumar Inna * rsp_code is set to FCP_TMF_CMPL for a successful TM 2034a3667aaeSNaresh Kumar Inna * completion. Any other rsp_code means TM operation failed. 2035a3667aaeSNaresh Kumar Inna * If a target were to just ignore setting flags, we treat 2036a3667aaeSNaresh Kumar Inna * the TM operation as success, and FW returns FW_SUCCESS. 2037a3667aaeSNaresh Kumar Inna */ 2038a3667aaeSNaresh Kumar Inna if (req->wr_status == FW_SCSI_RSP_ERR) { 2039a3667aaeSNaresh Kumar Inna dma_buf = &req->dma_buf; 2040a3667aaeSNaresh Kumar Inna fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; 2041a3667aaeSNaresh Kumar Inna rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); 2042a3667aaeSNaresh Kumar Inna 2043a3667aaeSNaresh Kumar Inna flags = fcp_resp->resp.fr_flags; 2044a3667aaeSNaresh Kumar Inna 2045a3667aaeSNaresh Kumar Inna /* Modify return status if flags indicate success */ 2046a3667aaeSNaresh Kumar Inna if (flags & FCP_RSP_LEN_VAL) 2047a3667aaeSNaresh Kumar Inna if (rsp_info->rsp_code == FCP_TMF_CMPL) 2048a3667aaeSNaresh Kumar Inna cmnd->SCp.Status = FW_SUCCESS; 2049a3667aaeSNaresh Kumar Inna 2050a3667aaeSNaresh Kumar Inna csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code); 2051a3667aaeSNaresh Kumar Inna } 2052a3667aaeSNaresh Kumar Inna 2053a3667aaeSNaresh Kumar Inna /* Wake up the TM handler thread */ 2054a3667aaeSNaresh Kumar Inna csio_scsi_cmnd(req) = NULL; 2055a3667aaeSNaresh Kumar Inna } 2056a3667aaeSNaresh Kumar Inna 2057a3667aaeSNaresh Kumar Inna static int 2058a3667aaeSNaresh Kumar Inna csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) 2059a3667aaeSNaresh Kumar Inna { 2060a3667aaeSNaresh Kumar Inna struct csio_lnode *ln = shost_priv(cmnd->device->host); 2061a3667aaeSNaresh Kumar Inna struct csio_hw *hw = csio_lnode_to_hw(ln); 2062a3667aaeSNaresh Kumar Inna struct csio_scsim *scsim = csio_hw_to_scsim(hw); 2063a3667aaeSNaresh Kumar Inna struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); 2064a3667aaeSNaresh Kumar Inna struct csio_ioreq *ioreq = NULL; 2065a3667aaeSNaresh Kumar Inna struct csio_scsi_qset *sqset; 2066a3667aaeSNaresh Kumar Inna unsigned long flags; 2067a3667aaeSNaresh Kumar Inna int retval; 2068a3667aaeSNaresh Kumar Inna int count, ret; 
	LIST_HEAD(local_q);
	struct csio_scsi_level_data sld;

	if (!rn)
		goto fail;

	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
		 cmnd->device->lun, rn->flowid, rn->scsi_id);

	if (!csio_is_lnode_ready(ln)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " local node vnpi:0x%x (LUN:%llu)\n",
			 ln->vnp_flowid, cmnd->device->lun);
		goto fail;
	}

	/* Lnode is ready, now wait on rport node readiness */
	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * If we have blocked in the previous call, at this point, either the
	 * remote node has come back online, or device loss timer has fired
	 * and the remote node is destroyed. Allow the LUN reset only for
	 * the former case, since LUN reset is a TMF I/O on the wire, and we
	 * need a valid session to issue it.
	 */
	if (fc_remote_port_chkready(rn->rport)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " remote node ssni:0x%x (LUN:%llu)\n",
			 rn->flowid, cmnd->device->lun);
		goto fail;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);

	if (!ioreq) {
		csio_err(hw, "Out of IO request elements. Active # :%d\n",
			 scsim->stats.n_active);
		goto fail;
	}

	sqset = &hw->sqset[ln->portid][smp_processor_id()];
	ioreq->nsge = 0;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;

	csio_scsi_cmnd(ioreq) = cmnd;
	cmnd->host_scribble = (unsigned char *)ioreq;
	cmnd->SCp.Status = 0;

	cmnd->SCp.Message = FCP_TMF_LUN_RESET;
	ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

	/*
	 * FW times the LUN reset for ioreq->tmo, so we have to wait a little
	 * longer (10s for now) than that to allow FW to return the timed
	 * out command.
	 */
	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);

	/* Set cbfn */
	ioreq->io_cbfn = csio_tm_cbfn;

	/* Save off the ioreq info for later use */
	sld.level = CSIO_LEV_LUN;
	sld.lnode = ioreq->lnode;
	sld.rnode = ioreq->rnode;
	sld.oslun = cmnd->device->lun;

	spin_lock_irqsave(&hw->lock, flags);
	/* Kick off TM SM on the ioreq */
	retval = csio_scsi_start_tm(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
			 ioreq, retval);
		goto fail_ret_ioreq;
	}

	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
		 count * (CSIO_SCSI_TM_POLL_MS / 1000));
	/* Wait for completion */
	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
	       && count--)
		msleep(CSIO_SCSI_TM_POLL_MS);

	/* LUN reset timed-out */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
		csio_err(hw, "LUN reset (%d:%llu) timed out\n",
			 cmnd->device->id, cmnd->device->lun);

		spin_lock_irq(&hw->lock);
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		spin_unlock_irq(&hw->lock);

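		/* The timed-out ioreq has been cleaned up and unlinked above;
		 * hand it back to the free list and report the reset as failed.
		 */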
		goto fail_ret_ioreq;
	}

	/* LUN reset returned, check cached status */
	if (cmnd->SCp.Status != FW_SUCCESS) {
		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
			 cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
		goto fail;
	}

	/* LUN reset succeeded; start aborting affected I/Os */
	/*
	 * Since the host guarantees that no more I/Os will be issued to
	 * this LUN until the LUN reset completes, we gather the pending
	 * I/Os after the LUN reset.
	 */
	spin_lock_irq(&hw->lock);
	csio_scsi_gather_active_ios(scsim, &sld, &local_q);

	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
	spin_unlock_irq(&hw->lock);

	/* Aborts may have timed out */
	if (retval != 0) {
		csio_err(hw,
			 "Attempt to abort I/Os during LUN reset of %llu"
			 " returned %d\n", cmnd->device->lun, retval);
		/* Return I/Os back to active_q */
		spin_lock_irq(&hw->lock);
		list_splice_tail_init(&local_q, &scsim->active_q);
		spin_unlock_irq(&hw->lock);
		goto fail;
	}

	CSIO_INC_STATS(rn, n_lun_rst);

	csio_info(hw, "LUN reset occurred (%d:%llu)\n",
		  cmnd->device->id, cmnd->device->lun);

	return SUCCESS;

fail_ret_ioreq:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
fail:
	CSIO_INC_STATS(rn, n_lun_rst_fail);
	return FAILED;
}

static int
csio_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));

	return 0;
}

static int
csio_slave_configure(struct scsi_device *sdev)
{
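	/* Apply the driver-wide default per-LUN queue depth (csio_lun_qdepth). */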
	scsi_change_queue_depth(sdev, csio_lun_qdepth);
	return 0;
}

static void
csio_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static int
csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct csio_lnode *ln = shost_priv(shost);
	int rv = 1;

	spin_lock_irq(shost->host_lock);
	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
		goto out;

	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
			    csio_delta_scan_tmo * HZ);
out:
	spin_unlock_irq(shost->host_lock);

	return rv;
}

struct scsi_host_template csio_fcoe_shost_template = {
	.module			= THIS_MODULE,
	.name			= CSIO_DRV_DESC,
	.proc_name		= KBUILD_MODNAME,
	.queuecommand		= csio_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc		= csio_slave_alloc,
	.slave_configure	= csio_slave_configure,
	.slave_destroy		= csio_slave_destroy,
	.scan_finished		= csio_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
	.shost_attrs		= csio_fcoe_lport_attrs,
	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
};

struct scsi_host_template csio_fcoe_shost_vport_template = {
	.module			= THIS_MODULE,
	.name			= CSIO_DRV_DESC,
	.proc_name		= KBUILD_MODNAME,
	.queuecommand		= csio_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc		= csio_slave_alloc,
	.slave_configure	= csio_slave_configure,
	.slave_destroy		= csio_slave_destroy,
	.scan_finished		= csio_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
	.shost_attrs		= csio_fcoe_vport_attrs,
	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
};

/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
 * @scm: SCSI Module
 * @hw: HW device.
 * @buf_size: buffer size
 * @num_buf: Number of buffers.
 *
 * This routine allocates the DMA buffers required for SCSI data transfer,
 * in case the SGL buffers of a SCSI Read request posted by the SCSI
 * midlayer are not virtually contiguous.
 */
static int
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
			 int buf_size, int num_buf)
{
	int n = 0;
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc = NULL;
	uint32_t unit_size = 0;

	if (!num_buf)
		return 0;

	if (!buf_size)
		return -EINVAL;

	INIT_LIST_HEAD(&scm->ddp_freelist);

	/* Align buf size to page size */
	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
	/* Initialize dma descriptors */
	for (n = 0; n < num_buf; n++) {
		/* Set unit size to request size */
		unit_size = buf_size;
		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
		if (!ddp_desc) {
			csio_err(hw,
				 "Failed to allocate ddp descriptors,"
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ddp);
			goto no_mem;
		}

		/* Allocate DMA buffers for DDP */
		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
				&ddp_desc->paddr, GFP_KERNEL);
		if (!ddp_desc->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer (ddp) allocation"
				 " failed!\n");
			kfree(ddp_desc);
			goto no_mem;
		}

		ddp_desc->len = unit_size;

		/* Add it to the scsi ddp freelist */
		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
		CSIO_INC_STATS(scm, n_free_ddp);
	}

	return 0;
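
	/*
	 * Partial failure: release every descriptor already on the freelist.
	 * The list walk below steps back with csio_list_prev() before
	 * deleting the current entry, so the iteration stays safe while
	 * entries are being removed.
	 */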
no_mem:
	/* release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;

	return -ENOMEM;
}

/*
 * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
 * @scm: SCSI Module
 * @hw: HW device.
 *
 * This routine frees ddp buffers.
 */
static void
csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
{
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc;

	/* release dma descs back to freelist and free dma memory */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;
}

/**
 * csio_scsim_init - Initialize SCSI Module
 * @scm: SCSI Module
 * @hw: HW module
 *
 */
int
csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
{
	int i;
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	INIT_LIST_HEAD(&scm->active_q);
	scm->hw = hw;

	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
	scm->max_sge = CSIO_SCSI_MAX_SGE;

	spin_lock_init(&scm->freelist_lock);

	/* Pre-allocate ioreqs and initialize them */
	INIT_LIST_HEAD(&scm->ioreq_freelist);
	for (i = 0; i < csio_scsi_ioreqs; i++) {
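		/* Each pre-allocated ioreq carries its own response DMA
		 * buffer, carved out of hw->scsi_dma_pool below.
		 */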
		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
		if (!ioreq) {
			csio_err(hw,
				 "I/O request element allocation failed, "
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ioreq);

			goto free_ioreq;
		}

		/* Allocate DMA buffers for Response Payload */
		dma_buf = &ioreq->dma_buf;
		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
						&dma_buf->paddr);
		if (!dma_buf->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer allocation"
				 " failed!\n");
			kfree(ioreq);
			goto free_ioreq;
		}

		dma_buf->len = scm->proto_rsp_len;

		/* Set state to uninit */
		csio_init_state(&ioreq->sm, csio_scsis_uninit);
		INIT_LIST_HEAD(&ioreq->gen_list);
		init_completion(&ioreq->cmplobj);

		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
		CSIO_INC_STATS(scm, n_free_ioreq);
	}

	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
		goto free_ioreq;

	return 0;

free_ioreq:
	/*
	 * Free up existing allocations, since an error
	 * from here means we are returning for good
	 */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	return -ENOMEM;
}

/**
 * csio_scsim_exit: Uninitialize SCSI Module
 * @scm: SCSI Module
 *
 */
void
csio_scsim_exit(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	csio_scsi_free_ddp_bufs(scm, scm->hw);
}