xref: /linux/drivers/scsi/elx/efct/efct_hw.c (revision dd53d333aadb980944021d076c825d5736a13db5)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"

struct efct_mbox_rqst_ctx {
	int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
	void *arg;
};

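/*
 * Reset the cached link state to "unknown" values; called at setup time and
 * again from the SLI link-event callback before the new state is recorded.
 */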
static int
efct_hw_link_event_init(struct efct_hw *hw)
{
	hw->link.status = SLI4_LINK_STATUS_MAX;
	hw->link.topology = SLI4_LINK_TOPO_NONE;
	hw->link.medium = SLI4_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = U32_MAX;

	return 0;
}

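/*
 * Read the firmware dump buffer size via a COMMON_SET_DUMP_LOCATION mailbox
 * command and cache it in hw->dump_size. Only PCI function 0 owns the dump,
 * so all other functions return early without issuing the command.
 */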
static int
efct_hw_read_max_dump_size(struct efct_hw *hw)
{
	u8 buf[SLI4_BMBX_SIZE];
	struct efct *efct = hw->os;
	int rc = 0;
	struct sli4_rsp_cmn_set_dump_location *rsp;

	/* attempt to determine the dump size for function 0 only. */
	if (PCI_FUNC(efct->pci->devfn) != 0)
		return rc;

	if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
		return -EIO;

	rsp = (struct sli4_rsp_cmn_set_dump_location *)
	      (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));

	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc != 0) {
		efc_log_debug(hw->os, "set dump location cmd failed\n");
		return rc;
	}

	hw->dump_size =
	  le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;

	efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);

	return rc;
}

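/*
 * Completion handler for the READ_TOPOLOGY mailbox command issued from the
 * link-up path: decode the attention type, topology and link speed into
 * hw->link, then report the domain to libefc via efc_domain_cb().
 */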
static int
__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
{
	struct sli4_cmd_read_topology *read_topo =
				(struct sli4_cmd_read_topology *)mqe;
	u8 speed;
	struct efc_domain_record drec = {0};
	struct efct *efct = hw->os;

	if (status || le16_to_cpu(read_topo->hdr.status)) {
		efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(read_topo->hdr.status));
		return -EIO;
	}

	switch (le32_to_cpu(read_topo->dw2_attentype) &
		SLI4_READTOPO_ATTEN_TYPE) {
	case SLI4_READ_TOPOLOGY_LINK_UP:
		hw->link.status = SLI4_LINK_STATUS_UP;
		break;
	case SLI4_READ_TOPOLOGY_LINK_DOWN:
		hw->link.status = SLI4_LINK_STATUS_DOWN;
		break;
	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
		hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
		break;
	default:
		hw->link.status = SLI4_LINK_STATUS_MAX;
		break;
	}

	switch (read_topo->topology) {
	case SLI4_READ_TOPO_NON_FC_AL:
		hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
		break;
	case SLI4_READ_TOPO_FC_AL:
		hw->link.topology = SLI4_LINK_TOPO_FC_AL;
		if (hw->link.status == SLI4_LINK_STATUS_UP)
			hw->link.loop_map = hw->loop_map.virt;
		hw->link.fc_id = read_topo->acquired_al_pa;
		break;
	default:
		hw->link.topology = SLI4_LINK_TOPO_MAX;
		break;
	}

	hw->link.medium = SLI4_LINK_MEDIUM_FC;

	speed = (le32_to_cpu(read_topo->currlink_state) &
		 SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
	switch (speed) {
	case SLI4_READ_TOPOLOGY_SPEED_1G:
		hw->link.speed =  1 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_2G:
		hw->link.speed =  2 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_4G:
		hw->link.speed =  4 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_8G:
		hw->link.speed =  8 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_16G:
		hw->link.speed = 16 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_32G:
		hw->link.speed = 32 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_64G:
		hw->link.speed = 64 * 1000;
		break;
	case SLI4_READ_TOPOLOGY_SPEED_128G:
		hw->link.speed = 128 * 1000;
		break;
	}

	drec.speed = hw->link.speed;
	drec.fc_id = hw->link.fc_id;
	drec.is_nport = true;
	efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);

	return 0;
}

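/*
 * SLI4 link-event callback, registered with sli_callback() in
 * efct_hw_setup(). On link up it either reports the N_Port domain directly
 * or, for loop topology, issues READ_TOPOLOGY to fetch the loop map; on
 * link down it notifies libefc that the current domain is lost.
 */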
static int
efct_hw_cb_link(void *ctx, void *e)
{
	struct efct_hw *hw = ctx;
	struct sli4_link_event *event = e;
	struct efc_domain *d = NULL;
	int rc = 0;
	struct efct *efct = hw->os;

	efct_hw_link_event_init(hw);

	switch (event->status) {
	case SLI4_LINK_STATUS_UP:

		hw->link = *event;
		efct->efcport->link_status = EFC_LINK_STATUS_UP;

		if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
			struct efc_domain_record drec = {0};

			efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
				     event->speed);
			drec.speed = event->speed;
			drec.fc_id = event->fc_id;
			drec.is_nport = true;
			efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
				      &drec);
		} else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
			u8 buf[SLI4_BMBX_SIZE];

			efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
				     event->speed);

			if (!sli_cmd_read_topology(&hw->sli, buf,
						   &hw->loop_map)) {
				rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
						__efct_read_topology_cb, NULL);
			}

			if (rc)
				efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
		} else {
			efc_log_info(hw->os, "%s(%#x), speed is %d\n",
				     "Link Up, unsupported topology ",
				     event->topology, event->speed);
		}
		break;
	case SLI4_LINK_STATUS_DOWN:
		efc_log_info(hw->os, "Link down\n");

		hw->link.status = event->status;
		efct->efcport->link_status = EFC_LINK_STATUS_DOWN;

		d = efct->efcport->domain;
		if (d)
			efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
		break;
	default:
		efc_log_debug(hw->os, "unhandled link status %#x\n",
			      event->status);
		break;
	}

	return 0;
}

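/*
 * One-time HW object setup: zero the structure, create the mailbox command
 * and request pools, initialize the IO lists and locks, bind the SLI4 layer
 * to the PCI device, and size the queues from the reported SLI limits.
 * Safe to call more than once; subsequent calls are no-ops.
 */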
int
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
{
	u32 i, max_sgl, cpus;

	if (hw->hw_setup_called)
		return 0;

	/*
	 * efct_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, efct_hw_init() won't
	 * free/realloc that memory
	 */
	memset(hw, 0, sizeof(struct efct_hw));

	hw->hw_setup_called = true;

	hw->os = os;

	mutex_init(&hw->bmbx_lock);
	spin_lock_init(&hw->cmd_lock);
	INIT_LIST_HEAD(&hw->cmd_head);
	INIT_LIST_HEAD(&hw->cmd_pending);
	hw->cmd_head_count = 0;

	/* Create mailbox command ctx pool */
	hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
					sizeof(struct efct_command_ctx));
	if (!hw->cmd_ctx_pool) {
		efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
		return -EIO;
	}

	/* Create mailbox request ctx pool for library callback */
	hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
					sizeof(struct efct_mbox_rqst_ctx));
	if (!hw->mbox_rqst_pool) {
		efc_log_err(hw->os, "failed to allocate mbox request pool\n");
		return -EIO;
	}

	spin_lock_init(&hw->io_lock);
	INIT_LIST_HEAD(&hw->io_inuse);
	INIT_LIST_HEAD(&hw->io_free);
	INIT_LIST_HEAD(&hw->io_wait_free);

	atomic_set(&hw->io_alloc_failed_count, 0);

	hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
	if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
		efc_log_err(hw->os, "SLI setup failed\n");
		return -EIO;
	}

	efct_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);

	/*
	 * Set all the queue sizes to the maximum allowed.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
		hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
	/*
	 * Adjust the size of the WQs so that the CQ is twice as big as
	 * the WQ to allow for 2 completions per IO. This allows us to
	 * handle multi-phase as well as aborts.
	 */
	hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;

	/*
	 * The RQ assignment for RQ pair mode.
	 */

	hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;

	cpus = num_possible_cpus();
	hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;

	max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
	max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
	hw->config.n_sgl = max_sgl;

	(void)efct_hw_read_max_dump_size(hw);

	return 0;
}

static void
efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
{
	efc_log_info(hw->os,
		     "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
		     j, hw->config.filter_def[j], i, id);
}

static inline void
efct_hw_init_free_io(struct efct_hw_io *io)
{
	/*
	 * Set io->done to NULL, to avoid any callbacks, should
	 * a completion be received for one of these IOs
	 */
	io->done = NULL;
	io->abort_done = NULL;
	io->status_saved = false;
	io->abort_in_progress = false;
	io->type = 0xFFFF;
	io->wq = NULL;
}

static u8 efct_hw_iotype_is_originator(u16 io_type)
{
	switch (io_type) {
	case EFCT_HW_FC_CT:
	case EFCT_HW_ELS_REQ:
		return 0;
	default:
		return -EIO;
	}
}

static void
efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
{
	/* Restore the default */
	io->sgl = &io->def_sgl;
	io->sgl_count = io->def_sgl_count;
}

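/*
 * WQE completion handler bound to each HW IO's request tag. It clears the
 * XRI-busy state when WCQE[XB] is clear, extracts the transfer length and
 * extended status per IO type, optionally issues an internal abort while
 * the exchange is still busy (latching the status until the XRI_ABORTED
 * completion), and finally invokes and clears the IO's ->done() callback.
 */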
static void
efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
{
	struct efct_hw_io *io = arg;
	struct efct_hw *hw = io->hw;
	struct sli4_fc_wcqe *wcqe = (void *)cqe;
	u32 len = 0;
	u32 ext = 0;

	/* clear xbusy flag if WCQE[XB] is clear */
	if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
		io->xbusy = false;

	/* get extended CQE status */
	switch (io->type) {
	case EFCT_HW_BLS_ACC:
	case EFCT_HW_BLS_RJT:
		break;
	case EFCT_HW_ELS_REQ:
		sli_fc_els_did(&hw->sli, cqe, &ext);
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case EFCT_HW_ELS_RSP:
	case EFCT_HW_FC_CT_RSP:
		break;
	case EFCT_HW_FC_CT:
		len = sli_fc_response_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
		break;
	case EFCT_HW_IO_TARGET_RSP:
		break;
	case EFCT_HW_IO_DNRX_REQUEUE:
		/* release the count for re-posting the buffer */
		/* efct_hw_io_free(hw, io); */
		break;
	default:
		efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
			    io->type, io->indicator);
		break;
	}
	if (status) {
		ext = sli_fc_ext_status(&hw->sli, cqe);
		/*
		 * If we're not an originator IO, and XB is set, then issue
		 * abort for the IO from within the HW
		 */
		if ((!efct_hw_iotype_is_originator(io->type)) &&
		    wcqe->flags & SLI4_WCQE_XB) {
			int rc;

			efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
				      io->indicator, io->reqtag);

			/*
			 * Because targets may send a response when the IO
			 * completes using the same XRI, we must wait for the
			 * XRI_ABORTED CQE to issue the IO callback
			 */
			rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
			if (rc == 0) {
				/*
				 * latch status to return after abort is
				 * complete
				 */
				io->status_saved = true;
				io->saved_status = status;
				io->saved_ext = ext;
				io->saved_len = len;
				goto exit_efct_hw_wq_process_io;
			} else if (rc == -EINPROGRESS) {
				/*
				 * Already being aborted by someone else (ABTS
				 * perhaps). Just return original
				 * error.
				 */
				efc_log_debug(hw->os, "%s%#x tag=%#x\n",
					      "abort in progress xri=",
					      io->indicator, io->reqtag);

			} else {
				/* Failed to abort for some other reason, log
				 * error
				 */
				efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
					      "Failed to abort xri=",
					      io->indicator, io->reqtag, rc);
			}
		}
	}

	if (io->done) {
		efct_hw_done_t done = io->done;

		io->done = NULL;

		if (io->status_saved) {
			/* use latched status if exists */
			status = io->saved_status;
			len = io->saved_len;
			ext = io->saved_ext;
			io->status_saved = false;
		}

		/* Restore default SGL */
		efct_hw_io_restore_sgl(hw, io);
		done(io, len, status, ext, io->arg);
	}

exit_efct_hw_wq_process_io:
	return;
}

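/*
 * Allocate the pool of struct efct_hw_io objects along with their WQE
 * buffers, per-IO request tags, XRI resources, default SGL DMA areas and a
 * shared XFER_RDY buffer. On a re-init, existing objects (and their DMA
 * memory) are reused rather than reallocated.
 */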
static int
efct_hw_setup_io(struct efct_hw *hw)
{
	u32 i = 0;
	struct efct_hw_io *io = NULL;
	uintptr_t xfer_virt = 0;
	uintptr_t xfer_phys = 0;
	u32 index;
	bool new_alloc = true;
	struct efc_dma *dma;
	struct efct *efct = hw->os;

	if (!hw->io) {
		hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
		if (!hw->io)
			return -ENOMEM;

		memset(hw->io, 0, hw->config.n_io * sizeof(io));

		for (i = 0; i < hw->config.n_io; i++) {
			hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
			if (!hw->io[i])
				goto error;
		}

		/* Create WQE buffs for IO */
		hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
					GFP_KERNEL);
		if (!hw->wqe_buffs) {
			kfree(hw->io);
			return -ENOMEM;
		}

	} else {
		/* re-use existing IOs, including SGLs */
		new_alloc = false;
	}

	if (new_alloc) {
		dma = &hw->xfer_rdy;
		dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
		dma->virt = dma_alloc_coherent(&efct->pci->dev,
					       dma->size, &dma->phys, GFP_DMA);
		if (!dma->virt)
			return -ENOMEM;
	}
	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
	xfer_phys = hw->xfer_rdy.phys;

	/* Initialize the pool of HW IO objects */
	for (i = 0; i < hw->config.n_io; i++) {
		struct hw_wq_callback *wqcb;

		io = hw->io[i];

		/* initialize IO fields */
		io->hw = hw;

		/* Assign a WQE buff */
		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];

		/* Allocate the request tag for this IO */
		wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
		if (!wqcb) {
			efc_log_err(hw->os, "can't allocate request tag\n");
			return -ENOSPC;
		}
		io->reqtag = wqcb->instance_index;

		/* Now for the fields that are initialized on each free */
		efct_hw_init_free_io(io);

		/* The XB flag isn't cleared on IO free, so init to zero */
		io->xbusy = 0;

		if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
				       &io->indicator, &index)) {
			efc_log_err(hw->os,
				    "sli_resource_alloc failed @ %d\n", i);
			return -ENOMEM;
		}

		if (new_alloc) {
			dma = &io->def_sgl;
			dma->size = hw->config.n_sgl *
					sizeof(struct sli4_sge);
			dma->virt = dma_alloc_coherent(&efct->pci->dev,
						       dma->size, &dma->phys,
						       GFP_DMA);
			if (!dma->virt) {
				efc_log_err(hw->os, "dma_alloc fail %d\n", i);
				memset(&io->def_sgl, 0,
				       sizeof(struct efc_dma));
				return -ENOMEM;
			}
		}
		io->def_sgl_count = hw->config.n_sgl;
		io->sgl = &io->def_sgl;
		io->sgl_count = io->def_sgl_count;

		if (hw->xfer_rdy.size) {
			io->xfer_rdy.virt = (void *)xfer_virt;
			io->xfer_rdy.phys = xfer_phys;
			io->xfer_rdy.size = sizeof(struct fcp_txrdy);

			xfer_virt += sizeof(struct fcp_txrdy);
			xfer_phys += sizeof(struct fcp_txrdy);
		}
	}

	return 0;
error:
	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
		kfree(hw->io[i]);
		hw->io[i] = NULL;
	}

	kfree(hw->io);
	hw->io = NULL;

	return -ENOMEM;
}

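/*
 * Pre-register the per-IO SGLs with the adapter: walk hw->io[] in runs of
 * contiguous XRIs (up to 256 per request), post each run with a
 * POST_SGL_PAGES mailbox command, and move the registered IOs onto the
 * free list.
 */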
static int
efct_hw_init_prereg_io(struct efct_hw *hw)
{
	u32 i, idx = 0;
	struct efct_hw_io *io = NULL;
	u8 cmd[SLI4_BMBX_SIZE];
	int rc = 0;
	u32 n_rem;
	u32 n = 0;
	u32 sgls_per_request = 256;
	struct efc_dma **sgls = NULL;
	struct efc_dma req;
	struct efct *efct = hw->os;

	sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
	if (!sgls)
		return -ENOMEM;

	memset(&req, 0, sizeof(struct efc_dma));
	req.size = 32 + sgls_per_request * 16;
	req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
				      GFP_DMA);
	if (!req.virt) {
		kfree(sgls);
		return -ENOMEM;
	}

	for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
		/* Copy the addresses of the SGLs into the local sgls[] array,
		 * breaking out if the xri values are not contiguous.
		 */
		u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;

		for (n = 0; n < min; n++) {
			/* Check that we have contiguous xri values */
			if (n > 0) {
				if (hw->io[idx + n]->indicator !=
				    hw->io[idx + n - 1]->indicator + 1)
					break;
			}

			sgls[n] = hw->io[idx + n]->sgl;
		}

		if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
				hw->io[idx]->indicator, n, sgls, NULL, &req)) {
			rc = -EIO;
			break;
		}

		rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
		if (rc) {
			efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
			break;
		}

		/* Add to tail if successful */
		for (i = 0; i < n; i++, idx++) {
			io = hw->io[idx];
			io->state = EFCT_HW_IO_STATE_FREE;
			INIT_LIST_HEAD(&io->list_entry);
			list_add_tail(&io->list_entry, &hw->io_free);
		}
	}

	dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
	memset(&req, 0, sizeof(struct efc_dma));
	kfree(sgls);

	return rc;
}

static int
efct_hw_init_io(struct efct_hw *hw)
{
	u32 i, idx = 0;
	bool prereg = false;
	struct efct_hw_io *io = NULL;
	int rc = 0;

	prereg = hw->sli.params.sgl_pre_registered;

	if (prereg)
		return efct_hw_init_prereg_io(hw);

	for (i = 0; i < hw->config.n_io; i++, idx++) {
		io = hw->io[idx];
		io->state = EFCT_HW_IO_STATE_FREE;
		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &hw->io_free);
	}

	return rc;
}

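/*
 * Program the FDT transfer hint via a COMMON_SET_FEATURES mailbox command.
 * A failure is logged but treated as non-fatal by the caller, since older
 * firmware may not support the feature.
 */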
static int
efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
{
	int rc = 0;
	u8 buf[SLI4_BMBX_SIZE];
	struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;

	memset(&param, 0, sizeof(param));
	param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
	/* build the set_features command */
	sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);

	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc)
		efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
			     fdt_xfer_hint, rc);
	else
		efc_log_info(hw->os, "Set FDT transfer hint to %d\n",
			     le32_to_cpu(param.fdt_xfer_hint));

	return rc;
}

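/*
 * Register the FCF in plain REG_FCFI mode (single RQ configuration). Each
 * 32-bit hw->config.filter_def[] entry carries the four r_ctl/type
 * mask/match bytes for one RQ filter, and every hardware RQ is bound to
 * the filters selected by its filter_mask bitmap.
 */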
static int
efct_hw_config_rq(struct efct_hw *hw)
{
	u32 min_rq_count, i, rc;
	struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
	u8 buf[SLI4_BMBX_SIZE];

	efc_log_info(hw->os, "using REG_FCFI standard\n");

	/*
	 * Set the filter match/mask values from hw's
	 * filter_def values
	 */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_cfg[i].rq_id = cpu_to_le16(0xffff);
		rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
		rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
		rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
		rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
	}

	/*
	 * Update the rq_id fields of the FCF configuration
	 * (don't update more than the number of rq_cfg
	 * elements)
	 */
	min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
			hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
	for (i = 0; i < min_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];
		u32 j;

		for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
			u32 mask = (rq->filter_mask != 0) ?
				rq->filter_mask : 1;

			if (!(mask & (1U << j)))
				continue;

			rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
			efct_logfcfi(hw, j, i, rq->hdr->id);
		}
	}

	rc = -EIO;
	if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
		rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);

	if (rc != 0) {
		efc_log_err(hw->os, "FCFI registration failed\n");
		return rc;
	}
	hw->fcf_indicator =
		le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);

	return rc;
}

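/*
 * Register the FCF with multi-RQ (REG_FCFI_MRQ) support. The command is
 * issued twice from efct_hw_init(): once in FCFI mode to obtain the FCF
 * indicator, and once in MRQ mode to supply the RQ filters, the RQ
 * selection policy and the MRQ bitmask.
 */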
static int
efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
{
	u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	struct hw_rq *rq;
	struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
	struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
	u32 rc, i;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
		goto issue_cmd;

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = cpu_to_le16(0xffff);
		rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
		rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
		rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
		rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
	}

	rq = hw->hw_rq[0];
	rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
	rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);

	mrq_bitmask = 0x2;
issue_cmd:
	efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
		      hw->hw_rq_count, hw->config.rq_selection_policy, mode);
	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
				  hw->config.rq_selection_policy, mrq_bitmask,
				  hw->hw_mrq_count, rq_filter);
	if (rc) {
		efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
		return -EIO;
	}

	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);

	rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;

	if (rc || le16_to_cpu(rsp->hdr.status)) {
		efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
			    rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
		return -EIO;
	}

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
		hw->fcf_indicator = le16_to_cpu(rsp->fcfi);

	return 0;
}

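/*
 * Insert a queue id -> queue index mapping into one of the queue hash
 * tables using open addressing with linear probing; EFCT_HW_Q_HASH_SIZE is
 * a power of two, so "& (EFCT_HW_Q_HASH_SIZE - 1)" performs the modulo.
 * A lookup probes the same way, for example (illustrative sketch only,
 * not the driver's actual lookup helper):
 *
 *	index = id & (EFCT_HW_Q_HASH_SIZE - 1);
 *	while (hash[index].in_use && hash[index].id != id)
 *		index = (index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
 *	return hash[index].in_use ? hash[index].index : -1;
 */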
static void
efct_hw_queue_hash_add(struct efct_queue_hash *hash,
		       u16 id, u16 index)
{
	u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);

	/*
	 * Since the hash is always bigger than the number of queues, we
	 * never have to worry about an infinite loop.
	 */
	while (hash[hash_index].in_use)
		hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);

	/* not used, claim the entry */
	hash[hash_index].id = id;
	hash[hash_index].in_use = true;
	hash[hash_index].index = index;
}

static int
efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
{
	int rc = 0;
	u8 buf[SLI4_BMBX_SIZE];
	struct sli4_rqst_cmn_set_features_health_check param;
	u32 health_check_flag = 0;

	memset(&param, 0, sizeof(param));

	if (enable)
		health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;

	if (query)
		health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;

	param.health_check_dword = cpu_to_le32(health_check_flag);

	/* build the set_features command */
	sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);

	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
	if (rc)
		efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
	else
		efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");

	return rc;
}

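/*
 * Bring the hardware to the active state: verify the command lists are
 * empty, drop any stale IOs left over from a reset, run sli_init(),
 * optionally enable the SLI port health check and FDT transfer hint,
 * create the queues, RQ buffers, request-tag pool and HW IO pool, register
 * the FCFI (single RQ or MRQ mode), then arm the EQs/CQs and reserve a
 * send-frame IO.
 */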
int
efct_hw_init(struct efct_hw *hw)
{
	int rc;
	u32 i = 0;
	int rem_count;
	unsigned long flags = 0;
	struct efct_hw_io *temp;
	struct efc_dma *dma;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in efct_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset
	 * (efct_hw_reset()).
	 */
	spin_lock_irqsave(&hw->cmd_lock, flags);
	if (!list_empty(&hw->cmd_head)) {
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		efc_log_err(hw->os, "command found on cmd list\n");
		return -EIO;
	}
	if (!list_empty(&hw->cmd_pending)) {
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
		efc_log_err(hw->os, "command found on pending list\n");
		return -EIO;
	}
	spin_unlock_irqrestore(&hw->cmd_lock, flags);

	/* Free RQ buffers if previously allocated */
	efct_hw_rx_free(hw);

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * efct_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * efct_hw_command_cancel() that is called in the efct_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed.  Clean them up.
	 */
	rem_count = 0;
	while (!list_empty(&hw->io_wait_free)) {
		rem_count++;
		temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
					list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
			      rem_count);

	rem_count = 0;
	while (!list_empty(&hw->io_inuse)) {
		rem_count++;
		temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
					list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
			      rem_count);

	rem_count = 0;
	while (!list_empty(&hw->io_free)) {
		rem_count++;
		temp = list_first_entry(&hw->io_free, struct efct_hw_io,
					list_entry);
		list_del_init(&temp->list_entry);
	}
	if (rem_count > 0)
		efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
			      rem_count);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1)
		hw->sli.features &= (~SLI4_REQFEAT_MRQP);

	if (sli_init(&hw->sli)) {
		efc_log_err(hw->os, "SLI failed to initialize\n");
		return -EIO;
	}

	if (hw->sliport_healthcheck) {
		rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != 0) {
			efc_log_err(hw->os, "Enable port Health check fail\n");
			return rc;
		}
	}

	/*
	 * Set FDT transfer hint; only works on Lancer.
	 */
	if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to
		 * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
		 * that do not support the EFCT_HW_FDT_XFER_HINT feature.
		 */
		efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
	}

	/* zero the hashes */
	memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
		      EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);

	memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
		      EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);

	memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
		      EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);

	rc = efct_hw_init_queues(hw);
	if (rc)
		return rc;

	rc = efct_hw_map_wq_cpu(hw);
	if (rc)
		return rc;

	/* Allocate and post RQ buffers */
	rc = efct_hw_rx_allocate(hw);
	if (rc) {
		efc_log_err(hw->os, "rx_allocate failed\n");
		return rc;
	}

	rc = efct_hw_rx_post(hw);
	if (rc) {
		efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
		return rc;
	}

	if (hw->config.n_eq == 1) {
		rc = efct_hw_config_rq(hw);
		if (rc) {
			efc_log_err(hw->os, "config rq failed %d\n", rc);
			return rc;
		}
	} else {
		rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
		if (rc != 0) {
			efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
			return rc;
		}

		rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
		if (rc != 0) {
			efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
			return rc;
		}
	}

	/*
	 * Allocate the WQ request tag pool, if not previously allocated
	 * (the request tag value is 16 bits, thus the pool allocation size
	 * of 64k)
	 */
	hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
	if (!hw->wq_reqtag_pool) {
		efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
		return -ENOMEM;
	}

	rc = efct_hw_setup_io(hw);
	if (rc) {
		efc_log_err(hw->os, "IO allocation failure\n");
		return rc;
	}

	rc = efct_hw_init_io(hw);
	if (rc) {
		efc_log_err(hw->os, "IO initialization failure\n");
		return rc;
	}

	dma = &hw->loop_map;
	dma->size = SLI4_MIN_LOOP_MAP_BYTES;
	dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
				       GFP_DMA);
	if (!dma->virt)
		return -EIO;

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
	 * entries
	 */
	for (i = 0; i < hw->eq_count; i++)
		sli_queue_arm(&hw->sli, &hw->eq[i], true);

	/*
	 * Initialize RQ hash
	 */
	for (i = 0; i < hw->rq_count; i++)
		efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);

	/*
	 * Initialize WQ hash
	 */
	for (i = 0; i < hw->wq_count; i++)
		efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);

	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
	 */
	for (i = 0; i < hw->cq_count; i++) {
		efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], true);
	}

	/* Set RQ process limit */
	for (i = 0; i < hw->hw_rq_count; i++) {
		struct hw_rq *rq = hw->hw_rq[i];

		hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
	}

	/* record the fact that the queues are functional */
	hw->state = EFCT_HW_STATE_ACTIVE;
	/*
	 * Allocate a HW IO for send frame.
	 */
	hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
	if (!hw->hw_wq[0]->send_frame_io)
		efc_log_err(hw->os, "alloc for send_frame_io failed\n");

	/* Initialize send frame sequence id */
	atomic_set(&hw->send_frame_seq_id, 0);

	return 0;
}

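/*
 * Parse a comma-separated string of RQ filter values into
 * hw->config.filter_def[]. For example (illustrative values only), a
 * string such as "0x32001ff,0x280408" would populate filter_def[0] and
 * filter_def[1] and leave the remaining entries zeroed.
 */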
int
efct_hw_parse_filter(struct efct_hw *hw, void *value)
{
	int rc = 0;
	char *p = NULL;
	char *orig = NULL;
	char *token;
	u32 idx = 0;

	for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
		hw->config.filter_def[idx] = 0;

	orig = kstrdup(value, GFP_KERNEL);
	if (!orig || !*orig) {
		efc_log_err(hw->os, "invalid filter string\n");
		kfree(orig);
		return -ENOMEM;
	}
	p = orig;

	idx = 0;
	while ((token = strsep(&p, ",")) && *token) {
		if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
			efc_log_err(hw->os, "kstrtou32 failed\n");

		if (!p || !*p)
			break;

		if (idx == ARRAY_SIZE(hw->config.filter_def))
			break;
	}
	kfree(orig);

	return rc;
}

u64
efct_get_wwnn(struct efct_hw *hw)
{
	struct sli4 *sli = &hw->sli;
	u8 p[8];

	memcpy(p, sli->wwnn, sizeof(p));
	return get_unaligned_be64(p);
}

u64
efct_get_wwpn(struct efct_hw *hw)
{
	struct sli4 *sli = &hw->sli;
	u8 p[8];

	memcpy(p, sli->wwpn, sizeof(p));
	return get_unaligned_be64(p);
}

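/*
 * Helpers for the per-RQ receive buffer pools: allocate an array of
 * efc_hw_rq_buffer entries with one DMA-coherent buffer each, and free
 * them again. Used below by efct_hw_rx_allocate() (and by efct_hw_rx_free())
 * for the RQ header and payload buffers.
 */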
1155580c0255SJames Smart static struct efc_hw_rq_buffer *
1156580c0255SJames Smart efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
1157580c0255SJames Smart 			u32 size)
1158580c0255SJames Smart {
1159580c0255SJames Smart 	struct efct *efct = hw->os;
1160580c0255SJames Smart 	struct efc_hw_rq_buffer *rq_buf = NULL;
1161580c0255SJames Smart 	struct efc_hw_rq_buffer *prq;
1162580c0255SJames Smart 	u32 i;
1163580c0255SJames Smart 
1164580c0255SJames Smart 	if (!count)
1165580c0255SJames Smart 		return NULL;
1166580c0255SJames Smart 
1167580c0255SJames Smart 	rq_buf = kcalloc(count, sizeof(*rq_buf), GFP_KERNEL);
1168580c0255SJames Smart 	if (!rq_buf)
1169580c0255SJames Smart 		return NULL;
1171580c0255SJames Smart 
1172580c0255SJames Smart 	for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1173580c0255SJames Smart 		prq->rqindex = rqindex;
1174580c0255SJames Smart 		prq->dma.size = size;
1175580c0255SJames Smart 		prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
1176580c0255SJames Smart 						   prq->dma.size,
1177580c0255SJames Smart 						   &prq->dma.phys,
1178580c0255SJames Smart 						   GFP_DMA);
1179580c0255SJames Smart 		if (!prq->dma.virt) {
1180580c0255SJames Smart 			efc_log_err(hw->os, "DMA allocation failed\n");
			/* free the buffers allocated so far */
			while (i--) {
				prq--;
				dma_free_coherent(&efct->pci->dev, prq->dma.size,
						  prq->dma.virt, prq->dma.phys);
			}
1181580c0255SJames Smart 			kfree(rq_buf);
1182580c0255SJames Smart 			return NULL;
1183580c0255SJames Smart 		}
1184580c0255SJames Smart 	}
1185580c0255SJames Smart 	return rq_buf;
1186580c0255SJames Smart }
1187580c0255SJames Smart 
1188580c0255SJames Smart static void
1189580c0255SJames Smart efct_hw_rx_buffer_free(struct efct_hw *hw,
1190580c0255SJames Smart 		       struct efc_hw_rq_buffer *rq_buf,
1191580c0255SJames Smart 			u32 count)
1192580c0255SJames Smart {
1193580c0255SJames Smart 	struct efct *efct = hw->os;
1194580c0255SJames Smart 	u32 i;
1195580c0255SJames Smart 	struct efc_hw_rq_buffer *prq;
1196580c0255SJames Smart 
1197580c0255SJames Smart 	if (rq_buf) {
1198580c0255SJames Smart 		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1199580c0255SJames Smart 			dma_free_coherent(&efct->pci->dev,
1200580c0255SJames Smart 					  prq->dma.size, prq->dma.virt,
1201580c0255SJames Smart 					  prq->dma.phys);
1202580c0255SJames Smart 			memset(&prq->dma, 0, sizeof(struct efc_dma));
1203580c0255SJames Smart 		}
1204580c0255SJames Smart 
1205580c0255SJames Smart 		kfree(rq_buf);
1206580c0255SJames Smart 	}
1207580c0255SJames Smart }
1208580c0255SJames Smart 
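/*
 * Allocate receive buffers for every RQ pair: a set of FC header buffers
 * (EFCT_HW_RQ_SIZE_HDR bytes each) and a set of payload buffers
 * (rq_default_buffer_size bytes each) per hw_rq.
 */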
1209580c0255SJames Smart int
1210580c0255SJames Smart efct_hw_rx_allocate(struct efct_hw *hw)
1211580c0255SJames Smart {
1212580c0255SJames Smart 	struct efct *efct = hw->os;
1213580c0255SJames Smart 	u32 i;
1214580c0255SJames Smart 	int rc = 0;
1215580c0255SJames Smart 	u32 rqindex = 0;
1216580c0255SJames Smart 	u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
1217580c0255SJames Smart 	u32 payload_size = hw->config.rq_default_buffer_size;
1218580c0255SJames Smart 
1219580c0255SJames Smart 	rqindex = 0;
1220580c0255SJames Smart 
1221580c0255SJames Smart 	for (i = 0; i < hw->hw_rq_count; i++) {
1222580c0255SJames Smart 		struct hw_rq *rq = hw->hw_rq[i];
1223580c0255SJames Smart 
1224580c0255SJames Smart 		/* Allocate header buffers */
1225580c0255SJames Smart 		rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1226580c0255SJames Smart 						      rq->entry_count,
1227580c0255SJames Smart 						      hdr_size);
1228580c0255SJames Smart 		if (!rq->hdr_buf) {
1229580c0255SJames Smart 			efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
1230580c0255SJames Smart 			rc = -EIO;
1231580c0255SJames Smart 			break;
1232580c0255SJames Smart 		}
1233580c0255SJames Smart 
1234580c0255SJames Smart 		efc_log_debug(hw->os,
1235580c0255SJames Smart 			      "rq[%2d] rq_id %02d header  %4d by %4d bytes\n",
1236580c0255SJames Smart 			      i, rq->hdr->id, rq->entry_count, hdr_size);
1237580c0255SJames Smart 
1238580c0255SJames Smart 		rqindex++;
1239580c0255SJames Smart 
1240580c0255SJames Smart 		/* Allocate payload buffers */
1241580c0255SJames Smart 		rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1242580c0255SJames Smart 							  rq->entry_count,
1243580c0255SJames Smart 							  payload_size);
1244580c0255SJames Smart 		if (!rq->payload_buf) {
1245580c0255SJames Smart 			efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
1246580c0255SJames Smart 			rc = -EIO;
1247580c0255SJames Smart 			break;
1248580c0255SJames Smart 		}
1249580c0255SJames Smart 		efc_log_debug(hw->os,
1250580c0255SJames Smart 			      "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
1251580c0255SJames Smart 			      i, rq->data->id, rq->entry_count, payload_size);
1252580c0255SJames Smart 		rqindex++;
1253580c0255SJames Smart 	}
1254580c0255SJames Smart 
1255580c0255SJames Smart 	return rc ? -EIO : 0;
1256580c0255SJames Smart }
1257580c0255SJames Smart 
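/*
 * Build the efc_hw_sequence pool (sized to the total number of RQ
 * entries) and post the header/payload buffer pairs to the hardware via
 * efct_hw_sequence_free().
 */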
1258580c0255SJames Smart int
1259580c0255SJames Smart efct_hw_rx_post(struct efct_hw *hw)
1260580c0255SJames Smart {
1261580c0255SJames Smart 	u32 i;
1262580c0255SJames Smart 	u32 idx;
1263580c0255SJames Smart 	u32 rq_idx;
1264580c0255SJames Smart 	int rc = 0;
1265580c0255SJames Smart 
1266580c0255SJames Smart 	if (!hw->seq_pool) {
1267580c0255SJames Smart 		u32 count = 0;
1268580c0255SJames Smart 
1269580c0255SJames Smart 		for (i = 0; i < hw->hw_rq_count; i++)
1270580c0255SJames Smart 			count += hw->hw_rq[i]->entry_count;
1271580c0255SJames Smart 
1272580c0255SJames Smart 		hw->seq_pool = kmalloc_array(count,
1273580c0255SJames Smart 				sizeof(struct efc_hw_sequence),	GFP_KERNEL);
1274580c0255SJames Smart 		if (!hw->seq_pool)
1275580c0255SJames Smart 			return -ENOMEM;
1276580c0255SJames Smart 	}
1277580c0255SJames Smart 
1278580c0255SJames Smart 	/*
1279580c0255SJames Smart 	 * In RQ pair mode, we MUST post the header and payload buffer at the
1280580c0255SJames Smart 	 * same time.
1281580c0255SJames Smart 	 */
1282580c0255SJames Smart 	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
1283580c0255SJames Smart 		struct hw_rq *rq = hw->hw_rq[rq_idx];
1284580c0255SJames Smart 
1285580c0255SJames Smart 		for (i = 0; i < rq->entry_count - 1; i++) {
1286580c0255SJames Smart 			struct efc_hw_sequence *seq;
1287580c0255SJames Smart 
1288580c0255SJames Smart 			seq = hw->seq_pool + idx;
1289580c0255SJames Smart 			idx++;
1290580c0255SJames Smart 			seq->header = &rq->hdr_buf[i];
1291580c0255SJames Smart 			seq->payload = &rq->payload_buf[i];
1292580c0255SJames Smart 			rc = efct_hw_sequence_free(hw, seq);
1293580c0255SJames Smart 			if (rc)
1294580c0255SJames Smart 				break;
1295580c0255SJames Smart 		}
1296580c0255SJames Smart 		if (rc)
1297580c0255SJames Smart 			break;
1298580c0255SJames Smart 	}
1299580c0255SJames Smart 
1300580c0255SJames Smart 	if (rc) {
1301580c0255SJames Smart 		kfree(hw->seq_pool);
		hw->seq_pool = NULL;
	}
1302580c0255SJames Smart 
1303580c0255SJames Smart 	return rc;
1304580c0255SJames Smart }
1305580c0255SJames Smart 
1306580c0255SJames Smart void
1307580c0255SJames Smart efct_hw_rx_free(struct efct_hw *hw)
1308580c0255SJames Smart {
1309580c0255SJames Smart 	u32 i;
1310580c0255SJames Smart 
1311580c0255SJames Smart 	/* Free hw_rq buffers */
1312580c0255SJames Smart 	for (i = 0; i < hw->hw_rq_count; i++) {
1313580c0255SJames Smart 		struct hw_rq *rq = hw->hw_rq[i];
1314580c0255SJames Smart 
1315580c0255SJames Smart 		if (rq) {
1316580c0255SJames Smart 			efct_hw_rx_buffer_free(hw, rq->hdr_buf,
1317580c0255SJames Smart 					       rq->entry_count);
1318580c0255SJames Smart 			rq->hdr_buf = NULL;
1319580c0255SJames Smart 			efct_hw_rx_buffer_free(hw, rq->payload_buf,
1320580c0255SJames Smart 					       rq->entry_count);
1321580c0255SJames Smart 			rq->payload_buf = NULL;
1322580c0255SJames Smart 		}
1323580c0255SJames Smart 	}
1324580c0255SJames Smart }
1325580c0255SJames Smart 
1326580c0255SJames Smart static int
1327580c0255SJames Smart efct_hw_cmd_submit_pending(struct efct_hw *hw)
1328580c0255SJames Smart {
1329580c0255SJames Smart 	int rc = 0;
1330580c0255SJames Smart 
1331580c0255SJames Smart 	/* Assumes lock held */
1332580c0255SJames Smart 
1333580c0255SJames Smart 	/* Only submit MQE if there's room */
1334580c0255SJames Smart 	while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
1335580c0255SJames Smart 	       !list_empty(&hw->cmd_pending)) {
1336580c0255SJames Smart 		struct efct_command_ctx *ctx;
1337580c0255SJames Smart 
1338580c0255SJames Smart 		ctx = list_first_entry(&hw->cmd_pending,
1339580c0255SJames Smart 				       struct efct_command_ctx, list_entry);
1340580c0255SJames Smart 		if (!ctx)
1341580c0255SJames Smart 			break;
1342580c0255SJames Smart 
1343580c0255SJames Smart 		list_del_init(&ctx->list_entry);
1344580c0255SJames Smart 
1345580c0255SJames Smart 		list_add_tail(&ctx->list_entry, &hw->cmd_head);
1346580c0255SJames Smart 		hw->cmd_head_count++;
1347580c0255SJames Smart 		if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
1348580c0255SJames Smart 			efc_log_debug(hw->os, "sli_mq_write failed\n");
1350580c0255SJames Smart 			rc = -EIO;
1351580c0255SJames Smart 			break;
1352580c0255SJames Smart 		}
1353580c0255SJames Smart 	}
1354580c0255SJames Smart 	return rc;
1355580c0255SJames Smart }
1356580c0255SJames Smart 
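/*
 * Issue a SLI-4 mailbox command; 'cmd' must be an SLI4_BMBX_SIZE buffer.
 * With EFCT_CMD_POLL the command runs on the bootstrap mailbox and the
 * completion is copied back into 'cmd' before returning. With
 * EFCT_CMD_NOWAIT the command is queued on the MQ (the HW must be active)
 * and 'cb' is invoked from completion context with the returned MQE.
 */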
1357580c0255SJames Smart int
1358580c0255SJames Smart efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
1359580c0255SJames Smart {
1360580c0255SJames Smart 	int rc = -EIO;
1361580c0255SJames Smart 	unsigned long flags = 0;
1362580c0255SJames Smart 	void *bmbx = NULL;
1363580c0255SJames Smart 
1364580c0255SJames Smart 	/*
1365580c0255SJames Smart 	 * If the chip is in an error state (UE'd) then reject this mailbox
1366580c0255SJames Smart 	 * command.
1367580c0255SJames Smart 	 */
1368580c0255SJames Smart 	if (sli_fw_error_status(&hw->sli) > 0) {
1369580c0255SJames Smart 		efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
1370580c0255SJames Smart 		efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
1371580c0255SJames Smart 			     sli_reg_read_status(&hw->sli),
1372580c0255SJames Smart 			     sli_reg_read_err1(&hw->sli),
1373580c0255SJames Smart 			     sli_reg_read_err2(&hw->sli));
1374580c0255SJames Smart 
1375580c0255SJames Smart 		return -EIO;
1376580c0255SJames Smart 	}
1377580c0255SJames Smart 
1378580c0255SJames Smart 	/*
1379580c0255SJames Smart 	 * Send a mailbox command to the hardware, and either wait for
1380580c0255SJames Smart 	 * a completion (EFCT_CMD_POLL) or get an optional asynchronous
1381580c0255SJames Smart 	 * completion (EFCT_CMD_NOWAIT).
1382580c0255SJames Smart 	 */
1383580c0255SJames Smart 
1384580c0255SJames Smart 	if (opts == EFCT_CMD_POLL) {
1385580c0255SJames Smart 		mutex_lock(&hw->bmbx_lock);
1386580c0255SJames Smart 		bmbx = hw->sli.bmbx.virt;
1387580c0255SJames Smart 
1388580c0255SJames Smart 		memset(bmbx, 0, SLI4_BMBX_SIZE);
1389580c0255SJames Smart 		memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
1390580c0255SJames Smart 
1391580c0255SJames Smart 		if (sli_bmbx_command(&hw->sli) == 0) {
1392580c0255SJames Smart 			rc = 0;
1393580c0255SJames Smart 			memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
1394580c0255SJames Smart 		}
1395580c0255SJames Smart 		mutex_unlock(&hw->bmbx_lock);
1396580c0255SJames Smart 	} else if (opts == EFCT_CMD_NOWAIT) {
1397580c0255SJames Smart 		struct efct_command_ctx	*ctx = NULL;
1398580c0255SJames Smart 
1399580c0255SJames Smart 		if (hw->state != EFCT_HW_STATE_ACTIVE) {
1400580c0255SJames Smart 			efc_log_err(hw->os, "Can't send command, HW state=%d\n",
1401580c0255SJames Smart 				    hw->state);
1402580c0255SJames Smart 			return -EIO;
1403580c0255SJames Smart 		}
1404580c0255SJames Smart 
1405580c0255SJames Smart 		ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
1406580c0255SJames Smart 		if (!ctx)
1407580c0255SJames Smart 			return -ENOSPC;
1408580c0255SJames Smart 
1409580c0255SJames Smart 		memset(ctx, 0, sizeof(struct efct_command_ctx));
1410580c0255SJames Smart 
1411580c0255SJames Smart 		if (cb) {
1412580c0255SJames Smart 			ctx->cb = cb;
1413580c0255SJames Smart 			ctx->arg = arg;
1414580c0255SJames Smart 		}
1415580c0255SJames Smart 
1416580c0255SJames Smart 		memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
1417580c0255SJames Smart 		ctx->ctx = hw;
1418580c0255SJames Smart 
1419580c0255SJames Smart 		spin_lock_irqsave(&hw->cmd_lock, flags);
1420580c0255SJames Smart 
1421580c0255SJames Smart 		/* Add to pending list */
1422580c0255SJames Smart 		INIT_LIST_HEAD(&ctx->list_entry);
1423580c0255SJames Smart 		list_add_tail(&ctx->list_entry, &hw->cmd_pending);
1424580c0255SJames Smart 
1425580c0255SJames Smart 		/* Submit as much of the pending list as we can */
1426580c0255SJames Smart 		rc = efct_hw_cmd_submit_pending(hw);
1427580c0255SJames Smart 
1428580c0255SJames Smart 		spin_unlock_irqrestore(&hw->cmd_lock, flags);
1429580c0255SJames Smart 	}
1430580c0255SJames Smart 
1431580c0255SJames Smart 	return rc;
1432580c0255SJames Smart }
1433580c0255SJames Smart 
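/*
 * Complete the oldest in-flight mailbox command using the MQE just read,
 * submit any queued commands that now fit in the MQ, and invoke the
 * command's completion callback.
 */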
1434580c0255SJames Smart static int
1435580c0255SJames Smart efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
1436580c0255SJames Smart 			size_t size)
1437580c0255SJames Smart {
1438580c0255SJames Smart 	struct efct_command_ctx *ctx = NULL;
1439580c0255SJames Smart 	unsigned long flags = 0;
1440580c0255SJames Smart 
1441580c0255SJames Smart 	spin_lock_irqsave(&hw->cmd_lock, flags);
1442580c0255SJames Smart 	if (!list_empty(&hw->cmd_head)) {
1443580c0255SJames Smart 		ctx = list_first_entry(&hw->cmd_head,
1444580c0255SJames Smart 				       struct efct_command_ctx, list_entry);
1445580c0255SJames Smart 		list_del_init(&ctx->list_entry);
1446580c0255SJames Smart 	}
1447580c0255SJames Smart 	if (!ctx) {
1448580c0255SJames Smart 		efc_log_err(hw->os, "no command context\n");
1449580c0255SJames Smart 		spin_unlock_irqrestore(&hw->cmd_lock, flags);
1450580c0255SJames Smart 		return -EIO;
1451580c0255SJames Smart 	}
1452580c0255SJames Smart 
1453580c0255SJames Smart 	hw->cmd_head_count--;
1454580c0255SJames Smart 
1455580c0255SJames Smart 	/* Post any pending requests */
1456580c0255SJames Smart 	efct_hw_cmd_submit_pending(hw);
1457580c0255SJames Smart 
1458580c0255SJames Smart 	spin_unlock_irqrestore(&hw->cmd_lock, flags);
1459580c0255SJames Smart 
1460580c0255SJames Smart 	if (ctx->cb) {
1461580c0255SJames Smart 		memcpy(ctx->buf, mqe, size);
1462580c0255SJames Smart 		ctx->cb(hw, status, ctx->buf, ctx->arg);
1463580c0255SJames Smart 	}
1464580c0255SJames Smart 
1465580c0255SJames Smart 	mempool_free(ctx, hw->cmd_ctx_pool);
1466580c0255SJames Smart 
1467580c0255SJames Smart 	return 0;
1468580c0255SJames Smart }
1469580c0255SJames Smart 
1470580c0255SJames Smart static int
1471580c0255SJames Smart efct_hw_mq_process(struct efct_hw *hw,
1472580c0255SJames Smart 		   int status, struct sli4_queue *mq)
1473580c0255SJames Smart {
1474580c0255SJames Smart 	u8 mqe[SLI4_BMBX_SIZE];
1475580c0255SJames Smart 	int rc;
1476580c0255SJames Smart 
1477580c0255SJames Smart 	rc = sli_mq_read(&hw->sli, mq, mqe);
1478580c0255SJames Smart 	if (!rc)
1479580c0255SJames Smart 		rc = efct_hw_command_process(hw, status, mqe, mq->size);
1480580c0255SJames Smart 
1481580c0255SJames Smart 	return rc;
1482580c0255SJames Smart }
1483580c0255SJames Smart 
1484580c0255SJames Smart static int
1485580c0255SJames Smart efct_hw_command_cancel(struct efct_hw *hw)
1486580c0255SJames Smart {
1487580c0255SJames Smart 	unsigned long flags = 0;
1488580c0255SJames Smart 	int rc = 0;
1489580c0255SJames Smart 
1490580c0255SJames Smart 	spin_lock_irqsave(&hw->cmd_lock, flags);
1491580c0255SJames Smart 
1492580c0255SJames Smart 	/*
1493580c0255SJames Smart 	 * Manually clean up remaining commands. Note: since this calls
1494580c0255SJames Smart 	 * efct_hw_command_process(), we'll also process the cmd_pending
1495580c0255SJames Smart 	 * list, so no need to manually clean that out.
1496580c0255SJames Smart 	 */
1497580c0255SJames Smart 	while (!list_empty(&hw->cmd_head)) {
1498580c0255SJames Smart 		u8		mqe[SLI4_BMBX_SIZE] = { 0 };
1499580c0255SJames Smart 		struct efct_command_ctx *ctx;
1500580c0255SJames Smart 
1501580c0255SJames Smart 		ctx = list_first_entry(&hw->cmd_head,
1502580c0255SJames Smart 				       struct efct_command_ctx, list_entry);
1503580c0255SJames Smart 
1504580c0255SJames Smart 		efc_log_debug(hw->os, "hung command %08x\n",
1505580c0255SJames Smart 			      !ctx ? U32_MAX :
1506580c0255SJames Smart 			      (!ctx->buf ? U32_MAX : *((u32 *)ctx->buf)));
1507580c0255SJames Smart 		spin_unlock_irqrestore(&hw->cmd_lock, flags);
1508580c0255SJames Smart 		rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
1509580c0255SJames Smart 		spin_lock_irqsave(&hw->cmd_lock, flags);
1510580c0255SJames Smart 	}
1511580c0255SJames Smart 
1512580c0255SJames Smart 	spin_unlock_irqrestore(&hw->cmd_lock, flags);
1513580c0255SJames Smart 
1514580c0255SJames Smart 	return rc;
1515580c0255SJames Smart }
1516580c0255SJames Smart 
1517580c0255SJames Smart static void
1518580c0255SJames Smart efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
1519580c0255SJames Smart {
1520580c0255SJames Smart 	struct efct_mbox_rqst_ctx *ctx = arg;
1521580c0255SJames Smart 
1522580c0255SJames Smart 	if (ctx) {
1523580c0255SJames Smart 		if (ctx->callback)
1524580c0255SJames Smart 			(*ctx->callback)(hw->os->efcport, status, mqe,
1525580c0255SJames Smart 					 ctx->arg);
1526580c0255SJames Smart 
1527580c0255SJames Smart 		mempool_free(ctx, hw->mbox_rqst_pool);
1528580c0255SJames Smart 	}
1529580c0255SJames Smart }
1530580c0255SJames Smart 
1531580c0255SJames Smart int
1532580c0255SJames Smart efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
1533580c0255SJames Smart {
1534580c0255SJames Smart 	struct efct_mbox_rqst_ctx *ctx;
1535580c0255SJames Smart 	struct efct *efct = base;
1536580c0255SJames Smart 	struct efct_hw *hw = &efct->hw;
1537580c0255SJames Smart 	int rc;
1538580c0255SJames Smart 
1539580c0255SJames Smart 	/*
1540580c0255SJames Smart 	 * Allocate a callback context (which includes the mbox cmd buffer);
1541580c0255SJames Smart 	 * we need this to be persistent as the mbox cmd submission may be
1542580c0255SJames Smart 	 * queued and executed later.
1543580c0255SJames Smart 	 */
1544580c0255SJames Smart 	ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
1545580c0255SJames Smart 	if (!ctx)
1546580c0255SJames Smart 		return -EIO;
1547580c0255SJames Smart 
1548580c0255SJames Smart 	ctx->callback = cb;
1549580c0255SJames Smart 	ctx->arg = arg;
1550580c0255SJames Smart 
1551580c0255SJames Smart 	rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
1552580c0255SJames Smart 	if (rc) {
1553580c0255SJames Smart 		efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
1554580c0255SJames Smart 		mempool_free(ctx, hw->mbox_rqst_pool);
1555580c0255SJames Smart 		return -EIO;
1556580c0255SJames Smart 	}
1557580c0255SJames Smart 
1558580c0255SJames Smart 	return 0;
1559580c0255SJames Smart }
156063de5132SJames Smart 
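/*
 * Pop a HW IO from the free list (caller holds hw->io_lock), move it to
 * the in-use list and bind it to the WQ assigned to the current CPU,
 * falling back to hw_wq[0] if no per-CPU WQ is assigned.
 */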
156163de5132SJames Smart static inline struct efct_hw_io *
156263de5132SJames Smart _efct_hw_io_alloc(struct efct_hw *hw)
156363de5132SJames Smart {
156463de5132SJames Smart 	struct efct_hw_io *io = NULL;
156563de5132SJames Smart 
156663de5132SJames Smart 	if (!list_empty(&hw->io_free)) {
156763de5132SJames Smart 		io = list_first_entry(&hw->io_free, struct efct_hw_io,
156863de5132SJames Smart 				      list_entry);
156963de5132SJames Smart 		list_del(&io->list_entry);
157063de5132SJames Smart 	}
157163de5132SJames Smart 	if (io) {
157263de5132SJames Smart 		INIT_LIST_HEAD(&io->list_entry);
157363de5132SJames Smart 		list_add_tail(&io->list_entry, &hw->io_inuse);
157463de5132SJames Smart 		io->state = EFCT_HW_IO_STATE_INUSE;
157563de5132SJames Smart 		io->abort_reqtag = U32_MAX;
157663de5132SJames Smart 		io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
157763de5132SJames Smart 		if (!io->wq) {
157863de5132SJames Smart 			efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
157963de5132SJames Smart 				    raw_smp_processor_id());
158063de5132SJames Smart 			io->wq = hw->hw_wq[0];
158163de5132SJames Smart 		}
158263de5132SJames Smart 		kref_init(&io->ref);
158363de5132SJames Smart 		io->release = efct_hw_io_free_internal;
158463de5132SJames Smart 	} else {
158563de5132SJames Smart 		atomic_add(1, &hw->io_alloc_failed_count);
158663de5132SJames Smart 	}
158763de5132SJames Smart 
158863de5132SJames Smart 	return io;
158963de5132SJames Smart }
159063de5132SJames Smart 
159163de5132SJames Smart struct efct_hw_io *
159263de5132SJames Smart efct_hw_io_alloc(struct efct_hw *hw)
159363de5132SJames Smart {
159463de5132SJames Smart 	struct efct_hw_io *io = NULL;
159563de5132SJames Smart 	unsigned long flags = 0;
159663de5132SJames Smart 
159763de5132SJames Smart 	spin_lock_irqsave(&hw->io_lock, flags);
159863de5132SJames Smart 	io = _efct_hw_io_alloc(hw);
159963de5132SJames Smart 	spin_unlock_irqrestore(&hw->io_lock, flags);
160063de5132SJames Smart 
160163de5132SJames Smart 	return io;
160263de5132SJames Smart }
160363de5132SJames Smart 
160463de5132SJames Smart static void
160563de5132SJames Smart efct_hw_io_free_move_correct_list(struct efct_hw *hw,
160663de5132SJames Smart 				  struct efct_hw_io *io)
160763de5132SJames Smart {
160863de5132SJames Smart 	/*
160963de5132SJames Smart 	 * When an IO is freed, depending on the exchange busy flag,
161063de5132SJames Smart 	 * move it to the correct list.
161163de5132SJames Smart 	 */
161263de5132SJames Smart 	if (io->xbusy) {
161363de5132SJames Smart 		/*
161463de5132SJames Smart 		 * add to wait_free list and wait for XRI_ABORTED CQEs to clean
161563de5132SJames Smart 		 * up
161663de5132SJames Smart 		 */
161763de5132SJames Smart 		INIT_LIST_HEAD(&io->list_entry);
161863de5132SJames Smart 		list_add_tail(&io->list_entry, &hw->io_wait_free);
161963de5132SJames Smart 		io->state = EFCT_HW_IO_STATE_WAIT_FREE;
162063de5132SJames Smart 	} else {
162163de5132SJames Smart 		/* IO not busy, add to free list */
162263de5132SJames Smart 		INIT_LIST_HEAD(&io->list_entry);
162363de5132SJames Smart 		list_add_tail(&io->list_entry, &hw->io_free);
162463de5132SJames Smart 		io->state = EFCT_HW_IO_STATE_FREE;
162563de5132SJames Smart 	}
162663de5132SJames Smart }
162763de5132SJames Smart 
162863de5132SJames Smart static inline void
162963de5132SJames Smart efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
163063de5132SJames Smart {
163163de5132SJames Smart 	/* initialize IO fields */
163263de5132SJames Smart 	efct_hw_init_free_io(io);
163363de5132SJames Smart 
163463de5132SJames Smart 	/* Restore default SGL */
163563de5132SJames Smart 	efct_hw_io_restore_sgl(hw, io);
163663de5132SJames Smart }
163763de5132SJames Smart 
163863de5132SJames Smart void
163963de5132SJames Smart efct_hw_io_free_internal(struct kref *arg)
164063de5132SJames Smart {
164163de5132SJames Smart 	unsigned long flags = 0;
164263de5132SJames Smart 	struct efct_hw_io *io =	container_of(arg, struct efct_hw_io, ref);
164363de5132SJames Smart 	struct efct_hw *hw = io->hw;
164463de5132SJames Smart 
164563de5132SJames Smart 	/* perform common cleanup */
164663de5132SJames Smart 	efct_hw_io_free_common(hw, io);
164763de5132SJames Smart 
164863de5132SJames Smart 	spin_lock_irqsave(&hw->io_lock, flags);
164963de5132SJames Smart 	/* remove from in-use list */
165063de5132SJames Smart 	if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
165163de5132SJames Smart 		list_del_init(&io->list_entry);
165263de5132SJames Smart 		efct_hw_io_free_move_correct_list(hw, io);
165363de5132SJames Smart 	}
165463de5132SJames Smart 	spin_unlock_irqrestore(&hw->io_lock, flags);
165563de5132SJames Smart }
165663de5132SJames Smart 
165763de5132SJames Smart int
165863de5132SJames Smart efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
165963de5132SJames Smart {
166063de5132SJames Smart 	return kref_put(&io->ref, io->release);
166163de5132SJames Smart }
166263de5132SJames Smart 
166363de5132SJames Smart struct efct_hw_io *
166463de5132SJames Smart efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
166563de5132SJames Smart {
166663de5132SJames Smart 	u32 ioindex;
166763de5132SJames Smart 
166863de5132SJames Smart 	ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
166963de5132SJames Smart 	return hw->io[ioindex];
167063de5132SJames Smart }
167163de5132SJames Smart 
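/*
 * Set up the fixed, type-specific portion of an IO's SGL; data SGEs are
 * then appended with efct_hw_io_add_sge(). A minimal illustrative
 * sequence (sketch only - 'buf_phys' and 'buf_len' stand for hypothetical
 * caller-owned DMA address/length values, not names from this file):
 *
 *	struct efct_hw_io *io = efct_hw_io_alloc(hw);
 *
 *	if (io && !efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_READ))
 *		efct_hw_io_add_sge(hw, io, buf_phys, buf_len);
 */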
167263de5132SJames Smart int
167363de5132SJames Smart efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
167463de5132SJames Smart 		     enum efct_hw_io_type type)
167563de5132SJames Smart {
167663de5132SJames Smart 	struct sli4_sge	*data = NULL;
167763de5132SJames Smart 	u32 i = 0;
167863de5132SJames Smart 	u32 skips = 0;
167963de5132SJames Smart 	u32 sge_flags = 0;
168063de5132SJames Smart 
168163de5132SJames Smart 	if (!io) {
168263de5132SJames Smart 		efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
168363de5132SJames Smart 		return -EIO;
168463de5132SJames Smart 	}
168563de5132SJames Smart 
168663de5132SJames Smart 	/* Clear / reset the scatter-gather list */
168763de5132SJames Smart 	io->sgl = &io->def_sgl;
168863de5132SJames Smart 	io->sgl_count = io->def_sgl_count;
168963de5132SJames Smart 	io->first_data_sge = 0;
169063de5132SJames Smart 
169163de5132SJames Smart 	memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
169263de5132SJames Smart 	io->n_sge = 0;
169363de5132SJames Smart 	io->sge_offset = 0;
169463de5132SJames Smart 
169563de5132SJames Smart 	io->type = type;
169663de5132SJames Smart 
169763de5132SJames Smart 	data = io->sgl->virt;
169863de5132SJames Smart 
169963de5132SJames Smart 	/*
170063de5132SJames Smart 	 * Some IO types have underlying hardware requirements on the order
170163de5132SJames Smart 	 * of SGEs. Process all special entries here.
170263de5132SJames Smart 	 */
170363de5132SJames Smart 	switch (type) {
170463de5132SJames Smart 	case EFCT_HW_IO_TARGET_WRITE:
170563de5132SJames Smart 
170663de5132SJames Smart 		/* populate host resident XFER_RDY buffer */
170763de5132SJames Smart 		sge_flags = le32_to_cpu(data->dw2_flags);
170863de5132SJames Smart 		sge_flags &= (~SLI4_SGE_TYPE_MASK);
170963de5132SJames Smart 		sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
171063de5132SJames Smart 		data->buffer_address_high =
171163de5132SJames Smart 			cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
171263de5132SJames Smart 		data->buffer_address_low  =
171363de5132SJames Smart 			cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
171463de5132SJames Smart 		data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
171563de5132SJames Smart 		data->dw2_flags = cpu_to_le32(sge_flags);
171663de5132SJames Smart 		data++;
171763de5132SJames Smart 
171863de5132SJames Smart 		skips = EFCT_TARGET_WRITE_SKIPS;
171963de5132SJames Smart 
172063de5132SJames Smart 		io->n_sge = 1;
172163de5132SJames Smart 		break;
172263de5132SJames Smart 	case EFCT_HW_IO_TARGET_READ:
172363de5132SJames Smart 		/*
172463de5132SJames Smart 		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
172563de5132SJames Smart 		 */
172663de5132SJames Smart 		skips = EFCT_TARGET_READ_SKIPS;
172763de5132SJames Smart 		break;
172863de5132SJames Smart 	case EFCT_HW_IO_TARGET_RSP:
172963de5132SJames Smart 		/*
173063de5132SJames Smart 		 * No skips, etc. for FCP_TRSP64
173163de5132SJames Smart 		 */
173263de5132SJames Smart 		break;
173363de5132SJames Smart 	default:
173463de5132SJames Smart 		efc_log_err(hw->os, "unsupported IO type %#x\n", type);
173563de5132SJames Smart 		return -EIO;
173663de5132SJames Smart 	}
173763de5132SJames Smart 
173863de5132SJames Smart 	/*
173963de5132SJames Smart 	 * Write skip entries
174063de5132SJames Smart 	 */
174163de5132SJames Smart 	for (i = 0; i < skips; i++) {
174263de5132SJames Smart 		sge_flags = le32_to_cpu(data->dw2_flags);
174363de5132SJames Smart 		sge_flags &= (~SLI4_SGE_TYPE_MASK);
174463de5132SJames Smart 		sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
174563de5132SJames Smart 		data->dw2_flags = cpu_to_le32(sge_flags);
174663de5132SJames Smart 		data++;
174763de5132SJames Smart 	}
174863de5132SJames Smart 
174963de5132SJames Smart 	io->n_sge += skips;
175063de5132SJames Smart 
175163de5132SJames Smart 	/*
175263de5132SJames Smart 	 * Set last
175363de5132SJames Smart 	 */
175463de5132SJames Smart 	sge_flags = le32_to_cpu(data->dw2_flags);
175563de5132SJames Smart 	sge_flags |= SLI4_SGE_LAST;
175663de5132SJames Smart 	data->dw2_flags = cpu_to_le32(sge_flags);
175763de5132SJames Smart 
175863de5132SJames Smart 	return 0;
175963de5132SJames Smart }
176063de5132SJames Smart 
176163de5132SJames Smart int
176263de5132SJames Smart efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
176363de5132SJames Smart 		   uintptr_t addr, u32 length)
176463de5132SJames Smart {
176563de5132SJames Smart 	struct sli4_sge	*data = NULL;
176663de5132SJames Smart 	u32 sge_flags = 0;
176763de5132SJames Smart 
176863de5132SJames Smart 	if (!io || !addr || !length) {
176963de5132SJames Smart 		efc_log_err(hw->os,
177063de5132SJames Smart 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
177163de5132SJames Smart 			    hw, io, addr, length);
177263de5132SJames Smart 		return -EIO;
177363de5132SJames Smart 	}
177463de5132SJames Smart 
177563de5132SJames Smart 	if (length > hw->sli.sge_supported_length) {
177663de5132SJames Smart 		efc_log_err(hw->os,
177763de5132SJames Smart 			    "length of SGE %d bigger than allowed %d\n",
177863de5132SJames Smart 			    length, hw->sli.sge_supported_length);
177963de5132SJames Smart 		return -EIO;
178063de5132SJames Smart 	}
178163de5132SJames Smart 
178263de5132SJames Smart 	data = io->sgl->virt;
178363de5132SJames Smart 	data += io->n_sge;
178463de5132SJames Smart 
178563de5132SJames Smart 	sge_flags = le32_to_cpu(data->dw2_flags);
178663de5132SJames Smart 	sge_flags &= ~SLI4_SGE_TYPE_MASK;
178763de5132SJames Smart 	sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
178863de5132SJames Smart 	sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
178963de5132SJames Smart 	sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
179063de5132SJames Smart 
179163de5132SJames Smart 	data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
179263de5132SJames Smart 	data->buffer_address_low  = cpu_to_le32(lower_32_bits(addr));
179363de5132SJames Smart 	data->buffer_length = cpu_to_le32(length);
179463de5132SJames Smart 
179563de5132SJames Smart 	/*
179663de5132SJames Smart 	 * Always assume this is the last entry and mark as such.
179763de5132SJames Smart 	 * If this is not the first entry unset the "last SGE"
179863de5132SJames Smart 	 * indication for the previous entry
179963de5132SJames Smart 	 */
180063de5132SJames Smart 	sge_flags |= SLI4_SGE_LAST;
180163de5132SJames Smart 	data->dw2_flags = cpu_to_le32(sge_flags);
180263de5132SJames Smart 
180363de5132SJames Smart 	if (io->n_sge) {
180463de5132SJames Smart 		sge_flags = le32_to_cpu(data[-1].dw2_flags);
180563de5132SJames Smart 		sge_flags &= ~SLI4_SGE_LAST;
180663de5132SJames Smart 		data[-1].dw2_flags = cpu_to_le32(sge_flags);
180763de5132SJames Smart 	}
180863de5132SJames Smart 
180963de5132SJames Smart 	/* Set first_data_bde if not previously set */
181063de5132SJames Smart 	/* Set first_data_sge if not previously set */
181163de5132SJames Smart 		io->first_data_sge = io->n_sge;
181263de5132SJames Smart 
181363de5132SJames Smart 	io->sge_offset += length;
181463de5132SJames Smart 	io->n_sge++;
181563de5132SJames Smart 
181663de5132SJames Smart 	return 0;
181763de5132SJames Smart }
181863de5132SJames Smart 
181963de5132SJames Smart void
182063de5132SJames Smart efct_hw_io_abort_all(struct efct_hw *hw)
182163de5132SJames Smart {
182263de5132SJames Smart 	struct efct_hw_io *io_to_abort	= NULL;
182363de5132SJames Smart 	struct efct_hw_io *next_io = NULL;
182463de5132SJames Smart 
182563de5132SJames Smart 	list_for_each_entry_safe(io_to_abort, next_io,
182663de5132SJames Smart 				 &hw->io_inuse, list_entry) {
182763de5132SJames Smart 		efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
182863de5132SJames Smart 	}
182963de5132SJames Smart }
183063de5132SJames Smart 
183163de5132SJames Smart static void
183263de5132SJames Smart efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
183363de5132SJames Smart {
183463de5132SJames Smart 	struct efct_hw_io *io = arg;
183563de5132SJames Smart 	struct efct_hw *hw = io->hw;
183663de5132SJames Smart 	u32 ext = 0;
183763de5132SJames Smart 	u32 len = 0;
183863de5132SJames Smart 	struct hw_wq_callback *wqcb;
183963de5132SJames Smart 
184063de5132SJames Smart 	/*
184163de5132SJames Smart 	 * For IOs that were aborted internally, we may need to issue the
184163de5132SJames Smart 	 * callback here depending on whether an XRI_ABORTED CQE is expected or
184363de5132SJames Smart 	 * not. If the status is Local Reject/No XRI, then
184463de5132SJames Smart 	 * issue the callback now.
184563de5132SJames Smart 	 */
184663de5132SJames Smart 	ext = sli_fc_ext_status(&hw->sli, cqe);
184763de5132SJames Smart 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
184863de5132SJames Smart 	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
184963de5132SJames Smart 		efct_hw_done_t done = io->done;
185063de5132SJames Smart 
185163de5132SJames Smart 		io->done = NULL;
185263de5132SJames Smart 
185363de5132SJames Smart 		/*
185463de5132SJames Smart 		 * Use the latched status as this is always saved for an
185563de5132SJames Smart 		 * internal abort. Note: we won't have both a done and an
185663de5132SJames Smart 		 * abort_done function, so don't worry about clobbering the
185763de5132SJames Smart 		 * len, status and ext fields.
185863de5132SJames Smart 		 */
185963de5132SJames Smart 		status = io->saved_status;
186063de5132SJames Smart 		len = io->saved_len;
186163de5132SJames Smart 		ext = io->saved_ext;
186263de5132SJames Smart 		io->status_saved = false;
186363de5132SJames Smart 		done(io, len, status, ext, io->arg);
186463de5132SJames Smart 	}
186563de5132SJames Smart 
186663de5132SJames Smart 	if (io->abort_done) {
186763de5132SJames Smart 		efct_hw_done_t done = io->abort_done;
186863de5132SJames Smart 
186963de5132SJames Smart 		io->abort_done = NULL;
187063de5132SJames Smart 		done(io, len, status, ext, io->abort_arg);
187163de5132SJames Smart 	}
187263de5132SJames Smart 
187363de5132SJames Smart 	/* clear abort bit to indicate abort is complete */
187463de5132SJames Smart 	io->abort_in_progress = false;
187563de5132SJames Smart 
187663de5132SJames Smart 	/* Free the WQ callback */
187763de5132SJames Smart 	if (io->abort_reqtag == U32_MAX) {
187863de5132SJames Smart 		efc_log_err(hw->os, "HW IO already freed\n");
187963de5132SJames Smart 		return;
188063de5132SJames Smart 	}
188163de5132SJames Smart 
188263de5132SJames Smart 	wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
188363de5132SJames Smart 	efct_hw_reqtag_free(hw, wqcb);
188463de5132SJames Smart 
188563de5132SJames Smart 	/*
188663de5132SJames Smart 	 * Call efct_hw_io_free() because this releases the WQ reservation as
188763de5132SJames Smart 	 * well as doing the refcount put. Don't duplicate the code here.
188863de5132SJames Smart 	 */
188963de5132SJames Smart 	(void)efct_hw_io_free(hw, io);
189063de5132SJames Smart }
189163de5132SJames Smart 
189263de5132SJames Smart static void
189363de5132SJames Smart efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
189463de5132SJames Smart {
189563de5132SJames Smart 	struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
189663de5132SJames Smart 
189763de5132SJames Smart 	memset(abort, 0, hw->sli.wqe_size);
189863de5132SJames Smart 
189963de5132SJames Smart 	abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
190063de5132SJames Smart 	abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
190163de5132SJames Smart 
190263de5132SJames Smart 	/* Suppress ABTS retries */
190363de5132SJames Smart 	abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
190463de5132SJames Smart 
190563de5132SJames Smart 	abort->t_tag  = cpu_to_le32(wqe->id);
190663de5132SJames Smart 	abort->command = SLI4_WQE_ABORT;
190763de5132SJames Smart 	abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
190863de5132SJames Smart 
190963de5132SJames Smart 	abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
191063de5132SJames Smart 
191163de5132SJames Smart 	abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
191263de5132SJames Smart }
191363de5132SJames Smart 
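/*
 * Issue an ABORT_WQE for io_to_abort: if send_abts is true an ABTS is
 * sent on the wire, otherwise the exchange is aborted locally. 'cb' (an
 * efct_hw_done_t) is invoked with 'arg' when the abort completes.
 */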
191463de5132SJames Smart int
191563de5132SJames Smart efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
191663de5132SJames Smart 		 bool send_abts, void *cb, void *arg)
191763de5132SJames Smart {
191863de5132SJames Smart 	struct hw_wq_callback *wqcb;
191963de5132SJames Smart 	unsigned long flags = 0;
192063de5132SJames Smart 
192163de5132SJames Smart 	if (!io_to_abort) {
192263de5132SJames Smart 		efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
192363de5132SJames Smart 			    hw, io_to_abort);
192463de5132SJames Smart 		return -EIO;
192563de5132SJames Smart 	}
192663de5132SJames Smart 
192763de5132SJames Smart 	if (hw->state != EFCT_HW_STATE_ACTIVE) {
192863de5132SJames Smart 		efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
192963de5132SJames Smart 			    hw->state);
193063de5132SJames Smart 		return -EIO;
193163de5132SJames Smart 	}
193263de5132SJames Smart 
193363de5132SJames Smart 	/* take a reference on IO being aborted */
193463de5132SJames Smart 	if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
193563de5132SJames Smart 		/* command no longer active */
193663de5132SJames Smart 		efc_log_debug(hw->os,
193763de5132SJames Smart 			      "io not active xri=0x%x tag=0x%x\n",
193863de5132SJames Smart 			      io_to_abort->indicator, io_to_abort->reqtag);
193963de5132SJames Smart 		return -ENOENT;
194063de5132SJames Smart 	}
194163de5132SJames Smart 
194263de5132SJames Smart 	/* Must have a valid WQ reference */
194363de5132SJames Smart 	if (!io_to_abort->wq) {
194463de5132SJames Smart 		efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
194563de5132SJames Smart 			      io_to_abort->indicator);
194663de5132SJames Smart 		/* efct_ref_get(): same function */
194763de5132SJames Smart 		kref_put(&io_to_abort->ref, io_to_abort->release);
194863de5132SJames Smart 		return -ENOENT;
194963de5132SJames Smart 	}
195063de5132SJames Smart 
195163de5132SJames Smart 	/*
195263de5132SJames Smart 	 * Validation checks complete; now check to see if already being
195363de5132SJames Smart 	 * aborted, if not set the flag.
195463de5132SJames Smart 	 */
195563de5132SJames Smart 	if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
195663de5132SJames Smart 		/* efct_ref_get(): same function */
195763de5132SJames Smart 		kref_put(&io_to_abort->ref, io_to_abort->release);
195863de5132SJames Smart 		efc_log_debug(hw->os,
195963de5132SJames Smart 			      "io already being aborted xri=0x%x tag=0x%x\n",
196063de5132SJames Smart 			      io_to_abort->indicator, io_to_abort->reqtag);
196163de5132SJames Smart 		return -EINPROGRESS;
196263de5132SJames Smart 	}
196363de5132SJames Smart 
196463de5132SJames Smart 	/*
196563de5132SJames Smart 	 * If we got here, the possibilities are:
196663de5132SJames Smart 	 * - host owned xri
196763de5132SJames Smart 	 *	- io_to_abort->wq_index != U32_MAX
196863de5132SJames Smart 	 *		- submit ABORT_WQE to same WQ
196963de5132SJames Smart 	 * - port owned xri:
197063de5132SJames Smart 	 *	- rxri: io_to_abort->wq_index == U32_MAX
197163de5132SJames Smart 	 *		- submit ABORT_WQE to any WQ
197263de5132SJames Smart 	 *	- non-rxri
197363de5132SJames Smart 	 *		- io_to_abort->index != U32_MAX
197463de5132SJames Smart 	 *			- submit ABORT_WQE to same WQ
197563de5132SJames Smart 	 *		- io_to_abort->index == U32_MAX
197663de5132SJames Smart 	 *			- submit ABORT_WQE to any WQ
197763de5132SJames Smart 	 */
197863de5132SJames Smart 	io_to_abort->abort_done = cb;
197963de5132SJames Smart 	io_to_abort->abort_arg  = arg;
198063de5132SJames Smart 
198163de5132SJames Smart 	/* Allocate a request tag for the abort portion of this IO */
198263de5132SJames Smart 	wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
198363de5132SJames Smart 	if (!wqcb) {
198463de5132SJames Smart 		efc_log_err(hw->os, "can't allocate request tag\n");
198563de5132SJames Smart 		return -ENOSPC;
198663de5132SJames Smart 	}
198763de5132SJames Smart 
198863de5132SJames Smart 	io_to_abort->abort_reqtag = wqcb->instance_index;
198963de5132SJames Smart 	io_to_abort->wqe.send_abts = send_abts;
199063de5132SJames Smart 	io_to_abort->wqe.id = io_to_abort->indicator;
199163de5132SJames Smart 	io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
199263de5132SJames Smart 
199363de5132SJames Smart 	/*
199463de5132SJames Smart 	 * If the wqe is on the pending list, then set this wqe to be
199563de5132SJames Smart 	 * aborted when the IO's wqe is removed from the list.
199663de5132SJames Smart 	 */
199763de5132SJames Smart 	if (io_to_abort->wq) {
199863de5132SJames Smart 		spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
199963de5132SJames Smart 		if (io_to_abort->wqe.list_entry.next) {
200063de5132SJames Smart 			io_to_abort->wqe.abort_wqe_submit_needed = true;
200163de5132SJames Smart 			spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
200263de5132SJames Smart 					       flags);
200363de5132SJames Smart 			return 0;
200463de5132SJames Smart 		}
200563de5132SJames Smart 		spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
200663de5132SJames Smart 	}
200763de5132SJames Smart 
200863de5132SJames Smart 	efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
200963de5132SJames Smart 
201063de5132SJames Smart 	/* ABORT_WQE does not actually utilize an XRI on the Port,
201163de5132SJames Smart 	 * therefore, keep xbusy as-is to track the exchange's state,
201263de5132SJames Smart 	 * not the ABORT_WQE's state
201363de5132SJames Smart 	 */
201463de5132SJames Smart 	if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
201563de5132SJames Smart 		io_to_abort->abort_in_progress = false;
201663de5132SJames Smart 		/* efct_ref_get(): same function */
201763de5132SJames Smart 		kref_put(&io_to_abort->ref, io_to_abort->release);
201863de5132SJames Smart 		return -EIO;
201963de5132SJames Smart 	}
202063de5132SJames Smart 
202163de5132SJames Smart 	return 0;
202263de5132SJames Smart }
202363de5132SJames Smart 
202463de5132SJames Smart void
202563de5132SJames Smart efct_hw_reqtag_pool_free(struct efct_hw *hw)
202663de5132SJames Smart {
202763de5132SJames Smart 	u32 i;
202863de5132SJames Smart 	struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
202963de5132SJames Smart 	struct hw_wq_callback *wqcb = NULL;
203063de5132SJames Smart 
203163de5132SJames Smart 	if (reqtag_pool) {
203263de5132SJames Smart 		for (i = 0; i < U16_MAX; i++) {
203363de5132SJames Smart 			wqcb = reqtag_pool->tags[i];
203463de5132SJames Smart 			if (!wqcb)
203563de5132SJames Smart 				continue;
203663de5132SJames Smart 
203763de5132SJames Smart 			kfree(wqcb);
203863de5132SJames Smart 		}
203963de5132SJames Smart 		kfree(reqtag_pool);
204063de5132SJames Smart 		hw->wq_reqtag_pool = NULL;
204163de5132SJames Smart 	}
204263de5132SJames Smart }
204363de5132SJames Smart 
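/*
 * Allocate the WQ request-tag pool: up to U16_MAX hw_wq_callback entries,
 * indexed by instance number and linked on a spinlock-protected free list.
 */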
204463de5132SJames Smart struct reqtag_pool *
204563de5132SJames Smart efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
204663de5132SJames Smart {
204763de5132SJames Smart 	u32 i = 0;
204863de5132SJames Smart 	struct reqtag_pool *reqtag_pool;
204963de5132SJames Smart 	struct hw_wq_callback *wqcb;
205063de5132SJames Smart 
205163de5132SJames Smart 	reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
205263de5132SJames Smart 	if (!reqtag_pool)
205363de5132SJames Smart 		return NULL;
205463de5132SJames Smart 
205563de5132SJames Smart 	INIT_LIST_HEAD(&reqtag_pool->freelist);
205663de5132SJames Smart 	/* initialize reqtag pool lock */
205763de5132SJames Smart 	spin_lock_init(&reqtag_pool->lock);
205863de5132SJames Smart 	for (i = 0; i < U16_MAX; i++) {
205963de5132SJames Smart 		wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
206063de5132SJames Smart 		if (!wqcb)
206163de5132SJames Smart 			break;
206263de5132SJames Smart 
206363de5132SJames Smart 		reqtag_pool->tags[i] = wqcb;
206463de5132SJames Smart 		wqcb->instance_index = i;
206563de5132SJames Smart 		wqcb->callback = NULL;
206663de5132SJames Smart 		wqcb->arg = NULL;
206763de5132SJames Smart 		INIT_LIST_HEAD(&wqcb->list_entry);
206863de5132SJames Smart 		list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
206963de5132SJames Smart 	}
207063de5132SJames Smart 
207163de5132SJames Smart 	return reqtag_pool;
207263de5132SJames Smart }
207363de5132SJames Smart 
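/*
 * Reserve a request tag and bind it to 'callback'/'arg'. The returned
 * wqcb->instance_index is carried in the WQE request tag and resolved by
 * efct_hw_reqtag_get_instance() at completion time. Returns NULL if no
 * callback is supplied or the pool is exhausted.
 */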
207463de5132SJames Smart struct hw_wq_callback *
207563de5132SJames Smart efct_hw_reqtag_alloc(struct efct_hw *hw,
207663de5132SJames Smart 		     void (*callback)(void *arg, u8 *cqe, int status),
207763de5132SJames Smart 		     void *arg)
207863de5132SJames Smart {
207963de5132SJames Smart 	struct hw_wq_callback *wqcb = NULL;
208063de5132SJames Smart 	struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
208163de5132SJames Smart 	unsigned long flags = 0;
208263de5132SJames Smart 
208363de5132SJames Smart 	if (!callback)
208463de5132SJames Smart 		return wqcb;
208563de5132SJames Smart 
208663de5132SJames Smart 	spin_lock_irqsave(&reqtag_pool->lock, flags);
208763de5132SJames Smart 
208863de5132SJames Smart 	if (!list_empty(&reqtag_pool->freelist)) {
208963de5132SJames Smart 		wqcb = list_first_entry(&reqtag_pool->freelist,
209063de5132SJames Smart 					struct hw_wq_callback, list_entry);
209163de5132SJames Smart 	}
209263de5132SJames Smart 
209363de5132SJames Smart 	if (wqcb) {
209463de5132SJames Smart 		list_del_init(&wqcb->list_entry);
209563de5132SJames Smart 		spin_unlock_irqrestore(&reqtag_pool->lock, flags);
209663de5132SJames Smart 		wqcb->callback = callback;
209763de5132SJames Smart 		wqcb->arg = arg;
209863de5132SJames Smart 	} else {
209963de5132SJames Smart 		spin_unlock_irqrestore(&reqtag_pool->lock, flags);
210063de5132SJames Smart 	}
210163de5132SJames Smart 
210263de5132SJames Smart 	return wqcb;
210363de5132SJames Smart }
210463de5132SJames Smart 
210563de5132SJames Smart void
210663de5132SJames Smart efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
210763de5132SJames Smart {
210863de5132SJames Smart 	unsigned long flags = 0;
210963de5132SJames Smart 	struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
211063de5132SJames Smart 
211163de5132SJames Smart 	if (!wqcb->callback)
211263de5132SJames Smart 		efc_log_err(hw->os, "WQCB is already freed\n");
211363de5132SJames Smart 
211463de5132SJames Smart 	spin_lock_irqsave(&reqtag_pool->lock, flags);
211563de5132SJames Smart 	wqcb->callback = NULL;
211663de5132SJames Smart 	wqcb->arg = NULL;
211763de5132SJames Smart 	INIT_LIST_HEAD(&wqcb->list_entry);
211863de5132SJames Smart 	list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
211963de5132SJames Smart 	spin_unlock_irqrestore(&reqtag_pool->lock, flags);
212063de5132SJames Smart }
212163de5132SJames Smart 
212263de5132SJames Smart struct hw_wq_callback *
212363de5132SJames Smart efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
212463de5132SJames Smart {
212563de5132SJames Smart 	struct hw_wq_callback *wqcb;
212663de5132SJames Smart 
212763de5132SJames Smart 	wqcb = hw->wq_reqtag_pool->tags[instance_index];
212863de5132SJames Smart 	if (!wqcb)
212963de5132SJames Smart 		efc_log_err(hw->os, "wqcb for instance %d is null\n",
213063de5132SJames Smart 			    instance_index);
213163de5132SJames Smart 
213263de5132SJames Smart 	return wqcb;
213363de5132SJames Smart }
2134e2cf422bSJames Smart 
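/*
 * Look up a queue id in the hash, which uses open addressing with linear
 * probing over EFCT_HW_Q_HASH_SIZE slots. Returns the queue's index, or
 * -1 if the id is not present.
 */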
2135e2cf422bSJames Smart int
2136e2cf422bSJames Smart efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
2137e2cf422bSJames Smart {
2138e2cf422bSJames Smart 	int index = -1;
2139e2cf422bSJames Smart 	int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
2140e2cf422bSJames Smart 
2141e2cf422bSJames Smart 	/*
2142e2cf422bSJames Smart 	 * Since the hash is always bigger than the maximum number of Qs,
2143e2cf422bSJames Smart 	 * we never have to worry about an infinite loop. We will always find
2144e2cf422bSJames Smart 	 * an unused entry.
2145e2cf422bSJames Smart 	 */
2146e2cf422bSJames Smart 	do {
2147e2cf422bSJames Smart 		if (hash[i].in_use && hash[i].id == id)
2148e2cf422bSJames Smart 			index = hash[i].index;
2149e2cf422bSJames Smart 		else
2150e2cf422bSJames Smart 			i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
2151e2cf422bSJames Smart 	} while (index == -1 && hash[i].in_use);
2152e2cf422bSJames Smart 
2153e2cf422bSJames Smart 	return index;
2154e2cf422bSJames Smart }
2155e2cf422bSJames Smart 
2156e2cf422bSJames Smart int
2157e2cf422bSJames Smart efct_hw_process(struct efct_hw *hw, u32 vector,
2158e2cf422bSJames Smart 		u32 max_isr_time_msec)
2159e2cf422bSJames Smart {
2160e2cf422bSJames Smart 	struct hw_eq *eq;
2161e2cf422bSJames Smart 
2162e2cf422bSJames Smart 	/*
2163e2cf422bSJames Smart 	 * The caller should disable interrupts if they wish to prevent us
2164e2cf422bSJames Smart 	 * from processing during a shutdown. The following states are defined:
2165e2cf422bSJames Smart 	 *   EFCT_HW_STATE_UNINITIALIZED - No queues allocated
2166e2cf422bSJames Smart 	 *   EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2167e2cf422bSJames Smart 	 *                                    queues are cleared.
2168e2cf422bSJames Smart 	 *   EFCT_HW_STATE_ACTIVE - Chip and queues are operational
2169e2cf422bSJames Smart 	 *   EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2170e2cf422bSJames Smart 	 *   EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2171e2cf422bSJames Smart 	 *                                        completions.
2172e2cf422bSJames Smart 	 */
2173e2cf422bSJames Smart 	if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
2174e2cf422bSJames Smart 		return 0;
2175e2cf422bSJames Smart 
2176e2cf422bSJames Smart 	/* Get pointer to struct hw_eq */
2177e2cf422bSJames Smart 	eq = hw->hw_eq[vector];
2178e2cf422bSJames Smart 	if (!eq)
2179e2cf422bSJames Smart 		return 0;
2180e2cf422bSJames Smart 
2181e2cf422bSJames Smart 	eq->use_count++;
2182e2cf422bSJames Smart 
2183e2cf422bSJames Smart 	return efct_hw_eq_process(hw, eq, max_isr_time_msec);
2184e2cf422bSJames Smart }
2185e2cf422bSJames Smart 
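/*
 * Drain an EQ: for each EQE, look up the signalled CQ in cq_hash and
 * process it; an EQ-full sentinel triggers a sweep of all CQs. Processing
 * time is bounded by max_isr_time_msec and the EQ is re-armed on exit.
 */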
2186e2cf422bSJames Smart int
2187e2cf422bSJames Smart efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
2188e2cf422bSJames Smart 		   u32 max_isr_time_msec)
2189e2cf422bSJames Smart {
2190e2cf422bSJames Smart 	u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
2191e2cf422bSJames Smart 	u32 tcheck_count;
2192e2cf422bSJames Smart 	u64 tstart;
2193e2cf422bSJames Smart 	u64 telapsed;
2194e2cf422bSJames Smart 	bool done = false;
2195e2cf422bSJames Smart 
2196e2cf422bSJames Smart 	tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2197e2cf422bSJames Smart 	tstart = jiffies_to_msecs(jiffies);
2198e2cf422bSJames Smart 
2199e2cf422bSJames Smart 	while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
2200e2cf422bSJames Smart 		u16 cq_id = 0;
2201e2cf422bSJames Smart 		int rc;
2202e2cf422bSJames Smart 
2203e2cf422bSJames Smart 		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2204e2cf422bSJames Smart 		if (unlikely(rc)) {
2205e2cf422bSJames Smart 			if (rc == SLI4_EQE_STATUS_EQ_FULL) {
2206e2cf422bSJames Smart 				u32 i;
2207e2cf422bSJames Smart 
2208e2cf422bSJames Smart 				/*
2209e2cf422bSJames Smart 				 * Received a sentinel EQE indicating the
2210e2cf422bSJames Smart 				 * EQ is full. Process all CQs
2211e2cf422bSJames Smart 				 */
2212e2cf422bSJames Smart 				for (i = 0; i < hw->cq_count; i++)
2213e2cf422bSJames Smart 					efct_hw_cq_process(hw, hw->hw_cq[i]);
2214e2cf422bSJames Smart 				continue;
2215e2cf422bSJames Smart 			} else {
2216e2cf422bSJames Smart 				return rc;
2217e2cf422bSJames Smart 			}
2218e2cf422bSJames Smart 		} else {
2219e2cf422bSJames Smart 			int index;
2220e2cf422bSJames Smart 
2221e2cf422bSJames Smart 			index  = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
2222e2cf422bSJames Smart 
2223e2cf422bSJames Smart 			if (likely(index >= 0))
2224e2cf422bSJames Smart 				efct_hw_cq_process(hw, hw->hw_cq[index]);
2225e2cf422bSJames Smart 			else
2226e2cf422bSJames Smart 				efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2227e2cf422bSJames Smart 		}
2228e2cf422bSJames Smart 
2229e2cf422bSJames Smart 		if (eq->queue->n_posted > eq->queue->posted_limit)
2230e2cf422bSJames Smart 			sli_queue_arm(&hw->sli, eq->queue, false);
2231e2cf422bSJames Smart 
2232e2cf422bSJames Smart 		if (tcheck_count && (--tcheck_count == 0)) {
2233e2cf422bSJames Smart 			tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2234e2cf422bSJames Smart 			telapsed = jiffies_to_msecs(jiffies) - tstart;
2235e2cf422bSJames Smart 			if (telapsed >= max_isr_time_msec)
2236e2cf422bSJames Smart 				done = true;
2237e2cf422bSJames Smart 		}
2238e2cf422bSJames Smart 	}
2239e2cf422bSJames Smart 	sli_queue_eq_arm(&hw->sli, eq->queue, true);
2240e2cf422bSJames Smart 
2241e2cf422bSJames Smart 	return 0;
2242e2cf422bSJames Smart }
2243e2cf422bSJames Smart 
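/*
 * Write one WQE to the hardware WQ (caller holds the queue lock),
 * periodically setting the WQEC bit so the hardware reports consumed
 * entries through WQ release CQEs.
 */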
2244e2cf422bSJames Smart static int
2245e2cf422bSJames Smart _efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2246e2cf422bSJames Smart {
2247e2cf422bSJames Smart 	int queue_rc;
2248e2cf422bSJames Smart 
2249e2cf422bSJames Smart 	/* Every so often, set the wqec bit to generate WQE-consumed completions */
2250e2cf422bSJames Smart 	if (wq->wqec_count)
2251e2cf422bSJames Smart 		wq->wqec_count--;
2252e2cf422bSJames Smart 
2253e2cf422bSJames Smart 	if (wq->wqec_count == 0) {
2254e2cf422bSJames Smart 		struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
2255e2cf422bSJames Smart 
2256e2cf422bSJames Smart 		genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
2257e2cf422bSJames Smart 		wq->wqec_count = wq->wqec_set_count;
2258e2cf422bSJames Smart 	}
2259e2cf422bSJames Smart 
2260e2cf422bSJames Smart 	/* Decrement WQ free count */
2261e2cf422bSJames Smart 	wq->free_count--;
2262e2cf422bSJames Smart 
2263e2cf422bSJames Smart 	queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
2264e2cf422bSJames Smart 
2265e2cf422bSJames Smart 	return (queue_rc < 0) ? -EIO : 0;
2266e2cf422bSJames Smart }
2267e2cf422bSJames Smart 
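/*
 * Credit the WQ with 'update_free_count' freed entries and flush as many
 * pending WQEs as the new free count allows; deferred abort WQEs are
 * built and re-queued here.
 */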
2268e2cf422bSJames Smart static void
2269e2cf422bSJames Smart hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
2270e2cf422bSJames Smart {
2271e2cf422bSJames Smart 	struct efct_hw_wqe *wqe;
2272e2cf422bSJames Smart 	unsigned long flags = 0;
2273e2cf422bSJames Smart 
2274e2cf422bSJames Smart 	spin_lock_irqsave(&wq->queue->lock, flags);
2275e2cf422bSJames Smart 
2276e2cf422bSJames Smart 	/* Update free count with value passed in */
2277e2cf422bSJames Smart 	wq->free_count += update_free_count;
2278e2cf422bSJames Smart 
2279e2cf422bSJames Smart 	while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
2280e2cf422bSJames Smart 		wqe = list_first_entry(&wq->pending_list,
2281e2cf422bSJames Smart 				       struct efct_hw_wqe, list_entry);
2282e2cf422bSJames Smart 		list_del_init(&wqe->list_entry);
2283e2cf422bSJames Smart 		_efct_hw_wq_write(wq, wqe);
2284e2cf422bSJames Smart 
2285e2cf422bSJames Smart 		if (wqe->abort_wqe_submit_needed) {
2286e2cf422bSJames Smart 			wqe->abort_wqe_submit_needed = false;
2287e2cf422bSJames Smart 			efct_hw_fill_abort_wqe(wq->hw, wqe);
2288e2cf422bSJames Smart 			INIT_LIST_HEAD(&wqe->list_entry);
2289e2cf422bSJames Smart 			list_add_tail(&wqe->list_entry, &wq->pending_list);
2290e2cf422bSJames Smart 			wq->wq_pending_count++;
2291e2cf422bSJames Smart 		}
2292e2cf422bSJames Smart 	}
2293e2cf422bSJames Smart 
2294e2cf422bSJames Smart 	spin_unlock_irqrestore(&wq->queue->lock, flags);
2295e2cf422bSJames Smart }
2296e2cf422bSJames Smart 
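/*
 * Drain a CQ, dispatching each entry by type (async event, MQ, WQ, WQ
 * release, RQ, XRI abort) until the queue is empty or proc_limit entries
 * have been processed, then re-arm the CQ.
 */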
2297e2cf422bSJames Smart void
2298e2cf422bSJames Smart efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
2299e2cf422bSJames Smart {
2300e2cf422bSJames Smart 	u8 cqe[sizeof(struct sli4_mcqe)];
2301e2cf422bSJames Smart 	u16 rid = U16_MAX;
2302e2cf422bSJames Smart 	/* completion type */
2303e2cf422bSJames Smart 	enum sli4_qentry ctype;
2304e2cf422bSJames Smart 	u32 n_processed = 0;
2305e2cf422bSJames Smart 	u32 tstart, telapsed;
2306e2cf422bSJames Smart 
2307e2cf422bSJames Smart 	tstart = jiffies_to_msecs(jiffies);
2308e2cf422bSJames Smart 
2309e2cf422bSJames Smart 	while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
2310e2cf422bSJames Smart 		int status;
2311e2cf422bSJames Smart 
2312e2cf422bSJames Smart 		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
2313e2cf422bSJames Smart 		/*
2314e2cf422bSJames Smart 		 * The sign of status is significant. If status is:
2315e2cf422bSJames Smart 		 * == 0 : call completed correctly and
2316e2cf422bSJames Smart 		 * the CQE indicated success
2317e2cf422bSJames Smart 		 * > 0 : call completed correctly and
2318e2cf422bSJames Smart 		 * the CQE indicated an error
2319e2cf422bSJames Smart 		 * < 0 : call failed and no information is available about the
2320e2cf422bSJames Smart 		 * CQE
2321e2cf422bSJames Smart 		 */
2322e2cf422bSJames Smart 		if (status < 0) {
2323e2cf422bSJames Smart 			if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
2324e2cf422bSJames Smart 				/*
2325e2cf422bSJames Smart 				 * Notification that an entry was consumed,
2326e2cf422bSJames Smart 				 * but not completed
2327e2cf422bSJames Smart 				 */
2328e2cf422bSJames Smart 				continue;
2329e2cf422bSJames Smart 
2330e2cf422bSJames Smart 			break;
2331e2cf422bSJames Smart 		}
2332e2cf422bSJames Smart 
2333e2cf422bSJames Smart 		switch (ctype) {
2334e2cf422bSJames Smart 		case SLI4_QENTRY_ASYNC:
2335e2cf422bSJames Smart 			sli_cqe_async(&hw->sli, cqe);
2336e2cf422bSJames Smart 			break;
2337e2cf422bSJames Smart 		case SLI4_QENTRY_MQ:
2338e2cf422bSJames Smart 			/*
2339e2cf422bSJames Smart 			 * Process MQ entry. Note there is no way to determine
2340e2cf422bSJames Smart 			 * the MQ_ID from the completion entry.
2341e2cf422bSJames Smart 			 */
2342e2cf422bSJames Smart 			efct_hw_mq_process(hw, status, hw->mq);
2343e2cf422bSJames Smart 			break;
2344e2cf422bSJames Smart 		case SLI4_QENTRY_WQ:
2345e2cf422bSJames Smart 			efct_hw_wq_process(hw, cq, cqe, status, rid);
2346e2cf422bSJames Smart 			break;
2347e2cf422bSJames Smart 		case SLI4_QENTRY_WQ_RELEASE: {
2348e2cf422bSJames Smart 			u32 wq_id = rid;
2349e2cf422bSJames Smart 			int index;
2350e2cf422bSJames Smart 			struct hw_wq *wq = NULL;
2351e2cf422bSJames Smart 
2352e2cf422bSJames Smart 			index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
2353e2cf422bSJames Smart 
2354e2cf422bSJames Smart 			if (likely(index >= 0)) {
2355e2cf422bSJames Smart 				wq = hw->hw_wq[index];
2356e2cf422bSJames Smart 			} else {
2357e2cf422bSJames Smart 				efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
2358e2cf422bSJames Smart 				break;
2359e2cf422bSJames Smart 			}
2360e2cf422bSJames Smart 			/* Submit any HW IOs that are on the WQ pending list */
2361e2cf422bSJames Smart 			hw_wq_submit_pending(wq, wq->wqec_set_count);
2362e2cf422bSJames Smart 
2363e2cf422bSJames Smart 			break;
2364e2cf422bSJames Smart 		}
2365e2cf422bSJames Smart 
2366e2cf422bSJames Smart 		case SLI4_QENTRY_RQ:
2367e2cf422bSJames Smart 			efct_hw_rqpair_process_rq(hw, cq, cqe);
2368e2cf422bSJames Smart 			break;
2369e2cf422bSJames Smart 		case SLI4_QENTRY_XABT:
2370e2cf422bSJames Smart 			efct_hw_xabt_process(hw, cq, cqe, rid);
2371e2cf422bSJames Smart 			break;
2373e2cf422bSJames Smart 		default:
2374e2cf422bSJames Smart 			efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
2375e2cf422bSJames Smart 				      ctype, rid);
2376e2cf422bSJames Smart 			break;
2377e2cf422bSJames Smart 		}
2378e2cf422bSJames Smart 
2379e2cf422bSJames Smart 		n_processed++;
2380e2cf422bSJames Smart 		if (n_processed == cq->queue->proc_limit)
2381e2cf422bSJames Smart 			break;
2382e2cf422bSJames Smart 
2383e2cf422bSJames Smart 		if (cq->queue->n_posted >= cq->queue->posted_limit)
2384e2cf422bSJames Smart 			sli_queue_arm(&hw->sli, cq->queue, false);
2385e2cf422bSJames Smart 	}
2386e2cf422bSJames Smart 
2387e2cf422bSJames Smart 	sli_queue_arm(&hw->sli, cq->queue, true);
2388e2cf422bSJames Smart 
2389e2cf422bSJames Smart 	if (n_processed > cq->queue->max_num_processed)
2390e2cf422bSJames Smart 		cq->queue->max_num_processed = n_processed;
2391e2cf422bSJames Smart 	telapsed = jiffies_to_msecs(jiffies) - tstart;
2392e2cf422bSJames Smart 	if (telapsed > cq->queue->max_process_time)
2393e2cf422bSJames Smart 		cq->queue->max_process_time = telapsed;
2394e2cf422bSJames Smart }
2395e2cf422bSJames Smart 
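/**
 * efct_hw_wq_process() - Handle a work queue completion entry.
 * @hw: Hardware context.
 * @cq: Completion queue the entry was read from.
 * @cqe: Work queue completion entry.
 * @status: Completion status parsed from the CQE.
 * @rid: Request tag identifying the WQE that completed.
 *
 * Looks up the request-tag callback registered for @rid and invokes it.
 * Completions carrying the internal requeue-XRI tag are only checked for
 * errors.
 */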
2396e2cf422bSJames Smart void
2397e2cf422bSJames Smart efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
2398e2cf422bSJames Smart 		   u8 *cqe, int status, u16 rid)
2399e2cf422bSJames Smart {
2400e2cf422bSJames Smart 	struct hw_wq_callback *wqcb;
2401e2cf422bSJames Smart 
2402e2cf422bSJames Smart 	if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
2403e2cf422bSJames Smart 		if (status)
2404e2cf422bSJames Smart 			efc_log_err(hw->os, "reque xri failed, status = %d\n",
2405e2cf422bSJames Smart 				    status);
2406e2cf422bSJames Smart 		return;
2407e2cf422bSJames Smart 	}
2408e2cf422bSJames Smart 
2409e2cf422bSJames Smart 	wqcb = efct_hw_reqtag_get_instance(hw, rid);
2410e2cf422bSJames Smart 	if (!wqcb) {
2411e2cf422bSJames Smart 		efc_log_err(hw->os, "invalid request tag: %#x\n", rid);
2412e2cf422bSJames Smart 		return;
2413e2cf422bSJames Smart 	}
2414e2cf422bSJames Smart 
2415e2cf422bSJames Smart 	if (!wqcb->callback) {
2416e2cf422bSJames Smart 		efc_log_err(hw->os, "wqcb callback is NULL\n");
2417e2cf422bSJames Smart 		return;
2418e2cf422bSJames Smart 	}
2419e2cf422bSJames Smart 
2420e2cf422bSJames Smart 	(*wqcb->callback)(wqcb->arg, cqe, status);
2421e2cf422bSJames Smart }
2422e2cf422bSJames Smart 
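/**
 * efct_hw_xabt_process() - Handle an XRI-aborted (XABT) completion.
 * @hw: Hardware context.
 * @cq: Completion queue the entry was read from.
 * @cqe: Completion entry.
 * @rid: XRI of the aborted exchange.
 *
 * Clears the exchange-busy state of the HW IO, invokes any completion
 * callback latched by an internal abort, and, if the caller has already
 * freed the IO (wait_free state), moves it to the free list.
 */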
2423e2cf422bSJames Smart void
2424e2cf422bSJames Smart efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
2425e2cf422bSJames Smart 		     u8 *cqe, u16 rid)
2426e2cf422bSJames Smart {
2427e2cf422bSJames Smart 	/* Look up the HW IO for the aborted exchange (XRI) */
2428e2cf422bSJames Smart 	struct efct_hw_io *io = NULL;
2429e2cf422bSJames Smart 	unsigned long flags = 0;
2430e2cf422bSJames Smart 
2431e2cf422bSJames Smart 	io = efct_hw_io_lookup(hw, rid);
2432e2cf422bSJames Smart 	if (!io) {
2433e2cf422bSJames Smart 		/* IO lookup failure should never happen */
2434e2cf422bSJames Smart 		efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
2435e2cf422bSJames Smart 		return;
2436e2cf422bSJames Smart 	}
2437e2cf422bSJames Smart 
2438e2cf422bSJames Smart 	if (!io->xbusy)
2439e2cf422bSJames Smart 		efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
2440e2cf422bSJames Smart 	else
2441e2cf422bSJames Smart 		/* mark IO as no longer busy */
2442e2cf422bSJames Smart 		io->xbusy = false;
2443e2cf422bSJames Smart 
2444e2cf422bSJames Smart 	/*
2445e2cf422bSJames Smart 	 * For IOs that were aborted internally, we need to issue any pending
2446e2cf422bSJames Smart 	 * callback here.
2447e2cf422bSJames Smart 	 */
2448e2cf422bSJames Smart 	if (io->done) {
2449e2cf422bSJames Smart 		efct_hw_done_t done = io->done;
2450e2cf422bSJames Smart 		void		*arg = io->arg;
2451e2cf422bSJames Smart 
2452e2cf422bSJames Smart 		/*
2453e2cf422bSJames Smart 		 * Use latched status as this is always saved for an internal
2454e2cf422bSJames Smart 		 * abort
2455e2cf422bSJames Smart 		 */
2456e2cf422bSJames Smart 		int status = io->saved_status;
2457e2cf422bSJames Smart 		u32 len = io->saved_len;
2458e2cf422bSJames Smart 		u32 ext = io->saved_ext;
2459e2cf422bSJames Smart 
2460e2cf422bSJames Smart 		io->done = NULL;
2461e2cf422bSJames Smart 		io->status_saved = false;
2462e2cf422bSJames Smart 
2463e2cf422bSJames Smart 		done(io, len, status, ext, arg);
2464e2cf422bSJames Smart 	}
2465e2cf422bSJames Smart 
2466e2cf422bSJames Smart 	spin_lock_irqsave(&hw->io_lock, flags);
2467e2cf422bSJames Smart 	if (io->state == EFCT_HW_IO_STATE_INUSE ||
2468e2cf422bSJames Smart 	    io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
2469e2cf422bSJames Smart 		/* If the IO is on the wait_free list, the caller has already
2470e2cf422bSJames Smart 		 * freed it: move it from the wait_free list to the free list.
2471e2cf422bSJames Smart 		 * If it is still on the in-use list, it was marked not busy
2472e2cf422bSJames Smart 		 * above; leave it there until the caller frees it.
2473e2cf422bSJames Smart 		 */
2474e2cf422bSJames Smart 		if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
2475e2cf422bSJames Smart 			io->state = EFCT_HW_IO_STATE_FREE;
2476e2cf422bSJames Smart 			list_del_init(&io->list_entry);
2477e2cf422bSJames Smart 			efct_hw_io_free_move_correct_list(hw, io);
2478e2cf422bSJames Smart 		}
2479e2cf422bSJames Smart 	}
2480e2cf422bSJames Smart 	spin_unlock_irqrestore(&hw->io_lock, flags);
2481e2cf422bSJames Smart }
2482e2cf422bSJames Smart 
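/* Poll every EQ once so that any outstanding completions are processed. */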
2483e2cf422bSJames Smart static int
2484e2cf422bSJames Smart efct_hw_flush(struct efct_hw *hw)
2485e2cf422bSJames Smart {
2486e2cf422bSJames Smart 	u32 i = 0;
2487e2cf422bSJames Smart 
2488e2cf422bSJames Smart 	/* Process any remaining completions */
2489e2cf422bSJames Smart 	for (i = 0; i < hw->eq_count; i++)
2490e2cf422bSJames Smart 		efct_hw_process(hw, i, ~0);
2491e2cf422bSJames Smart 
2492e2cf422bSJames Smart 	return 0;
2493e2cf422bSJames Smart }
2494dd53d333SJames Smart 
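/**
 * efct_hw_wq_write() - Submit a WQE to a work queue.
 * @wq: Work queue to post to.
 * @wqe: Work queue entry to submit.
 *
 * If nothing is pending and a queue credit is available, the entry is
 * written to hardware immediately; otherwise it is appended to the WQ
 * pending list and submitted in order as credits become available.
 *
 * Return: 0 on success or a negative error code on failure.
 */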
2495dd53d333SJames Smart int
2496dd53d333SJames Smart efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2497dd53d333SJames Smart {
2498dd53d333SJames Smart 	int rc = 0;
2499dd53d333SJames Smart 	unsigned long flags = 0;
2500dd53d333SJames Smart 
2501dd53d333SJames Smart 	spin_lock_irqsave(&wq->queue->lock, flags);
2502dd53d333SJames Smart 	if (list_empty(&wq->pending_list)) {
2503dd53d333SJames Smart 		if (wq->free_count > 0) {
2504dd53d333SJames Smart 			rc = _efct_hw_wq_write(wq, wqe);
2505dd53d333SJames Smart 		} else {
2506dd53d333SJames Smart 			INIT_LIST_HEAD(&wqe->list_entry);
2507dd53d333SJames Smart 			list_add_tail(&wqe->list_entry, &wq->pending_list);
2508dd53d333SJames Smart 			wq->wq_pending_count++;
2509dd53d333SJames Smart 		}
2510dd53d333SJames Smart 
2511dd53d333SJames Smart 		spin_unlock_irqrestore(&wq->queue->lock, flags);
2512dd53d333SJames Smart 		return rc;
2513dd53d333SJames Smart 	}
2514dd53d333SJames Smart 
2515dd53d333SJames Smart 	INIT_LIST_HEAD(&wqe->list_entry);
2516dd53d333SJames Smart 	list_add_tail(&wqe->list_entry, &wq->pending_list);
2517dd53d333SJames Smart 	wq->wq_pending_count++;
2518dd53d333SJames Smart 	while (wq->free_count > 0) {
2519dd53d333SJames Smart 		wqe = list_first_entry_or_null(&wq->pending_list,
2520dd53d333SJames Smart 					       struct efct_hw_wqe, list_entry);
2521dd53d333SJames Smart 		if (!wqe)
2522dd53d333SJames Smart 			break;
2523dd53d333SJames Smart 
2524dd53d333SJames Smart 		list_del_init(&wqe->list_entry);
2525dd53d333SJames Smart 		rc = _efct_hw_wq_write(wq, wqe);
2526dd53d333SJames Smart 		if (rc)
2527dd53d333SJames Smart 			break;
2528dd53d333SJames Smart 
2529dd53d333SJames Smart 		if (wqe->abort_wqe_submit_needed) {
2530dd53d333SJames Smart 			wqe->abort_wqe_submit_needed = false;
2531dd53d333SJames Smart 			efct_hw_fill_abort_wqe(wq->hw, wqe);
2532dd53d333SJames Smart 
2533dd53d333SJames Smart 			INIT_LIST_HEAD(&wqe->list_entry);
2534dd53d333SJames Smart 			list_add_tail(&wqe->list_entry, &wq->pending_list);
2535dd53d333SJames Smart 			wq->wq_pending_count++;
2536dd53d333SJames Smart 		}
2537dd53d333SJames Smart 	}
2538dd53d333SJames Smart 
2539dd53d333SJames Smart 	spin_unlock_irqrestore(&wq->queue->lock, flags);
2540dd53d333SJames Smart 
2541dd53d333SJames Smart 	return rc;
2542dd53d333SJames Smart }
2543dd53d333SJames Smart 
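/*
 * Send a BLS accept or reject on behalf of the libefc layer; no completion
 * callback is used.
 */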
2544dd53d333SJames Smart int
2545dd53d333SJames Smart efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
2546dd53d333SJames Smart {
2547dd53d333SJames Smart 	struct efct *efct = efc->base;
2548dd53d333SJames Smart 
2549dd53d333SJames Smart 	return efct_hw_bls_send(efct, type, bls, NULL, NULL);
2550dd53d333SJames Smart }
2551dd53d333SJames Smart 
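/**
 * efct_hw_bls_send() - Build and send a BLS accept or reject response.
 * @efct: Driver instance.
 * @type: FC R_CTL value; FC_RCTL_BA_ACC selects an accept, anything else
 *        a reject.
 * @bls_params: BLS parameters (OX_ID, RX_ID and response payload).
 * @cb: Completion callback, may be NULL.
 * @arg: Argument passed to @cb.
 *
 * Allocates a HW IO, formats an XMIT_BLS_RSP64 WQE from @bls_params and
 * writes it to the IO's work queue.
 *
 * Return: 0 on success or a negative error code on failure.
 */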
2552dd53d333SJames Smart int
2553dd53d333SJames Smart efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
2554dd53d333SJames Smart 		 void *cb, void *arg)
2555dd53d333SJames Smart {
2556dd53d333SJames Smart 	struct efct_hw *hw = &efct->hw;
2557dd53d333SJames Smart 	struct efct_hw_io *hio;
2558dd53d333SJames Smart 	struct sli_bls_payload bls;
2559dd53d333SJames Smart 	int rc;
2560dd53d333SJames Smart 
2561dd53d333SJames Smart 	if (hw->state != EFCT_HW_STATE_ACTIVE) {
2562dd53d333SJames Smart 		efc_log_err(hw->os,
2563dd53d333SJames Smart 			    "cannot send BLS, HW state=%d\n", hw->state);
2564dd53d333SJames Smart 		return -EIO;
2565dd53d333SJames Smart 	}
2566dd53d333SJames Smart 
2567dd53d333SJames Smart 	hio = efct_hw_io_alloc(hw);
2568dd53d333SJames Smart 	if (!hio) {
2569dd53d333SJames Smart 		efc_log_err(hw->os, "HIO allocation failed\n");
2570dd53d333SJames Smart 		return -EIO;
2571dd53d333SJames Smart 	}
2572dd53d333SJames Smart 
2573dd53d333SJames Smart 	hio->done = cb;
2574dd53d333SJames Smart 	hio->arg  = arg;
2575dd53d333SJames Smart 
2576dd53d333SJames Smart 	bls_params->xri = hio->indicator;
2577dd53d333SJames Smart 	bls_params->tag = hio->reqtag;
2578dd53d333SJames Smart 
2579dd53d333SJames Smart 	if (type == FC_RCTL_BA_ACC) {
2580dd53d333SJames Smart 		hio->type = EFCT_HW_BLS_ACC;
2581dd53d333SJames Smart 		bls.type = SLI4_SLI_BLS_ACC;
2582dd53d333SJames Smart 		memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
2583dd53d333SJames Smart 	} else {
2584dd53d333SJames Smart 		hio->type = EFCT_HW_BLS_RJT;
2585dd53d333SJames Smart 		bls.type = SLI4_SLI_BLS_RJT;
2586dd53d333SJames Smart 		memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
2587dd53d333SJames Smart 	}
2588dd53d333SJames Smart 
2589dd53d333SJames Smart 	bls.ox_id = cpu_to_le16(bls_params->ox_id);
2590dd53d333SJames Smart 	bls.rx_id = cpu_to_le16(bls_params->rx_id);
2591dd53d333SJames Smart 
2592dd53d333SJames Smart 	if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
2593dd53d333SJames Smart 				   &bls, bls_params)) {
2594dd53d333SJames Smart 		efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
		/* do not leak the HW IO on this error path */
		efct_hw_io_free(hw, hio);
2595dd53d333SJames Smart 		return -EIO;
2596dd53d333SJames Smart 	}
2597dd53d333SJames Smart 
2598dd53d333SJames Smart 	hio->xbusy = true;
2599dd53d333SJames Smart 
2600dd53d333SJames Smart 	/*
2601dd53d333SJames Smart 	 * Account for this WQE on the WQ (use_count) before submitting,
2602dd53d333SJames Smart 	 * in case WCQE processing preempts this thread.
2603dd53d333SJames Smart 	 */
2604dd53d333SJames Smart 	hio->wq->use_count++;
2605dd53d333SJames Smart 	rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2606dd53d333SJames Smart 	if (rc >= 0) {
2607dd53d333SJames Smart 		/* non-negative return is success */
2608dd53d333SJames Smart 		rc = 0;
2609dd53d333SJames Smart 	} else {
2610dd53d333SJames Smart 		/* failed to write the wqe; clear the exchange-busy flag */
2611dd53d333SJames Smart 		efc_log_err(hw->os,
2612dd53d333SJames Smart 			    "sli_queue_write failed: %d\n", rc);
2613dd53d333SJames Smart 		hio->xbusy = false;
2614dd53d333SJames Smart 	}
2615dd53d333SJames Smart 
2616dd53d333SJames Smart 	return rc;
2617dd53d333SJames Smart }
2618dd53d333SJames Smart 
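/* SRRS completion handler: forward the result to the libefc discovery IO. */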
2619dd53d333SJames Smart static int
2620dd53d333SJames Smart efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
2621dd53d333SJames Smart 		      u32 ext_status, void *arg)
2622dd53d333SJames Smart {
2623dd53d333SJames Smart 	struct efc_disc_io *io = arg;
2624dd53d333SJames Smart 
2625dd53d333SJames Smart 	efc_disc_io_complete(io, length, status, ext_status);
2626dd53d333SJames Smart 	return 0;
2627dd53d333SJames Smart }
2628dd53d333SJames Smart 
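/* Translate discovery IO fields into SLI-4 ELS WQE parameters. */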
2629dd53d333SJames Smart static inline void
2630dd53d333SJames Smart efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
2631dd53d333SJames Smart {
2632dd53d333SJames Smart 	u8 *cmd = io->req.virt;
2633dd53d333SJames Smart 
2634dd53d333SJames Smart 	params->cmd = *cmd;
2635dd53d333SJames Smart 	params->s_id = io->s_id;
2636dd53d333SJames Smart 	params->d_id = io->d_id;
2637dd53d333SJames Smart 	params->ox_id = io->iparam.els.ox_id;
2638dd53d333SJames Smart 	params->rpi = io->rpi;
2639dd53d333SJames Smart 	params->vpi = io->vpi;
2640dd53d333SJames Smart 	params->rpi_registered = io->rpi_registered;
2641dd53d333SJames Smart 	params->xmit_len = io->xmit_len;
2642dd53d333SJames Smart 	params->rsp_len = io->rsp_len;
2643dd53d333SJames Smart 	params->timeout = io->iparam.els.timeout;
2644dd53d333SJames Smart }
2645dd53d333SJames Smart 
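/* Translate discovery IO fields into SLI-4 FC-CT WQE parameters. */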
2646dd53d333SJames Smart static inline void
2647dd53d333SJames Smart efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
2648dd53d333SJames Smart {
2649dd53d333SJames Smart 	params->r_ctl = io->iparam.ct.r_ctl;
2650dd53d333SJames Smart 	params->type = io->iparam.ct.type;
2651dd53d333SJames Smart 	params->df_ctl =  io->iparam.ct.df_ctl;
2652dd53d333SJames Smart 	params->d_id = io->d_id;
2653dd53d333SJames Smart 	params->ox_id = io->iparam.ct.ox_id;
2654dd53d333SJames Smart 	params->rpi = io->rpi;
2655dd53d333SJames Smart 	params->vpi = io->vpi;
2656dd53d333SJames Smart 	params->rpi_registered = io->rpi_registered;
2657dd53d333SJames Smart 	params->xmit_len = io->xmit_len;
2658dd53d333SJames Smart 	params->rsp_len = io->rsp_len;
2659dd53d333SJames Smart 	params->timeout = io->iparam.ct.timeout;
2660dd53d333SJames Smart }
2661dd53d333SJames Smart 
2662dd53d333SJames Smart /**
2663dd53d333SJames Smart  * efct_els_hw_srrs_send() - Send a single request/response (SRRS) command.
2664dd53d333SJames Smart  * @efc: efc library structure
2665dd53d333SJames Smart  * @io: Discovery IO used to hold els and ct cmd context.
2666dd53d333SJames Smart  *
2667dd53d333SJames Smart  * This routine supports communication sequences consisting of a single
2668dd53d333SJames Smart  * request and single response between two endpoints. Examples include:
2669dd53d333SJames Smart  *  - Sending an ELS request.
2670dd53d333SJames Smart  *  - Sending an ELS response - To send an ELS response, the caller must provide
2671dd53d333SJames Smart  * the OX_ID from the received request.
2672dd53d333SJames Smart  *  - Sending an FC Common Transport (FC-CT) request - To send an FC-CT
2673dd53d333SJames Smart  * request, the caller must provide the R_CTL, TYPE, and DF_CTL values to
2674dd53d333SJames Smart  * place in the FC frame header.
2675dd53d333SJames Smart  *
2676dd53d333SJames Smart  * Return: Status of the request.
2677dd53d333SJames Smart  */
2678dd53d333SJames Smart int
2679dd53d333SJames Smart efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
2680dd53d333SJames Smart {
2681dd53d333SJames Smart 	struct efct *efct = efc->base;
2682dd53d333SJames Smart 	struct efct_hw_io *hio;
2683dd53d333SJames Smart 	struct efct_hw *hw = &efct->hw;
2684dd53d333SJames Smart 	struct efc_dma *send = &io->req;
2685dd53d333SJames Smart 	struct efc_dma *receive = &io->rsp;
2686dd53d333SJames Smart 	struct sli4_sge	*sge = NULL;
2687dd53d333SJames Smart 	int rc = 0;
2688dd53d333SJames Smart 	u32 len = io->xmit_len;
2689dd53d333SJames Smart 	u32 sge0_flags;
2690dd53d333SJames Smart 	u32 sge1_flags;
2691dd53d333SJames Smart 
2692dd53d333SJames Smart 	if (hw->state != EFCT_HW_STATE_ACTIVE) {
2693dd53d333SJames Smart 		efc_log_debug(hw->os,
2694dd53d333SJames Smart 			      "cannot send SRRS, HW state=%d\n", hw->state);
2695dd53d333SJames Smart 		return -EIO;
2696dd53d333SJames Smart 	}
2697dd53d333SJames Smart 
2698dd53d333SJames Smart 	hio = efct_hw_io_alloc(hw);
2699dd53d333SJames Smart 	if (!hio) {
2700dd53d333SJames Smart 		pr_err("HIO alloc failed\n");
2701dd53d333SJames Smart 		return -EIO;
2702dd53d333SJames Smart 	}
2703dd53d333SJames Smart 
2704dd53d333SJames Smart 	hio->done = efct_els_ssrs_send_cb;
2705dd53d333SJames Smart 	hio->arg  = io;
2706dd53d333SJames Smart 
2707dd53d333SJames Smart 	sge = hio->sgl->virt;
2708dd53d333SJames Smart 
2709dd53d333SJames Smart 	/* clear both SGE */
2710dd53d333SJames Smart 	memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
2711dd53d333SJames Smart 
2712dd53d333SJames Smart 	sge0_flags = le32_to_cpu(sge[0].dw2_flags);
2713dd53d333SJames Smart 	sge1_flags = le32_to_cpu(sge[1].dw2_flags);
2714dd53d333SJames Smart 	if (send->size) {
2715dd53d333SJames Smart 		sge[0].buffer_address_high =
2716dd53d333SJames Smart 			cpu_to_le32(upper_32_bits(send->phys));
2717dd53d333SJames Smart 		sge[0].buffer_address_low  =
2718dd53d333SJames Smart 			cpu_to_le32(lower_32_bits(send->phys));
2719dd53d333SJames Smart 
2720dd53d333SJames Smart 		sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2721dd53d333SJames Smart 
2722dd53d333SJames Smart 		sge[0].buffer_length = cpu_to_le32(len);
2723dd53d333SJames Smart 	}
2724dd53d333SJames Smart 
2725dd53d333SJames Smart 	if (io->io_type == EFC_DISC_IO_ELS_REQ ||
2726dd53d333SJames Smart 	    io->io_type == EFC_DISC_IO_CT_REQ) {
2727dd53d333SJames Smart 		sge[1].buffer_address_high =
2728dd53d333SJames Smart 			cpu_to_le32(upper_32_bits(receive->phys));
2729dd53d333SJames Smart 		sge[1].buffer_address_low  =
2730dd53d333SJames Smart 			cpu_to_le32(lower_32_bits(receive->phys));
2731dd53d333SJames Smart 
2732dd53d333SJames Smart 		sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2733dd53d333SJames Smart 		sge1_flags |= SLI4_SGE_LAST;
2734dd53d333SJames Smart 
2735dd53d333SJames Smart 		sge[1].buffer_length = cpu_to_le32(receive->size);
2736dd53d333SJames Smart 	} else {
2737dd53d333SJames Smart 		sge0_flags |= SLI4_SGE_LAST;
2738dd53d333SJames Smart 	}
2739dd53d333SJames Smart 
2740dd53d333SJames Smart 	sge[0].dw2_flags = cpu_to_le32(sge0_flags);
2741dd53d333SJames Smart 	sge[1].dw2_flags = cpu_to_le32(sge1_flags);
2742dd53d333SJames Smart 
2743dd53d333SJames Smart 	switch (io->io_type) {
2744dd53d333SJames Smart 	case EFC_DISC_IO_ELS_REQ: {
2745dd53d333SJames Smart 		struct sli_els_params els_params;
2746dd53d333SJames Smart 
2747dd53d333SJames Smart 		hio->type = EFCT_HW_ELS_REQ;
2748dd53d333SJames Smart 		efct_fill_els_params(io, &els_params);
2749dd53d333SJames Smart 		els_params.xri = hio->indicator;
2750dd53d333SJames Smart 		els_params.tag = hio->reqtag;
2751dd53d333SJames Smart 
2752dd53d333SJames Smart 		if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2753dd53d333SJames Smart 					  &els_params)) {
2754dd53d333SJames Smart 			efc_log_err(hw->os, "REQ WQE error\n");
2755dd53d333SJames Smart 			rc = -EIO;
2756dd53d333SJames Smart 		}
2757dd53d333SJames Smart 		break;
2758dd53d333SJames Smart 	}
2759dd53d333SJames Smart 	case EFC_DISC_IO_ELS_RESP: {
2760dd53d333SJames Smart 		struct sli_els_params els_params;
2761dd53d333SJames Smart 
2762dd53d333SJames Smart 		hio->type = EFCT_HW_ELS_RSP;
2763dd53d333SJames Smart 		efct_fill_els_params(io, &els_params);
2764dd53d333SJames Smart 		els_params.xri = hio->indicator;
2765dd53d333SJames Smart 		els_params.tag = hio->reqtag;
2766dd53d333SJames Smart 		if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
2767dd53d333SJames Smart 					   &els_params)){
2768dd53d333SJames Smart 			efc_log_err(hw->os, "RSP WQE error\n");
2769dd53d333SJames Smart 			rc = -EIO;
2770dd53d333SJames Smart 		}
2771dd53d333SJames Smart 		break;
2772dd53d333SJames Smart 	}
2773dd53d333SJames Smart 	case EFC_DISC_IO_CT_REQ: {
2774dd53d333SJames Smart 		struct sli_ct_params ct_params;
2775dd53d333SJames Smart 
2776dd53d333SJames Smart 		hio->type = EFCT_HW_FC_CT;
2777dd53d333SJames Smart 		efct_fill_ct_params(io, &ct_params);
2778dd53d333SJames Smart 		ct_params.xri = hio->indicator;
2779dd53d333SJames Smart 		ct_params.tag = hio->reqtag;
2780dd53d333SJames Smart 		if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2781dd53d333SJames Smart 					  &ct_params)){
2782dd53d333SJames Smart 			efc_log_err(hw->os, "GEN WQE error\n");
2783dd53d333SJames Smart 			rc = -EIO;
2784dd53d333SJames Smart 		}
2785dd53d333SJames Smart 		break;
2786dd53d333SJames Smart 	}
2787dd53d333SJames Smart 	case EFC_DISC_IO_CT_RESP: {
2788dd53d333SJames Smart 		struct sli_ct_params ct_params;
2789dd53d333SJames Smart 
2790dd53d333SJames Smart 		hio->type = EFCT_HW_FC_CT_RSP;
2791dd53d333SJames Smart 		efct_fill_ct_params(io, &ct_params);
2792dd53d333SJames Smart 		ct_params.xri = hio->indicator;
2793dd53d333SJames Smart 		ct_params.tag = hio->reqtag;
2794dd53d333SJames Smart 		if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2795dd53d333SJames Smart 					    &ct_params)){
2796dd53d333SJames Smart 			efc_log_err(hw->os, "XMIT SEQ WQE error\n");
2797dd53d333SJames Smart 			rc = -EIO;
2798dd53d333SJames Smart 		}
2799dd53d333SJames Smart 		break;
2800dd53d333SJames Smart 	}
2801dd53d333SJames Smart 	default:
2802dd53d333SJames Smart 		efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
2803dd53d333SJames Smart 		rc = -EIO;
2804dd53d333SJames Smart 	}
2805dd53d333SJames Smart 
2806dd53d333SJames Smart 	if (rc == 0) {
2807dd53d333SJames Smart 		hio->xbusy = true;
2808dd53d333SJames Smart 
2809dd53d333SJames Smart 		/*
2810dd53d333SJames Smart 		 * Account for this WQE on the WQ (use_count) before
2811dd53d333SJames Smart 		 * submitting, in case WCQE processing preempts this thread.
2812dd53d333SJames Smart 		 */
2813dd53d333SJames Smart 		hio->wq->use_count++;
2814dd53d333SJames Smart 		rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2815dd53d333SJames Smart 		if (rc >= 0) {
2816dd53d333SJames Smart 			/* non-negative return is success */
2817dd53d333SJames Smart 			rc = 0;
2818dd53d333SJames Smart 		} else {
2819dd53d333SJames Smart 			/* failed to write the wqe; clear the exchange-busy flag */
2820dd53d333SJames Smart 			efc_log_err(hw->os,
2821dd53d333SJames Smart 				    "sli_queue_write failed: %d\n", rc);
2822dd53d333SJames Smart 			hio->xbusy = false;
2823dd53d333SJames Smart 		}
2824dd53d333SJames Smart 	}
2825dd53d333SJames Smart 
2826dd53d333SJames Smart 	return rc;
2827dd53d333SJames Smart }
2828dd53d333SJames Smart 
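/**
 * efct_hw_io_send() - Format and submit a target FCP work queue entry.
 * @hw: Hardware context.
 * @type: IO type: target read (TSEND), target write (TRECEIVE) or target
 *        response (TRSP).
 * @io: Previously allocated HW IO.
 * @iparam: Type-specific WQE parameters.
 * @cb: Completion callback.
 * @arg: Argument passed to @cb.
 *
 * Builds the WQE dictated by @type and writes it to the IO's work queue.
 * For target writes, the XFER_RDY buffer is filled in as well.
 *
 * Return: 0 on success or a negative error code on failure.
 */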
2829dd53d333SJames Smart int
2830dd53d333SJames Smart efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
2831dd53d333SJames Smart 		struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
2832dd53d333SJames Smart 		void *cb, void *arg)
2833dd53d333SJames Smart {
2834dd53d333SJames Smart 	int rc = 0;
2835dd53d333SJames Smart 	bool send_wqe = true;
2836dd53d333SJames Smart 
2837dd53d333SJames Smart 	if (!io) {
2838dd53d333SJames Smart 		pr_err("bad parameter hw=%p io=%p\n", hw, io);
2839dd53d333SJames Smart 		return -EIO;
2840dd53d333SJames Smart 	}
2841dd53d333SJames Smart 
2842dd53d333SJames Smart 	if (hw->state != EFCT_HW_STATE_ACTIVE) {
2843dd53d333SJames Smart 		efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
2844dd53d333SJames Smart 		return -EIO;
2845dd53d333SJames Smart 	}
2846dd53d333SJames Smart 
2847dd53d333SJames Smart 	/*
2848dd53d333SJames Smart 	 * Save state needed during later stages
2849dd53d333SJames Smart 	 */
2850dd53d333SJames Smart 	io->type  = type;
2851dd53d333SJames Smart 	io->done  = cb;
2852dd53d333SJames Smart 	io->arg   = arg;
2853dd53d333SJames Smart 
2854dd53d333SJames Smart 	/*
2855dd53d333SJames Smart 	 * Format the work queue entry used to send the IO
2856dd53d333SJames Smart 	 */
2857dd53d333SJames Smart 	switch (type) {
2858dd53d333SJames Smart 	case EFCT_HW_IO_TARGET_WRITE: {
2859dd53d333SJames Smart 		u16 *flags = &iparam->fcp_tgt.flags;
2860dd53d333SJames Smart 		struct fcp_txrdy *xfer = io->xfer_rdy.virt;
2861dd53d333SJames Smart 
2862dd53d333SJames Smart 		/*
2863dd53d333SJames Smart 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
2864dd53d333SJames Smart 		 */
2865dd53d333SJames Smart 		xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
2866dd53d333SJames Smart 		xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
2867dd53d333SJames Smart 
2868dd53d333SJames Smart 		if (io->xbusy)
2869dd53d333SJames Smart 			*flags |= SLI4_IO_CONTINUATION;
2870dd53d333SJames Smart 		else
2871dd53d333SJames Smart 			*flags &= ~SLI4_IO_CONTINUATION;
2872dd53d333SJames Smart 		iparam->fcp_tgt.xri = io->indicator;
2873dd53d333SJames Smart 		iparam->fcp_tgt.tag = io->reqtag;
2874dd53d333SJames Smart 
2875dd53d333SJames Smart 		if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
2876dd53d333SJames Smart 					   &io->def_sgl, io->first_data_sge,
2877dd53d333SJames Smart 					   SLI4_CQ_DEFAULT,
2878dd53d333SJames Smart 					   0, 0, &iparam->fcp_tgt)) {
2879dd53d333SJames Smart 			efc_log_err(hw->os, "TRECEIVE WQE error\n");
2880dd53d333SJames Smart 			rc = -EIO;
2881dd53d333SJames Smart 		}
2882dd53d333SJames Smart 		break;
2883dd53d333SJames Smart 	}
2884dd53d333SJames Smart 	case EFCT_HW_IO_TARGET_READ: {
2885dd53d333SJames Smart 		u16 *flags = &iparam->fcp_tgt.flags;
2886dd53d333SJames Smart 
2887dd53d333SJames Smart 		if (io->xbusy)
2888dd53d333SJames Smart 			*flags |= SLI4_IO_CONTINUATION;
2889dd53d333SJames Smart 		else
2890dd53d333SJames Smart 			*flags &= ~SLI4_IO_CONTINUATION;
2891dd53d333SJames Smart 
2892dd53d333SJames Smart 		iparam->fcp_tgt.xri = io->indicator;
2893dd53d333SJames Smart 		iparam->fcp_tgt.tag = io->reqtag;
2894dd53d333SJames Smart 
2895dd53d333SJames Smart 		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
2896dd53d333SJames Smart 					&io->def_sgl, io->first_data_sge,
2897dd53d333SJames Smart 					SLI4_CQ_DEFAULT,
2898dd53d333SJames Smart 					0, 0, &iparam->fcp_tgt)) {
2899dd53d333SJames Smart 			efc_log_err(hw->os, "TSEND WQE error\n");
2900dd53d333SJames Smart 			rc = -EIO;
2901dd53d333SJames Smart 		}
2902dd53d333SJames Smart 		break;
2903dd53d333SJames Smart 	}
2904dd53d333SJames Smart 	case EFCT_HW_IO_TARGET_RSP: {
2905dd53d333SJames Smart 		u16 *flags = &iparam->fcp_tgt.flags;
2906dd53d333SJames Smart 
2907dd53d333SJames Smart 		if (io->xbusy)
2908dd53d333SJames Smart 			*flags |= SLI4_IO_CONTINUATION;
2909dd53d333SJames Smart 		else
2910dd53d333SJames Smart 			*flags &= ~SLI4_IO_CONTINUATION;
2911dd53d333SJames Smart 
2912dd53d333SJames Smart 		iparam->fcp_tgt.xri = io->indicator;
2913dd53d333SJames Smart 		iparam->fcp_tgt.tag = io->reqtag;
2914dd53d333SJames Smart 
2915dd53d333SJames Smart 		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
2916dd53d333SJames Smart 				       &io->def_sgl, SLI4_CQ_DEFAULT,
2917dd53d333SJames Smart 				       0, &iparam->fcp_tgt)) {
2918dd53d333SJames Smart 			efc_log_err(hw->os, "TRSP WQE error\n");
2919dd53d333SJames Smart 			rc = -EIO;
2920dd53d333SJames Smart 		}
2921dd53d333SJames Smart 
2922dd53d333SJames Smart 		break;
2923dd53d333SJames Smart 	}
2924dd53d333SJames Smart 	default:
2925dd53d333SJames Smart 		efc_log_err(hw->os, "unsupported IO type %#x\n", type);
2926dd53d333SJames Smart 		rc = -EIO;
2927dd53d333SJames Smart 	}
2928dd53d333SJames Smart 
2929dd53d333SJames Smart 	if (send_wqe && rc == 0) {
2930dd53d333SJames Smart 		io->xbusy = true;
2931dd53d333SJames Smart 
2932dd53d333SJames Smart 		/*
2933dd53d333SJames Smart 		 * Account for this WQE on the WQ (use_count) before
2934dd53d333SJames Smart 		 * submitting, in case WCQE processing preempts this thread.
2935dd53d333SJames Smart 		 */
2936dd53d333SJames Smart 		hw->tcmd_wq_submit[io->wq->instance]++;
2937dd53d333SJames Smart 		io->wq->use_count++;
2938dd53d333SJames Smart 		rc = efct_hw_wq_write(io->wq, &io->wqe);
2939dd53d333SJames Smart 		if (rc >= 0) {
2940dd53d333SJames Smart 			/* non-negative return is success */
2941dd53d333SJames Smart 			rc = 0;
2942dd53d333SJames Smart 		} else {
2943dd53d333SJames Smart 			/* failed to write the wqe; clear the exchange-busy flag */
2944dd53d333SJames Smart 			efc_log_err(hw->os,
2945dd53d333SJames Smart 				    "sli_queue_write failed: %d\n", rc);
2946dd53d333SJames Smart 			io->xbusy = false;
2947dd53d333SJames Smart 		}
2948dd53d333SJames Smart 	}
2949dd53d333SJames Smart 
2950dd53d333SJames Smart 	return rc;
2951dd53d333SJames Smart }
2952dd53d333SJames Smart 
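/**
 * efct_hw_send_frame() - Transmit a raw FC frame with a SEND_FRAME WQE.
 * @hw: Hardware context.
 * @hdr: FC frame header to place on the wire.
 * @sof: Start-of-frame value.
 * @eof: End-of-frame value.
 * @payload: DMA buffer holding the frame payload.
 * @ctx: Caller-provided context holding the WQE and request tag.
 * @callback: Completion callback.
 * @arg: Argument passed to @callback.
 *
 * Allocates a request tag, builds a SEND_FRAME WQE using the send_frame_io
 * XRI reserved for WQ 0, and posts it to that work queue.
 *
 * Return: 0 on success or a negative error code on failure.
 */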
2953dd53d333SJames Smart int
2954dd53d333SJames Smart efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
2955dd53d333SJames Smart 		   u8 sof, u8 eof, struct efc_dma *payload,
2956dd53d333SJames Smart 		   struct efct_hw_send_frame_context *ctx,
2957dd53d333SJames Smart 		   void (*callback)(void *arg, u8 *cqe, int status),
2958dd53d333SJames Smart 		   void *arg)
2959dd53d333SJames Smart {
2960dd53d333SJames Smart 	int rc;
2961dd53d333SJames Smart 	struct efct_hw_wqe *wqe;
2962dd53d333SJames Smart 	u32 xri;
2963dd53d333SJames Smart 	struct hw_wq *wq;
2964dd53d333SJames Smart 
2965dd53d333SJames Smart 	wqe = &ctx->wqe;
2966dd53d333SJames Smart 
2967dd53d333SJames Smart 	/* populate the callback object */
2968dd53d333SJames Smart 	ctx->hw = hw;
2969dd53d333SJames Smart 
2970dd53d333SJames Smart 	/* Fetch and populate request tag */
2971dd53d333SJames Smart 	ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
2972dd53d333SJames Smart 	if (!ctx->wqcb) {
2973dd53d333SJames Smart 		efc_log_err(hw->os, "can't allocate request tag\n");
2974dd53d333SJames Smart 		return -ENOSPC;
2975dd53d333SJames Smart 	}
2976dd53d333SJames Smart 
2977dd53d333SJames Smart 	wq = hw->hw_wq[0];
2978dd53d333SJames Smart 
2979dd53d333SJames Smart 	/* Set the XRI and RX_ID in the header based on which WQ and
2980dd53d333SJames Smart 	 * send_frame_io are being used.
2981dd53d333SJames Smart 	 */
2982dd53d333SJames Smart 	xri = wq->send_frame_io->indicator;
2983dd53d333SJames Smart 
2984dd53d333SJames Smart 	/* Build the send frame WQE */
2985dd53d333SJames Smart 	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
2986dd53d333SJames Smart 				sof, eof, (u32 *)hdr, payload, payload->len,
2987dd53d333SJames Smart 				EFCT_HW_SEND_FRAME_TIMEOUT, xri,
2988dd53d333SJames Smart 				ctx->wqcb->instance_index);
2989dd53d333SJames Smart 	if (rc) {
2990dd53d333SJames Smart 		efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
2991dd53d333SJames Smart 		return -EIO;
2992dd53d333SJames Smart 	}
2993dd53d333SJames Smart 
2994dd53d333SJames Smart 	/* Write to WQ */
2995dd53d333SJames Smart 	rc = efct_hw_wq_write(wq, wqe);
2996dd53d333SJames Smart 	if (rc) {
2997dd53d333SJames Smart 		efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
2998dd53d333SJames Smart 		return -EIO;
2999dd53d333SJames Smart 	}
3000dd53d333SJames Smart 
3001dd53d333SJames Smart 	wq->use_count++;
3002dd53d333SJames Smart 
3003dd53d333SJames Smart 	return 0;
3004dd53d333SJames Smart }
3005