Lines Matching refs:hw

102 static void csio_hw_initialize(struct csio_hw *hw);
103 static void csio_evtq_stop(struct csio_hw *hw);
104 static void csio_evtq_start(struct csio_hw *hw);
106 int csio_is_hw_ready(struct csio_hw *hw)
108 return csio_match_state(hw, csio_hws_ready);
111 int csio_is_hw_removing(struct csio_hw *hw)
113 return csio_match_state(hw, csio_hws_removing);
119 * @hw: the HW module
133 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
138 val = csio_rd_reg32(hw, reg);
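
This helper implements the driver's generic poll-until-done idiom: read a register, test the masked bits against the expected polarity, and retry with a delay until the attempt budget runs out. A minimal, self-contained sketch of that idiom follows; the full parameter list of csio_hw_wait_op_done_val() is not visible in this listing, so the signature below is an assumption, read_reg32() simulates csio_rd_reg32(), and the driver's udelay() is reduced to a no-op.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                       /* simulated device register */

static uint32_t read_reg32(int reg)             /* stand-in for csio_rd_reg32() */
{
        (void)reg;
        return fake_reg++;                      /* pretend the HW makes progress */
}

static int wait_op_done_val(int reg, uint32_t mask, int polarity,
                            int attempts, int delay, uint32_t *valp)
{
        while (1) {
                uint32_t val = read_reg32(reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;               /* condition met */
                }
                if (--attempts == 0)
                        return -1;              /* timed out */
                (void)delay;                    /* udelay(delay) in the driver */
        }
}

int main(void)
{
        uint32_t val = 0;
        int rc = wait_op_done_val(0, 0x4, 1, 10, 5, &val);

        printf("rc=%d val=0x%x\n", rc, val);
        return 0;
}
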
155 * @hw: the adapter
163 csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
166 csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
167 val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
168 csio_wr_reg32(hw, val, TP_PIO_DATA_A);
172 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
175 uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
177 csio_wr_reg32(hw, val | value, reg);
179 csio_rd_reg32(hw, reg);
184 csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
186 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
203 * @hw: hw to read
212 csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
216 uint32_t base = hw->params.pci.vpd_cap_addr;
221 pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
225 pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
229 csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
233 pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
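
csio_hw_seeprom_read() drives the standard PCI VPD read handshake visible above: write the word address (with the flag bit clear) to the capability's address register, poll until the hardware sets the completion flag, then fetch four bytes from the data register. A runnable toy model of that handshake; cfg_write16()/cfg_read16()/cfg_read32() stand in for pci_write_config_word()/pci_read_config_word()/pci_read_config_dword(), and the fake device completes reads instantly.

#include <stdint.h>
#include <stdio.h>

#define PCI_VPD_ADDR  2          /* offsets inside the VPD capability */
#define PCI_VPD_DATA  4
#define PCI_VPD_FLAG  0x8000     /* set by HW when read data is ready */

static uint16_t vpd_addr_reg;            /* toy config space */
static uint32_t vpd_data_reg = 0x82;     /* pretend VPD tag byte */

static void cfg_write16(int off, uint16_t v)
{
        if (off == PCI_VPD_ADDR)
                vpd_addr_reg = v | PCI_VPD_FLAG;   /* read "completes" */
}

static uint16_t cfg_read16(int off) { (void)off; return vpd_addr_reg; }
static uint32_t cfg_read32(int off) { (void)off; return vpd_data_reg; }

static int seeprom_read(uint32_t addr, uint32_t *data)
{
        int attempts = 10;

        cfg_write16(PCI_VPD_ADDR, (uint16_t)addr); /* flag clear = start read */
        do {
                if (cfg_read16(PCI_VPD_ADDR) & PCI_VPD_FLAG) {
                        *data = cfg_read32(PCI_VPD_DATA);
                        return 0;
                }
        } while (--attempts);

        return -1;      /* the "reading EEPROM address ... failed" path */
}

int main(void)
{
        uint32_t word;

        if (seeprom_read(0, &word) == 0)
                printf("VPD word 0: 0x%08x\n", word);
        return 0;
}
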
297 * @hw: HW module
303 csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
311 if (csio_is_valid_vpd(hw))
314 ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
315 &hw->params.pci.vpd_cap_addr);
327 ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
331 ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
339 hw->flags &= (~CSIO_HWF_VPD_VALID);
346 csio_err(hw, "missing VPD keyword " name "\n"); \
357 csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
373 csio_valid_vpd_copied(hw);
381 * @hw: the HW module
392 csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
399 if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
402 csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
404 ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
407 *valp = csio_rd_reg32(hw, SF_DATA_A);
413 * @hw: the HW module
424 csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
429 if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
432 csio_wr_reg32(hw, val, SF_DATA_A);
433 csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
436 return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
442 * @hw: the HW module
449 csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
455 ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
459 ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
474 * @hw: the HW module
486 csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
491 if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
496 ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
500 ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
505 ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
507 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
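
The read path above follows a common SPI-flash sequence: send a fast-read command via csio_hw_sf1_write(), discard a dummy byte, then clock out 32-bit words with csio_hw_sf1_read(), keeping CONT asserted until the last word, and always clear SF_OP_A to unlock the interface. A hedged, self-contained sketch of that flow; the command-word byte layout here is schematic rather than the exact hardware encoding, and the sf1_* stubs simulate a flash that returns incrementing words.

#include <stdint.h>
#include <stdio.h>

#define SF_RD_DATA_FAST 0x0b            /* JEDEC fast-read opcode */

static uint32_t next_word = 0x40000100; /* pretend flash contents */

static int sf1_write(unsigned bytes, int cont, int lock, uint32_t val)
{
        (void)bytes; (void)cont; (void)lock; (void)val;
        return 0;                       /* command accepted */
}

static int sf1_read(unsigned bytes, int cont, int lock, uint32_t *val)
{
        (void)bytes; (void)cont; (void)lock;
        *val = next_word++;
        return 0;
}

static void unlock_sf(void) { }         /* csio_wr_reg32(hw, 0, SF_OP_A) */

static int read_flash(uint32_t addr, unsigned nwords, uint32_t *data)
{
        /* opcode plus 24-bit address; byte ordering is schematic */
        uint32_t cmd = ((uint32_t)SF_RD_DATA_FAST << 24) | (addr & 0xffffff);
        int ret = sf1_write(4, 1, 0, cmd);      /* send cmd, keep CONT */

        if (ret == 0)
                ret = sf1_read(1, 1, 0, data);  /* clock out dummy byte */

        for (; ret == 0 && nwords; nwords--, data++)
                ret = sf1_read(4, nwords > 1, nwords == 1, data);

        unlock_sf();                            /* always release SF_OP */
        return ret;
}

int main(void)
{
        uint32_t buf[2];

        if (read_flash(0, 2, buf) == 0)
                printf("0x%08x 0x%08x\n", buf[0], buf[1]);
        return 0;
}
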
518 * @hw: the hw
527 csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
534 if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
539 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
543 ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
552 ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
556 ret = csio_hw_flash_wait_op(hw, 8, 1);
560 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
563 ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
568 csio_err(hw,
577 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
583 * @hw: the HW module
590 csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
596 ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
600 ret = csio_hw_sf1_write(hw, 4, 0, 1,
605 ret = csio_hw_flash_wait_op(hw, 14, 500);
613 csio_err(hw, "erase of flash sector %d failed, error %d\n",
615 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
620 csio_hw_print_fw_version(struct csio_hw *hw, char *str)
622 csio_info(hw, "%s: %u.%u.%u.%u\n", str,
623 FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
624 FW_HDR_FW_VER_MINOR_G(hw->fwrev),
625 FW_HDR_FW_VER_MICRO_G(hw->fwrev),
626 FW_HDR_FW_VER_BUILD_G(hw->fwrev));
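
The FW_HDR_FW_VER_{MAJOR,MINOR,MICRO,BUILD}_G() accessors unpack one byte each from the packed 32-bit firmware revision word. An equivalent standalone rendering, assuming the conventional major:24 / minor:16 / micro:8 / build:0 layout:

#include <stdint.h>
#include <stdio.h>

#define VER_MAJOR(v) (((v) >> 24) & 0xff)
#define VER_MINOR(v) (((v) >> 16) & 0xff)
#define VER_MICRO(v) (((v) >>  8) & 0xff)
#define VER_BUILD(v) ((v) & 0xff)

int main(void)
{
        uint32_t fwrev = 0x01020304;    /* example: 1.2.3.4 */

        printf("Firmware revision: %u.%u.%u.%u\n",
               VER_MAJOR(fwrev), VER_MINOR(fwrev),
               VER_MICRO(fwrev), VER_BUILD(fwrev));
        return 0;
}
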
631 * @hw: HW module
637 csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
639 return csio_hw_read_flash(hw, FLASH_FW_START +
646 * @hw: HW module
652 csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
654 return csio_hw_read_flash(hw, FLASH_FW_START +
661 * @hw: HW module
668 csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
679 if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
680 csio_err(hw, "Serial Flash data invalid\n");
685 csio_err(hw, "FW image has no data\n");
690 csio_err(hw, "FW image size not multiple of 512 bytes\n");
695 csio_err(hw, "FW image size differs from size in FW header\n");
700 csio_err(hw, "FW image too large, max is %u bytes\n",
709 csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
713 sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
716 csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
719 ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
722 csio_err(hw, "Flash Erase failed\n");
733 ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
737 csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
744 ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
749 ret = csio_hw_write_flash(hw,
757 csio_err(hw, "firmware download failed, error %d\n", ret);
762 csio_hw_get_flash_params(struct csio_hw *hw)
779 ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
781 ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
782 csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
790 hw->params.sf_size = supported_flash[part].size_mb;
791 hw->params.sf_nsec =
792 hw->params.sf_size / SF_SEC_SIZE;
861 csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
867 hw->params.sf_size = size;
868 hw->params.sf_nsec = size / SF_SEC_SIZE;
871 if (hw->params.sf_size < FLASH_MIN_SIZE)
872 csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
873 flashid, hw->params.sf_size, FLASH_MIN_SIZE);
882 csio_hw_dev_ready(struct csio_hw *hw)
888 while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
892 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
899 csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
903 hw->pfn = src_pf;
910 * @hw: HW module
916 csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
927 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
930 CSIO_INC_STATS(hw, n_err_nomem);
935 csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
936 hw->pfn, CSIO_MASTER_MAY, NULL);
938 rv = csio_mb_issue(hw, mbp);
940 csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
944 csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
946 csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
952 if (hw->pfn == mpfn) {
953 hw->flags |= CSIO_HWF_MASTER;
980 spin_unlock_irq(&hw->lock);
982 spin_lock_irq(&hw->lock);
991 pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
1025 hw->flags &= ~CSIO_HWF_MASTER;
1043 if (hw->pfn == mpfn)
1044 csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
1045 hw->pfn, state_str);
1047 csio_info(hw,
1049 hw->pfn, mpfn, state_str);
1052 mempool_free(mbp, hw->mb_mempool);
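
csio_do_hello() is the first of many routines here that repeat one mailbox life cycle: allocate a command buffer from hw->mb_mempool, fill it with a csio_mb_*() initializer, issue it, decode the firmware's return value, and free the buffer on every exit path. A toy rendering of that skeleton; struct mbox and the mb_*() stubs are hypothetical stand-ins, not driver API.

#include <stdio.h>
#include <stdlib.h>

struct mbox { int retval; };            /* toy command buffer */

static struct mbox *mb_alloc(void)      /* mempool_alloc(hw->mb_mempool) */
{
        return calloc(1, sizeof(struct mbox));
}

static void mb_init(struct mbox *mb)   { mb->retval = 0; }     /* csio_mb_hello() etc. */
static int  mb_issue(struct mbox *mb)  { (void)mb; return 0; } /* csio_mb_issue() */
static int  mb_status(struct mbox *mb) { return mb->retval; }  /* response decode */

static int do_mb_command(void)
{
        struct mbox *mb = mb_alloc();
        int rv = -1;

        if (!mb)
                return -1;              /* the n_err_nomem path */

        mb_init(mb);
        if (mb_issue(mb) == 0)
                rv = mb_status(mb);     /* FW retval, 0 on success */

        free(mb);                       /* mempool_free() on every path */
        return rv;
}

int main(void)
{
        printf("mb rv=%d\n", do_mb_command());
        return 0;
}
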
1059 * @hw: HW module
1063 csio_do_bye(struct csio_hw *hw)
1068 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1070 CSIO_INC_STATS(hw, n_err_nomem);
1074 csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
1076 if (csio_mb_issue(hw, mbp)) {
1077 csio_err(hw, "Issue of BYE command failed\n");
1078 mempool_free(mbp, hw->mb_mempool);
1084 mempool_free(mbp, hw->mb_mempool);
1088 mempool_free(mbp, hw->mb_mempool);
1095 * @hw: HW module
1103 csio_do_reset(struct csio_hw *hw, bool fw_rst)
1110 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
1115 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1117 CSIO_INC_STATS(hw, n_err_nomem);
1121 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
1124 if (csio_mb_issue(hw, mbp)) {
1125 csio_err(hw, "Issue of RESET command failed.n");
1126 mempool_free(mbp, hw->mb_mempool);
1132 csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
1133 mempool_free(mbp, hw->mb_mempool);
1137 mempool_free(mbp, hw->mb_mempool);
1143 csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
1151 csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
1156 csio_err(hw, "No FCoE Control Offload capability\n");
1165 * @hw: the HW module
1180 csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
1191 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1193 CSIO_INC_STATS(hw, n_err_nomem);
1197 csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
1201 if (csio_mb_issue(hw, mbp)) {
1202 csio_err(hw, "Issue of RESET command failed!\n");
1203 mempool_free(mbp, hw->mb_mempool);
1208 mempool_free(mbp, hw->mb_mempool);
1225 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
1226 csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
1239 * @hw: the HW module
1259 csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
1267 csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);
1277 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
1279 if (csio_do_reset(hw, true) == 0)
1283 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
1288 csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
1290 if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
1302 * @hw: the HW module
1322 csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
1328 ret = csio_hw_fw_halt(hw, mbox, force);
1332 ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
1345 return csio_hw_fw_restart(hw, mbox, reset);
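
csio_hw_fw_upgrade() sequences the three helpers above: halt the running firmware, burn the new image into flash, then restart (optionally via a full chip reset). A schematic of that control flow under those assumptions, with trivial stand-in stubs:

#include <stdio.h>

static int fw_halt(int force)      { (void)force; return 0; } /* csio_hw_fw_halt() */
static int fw_dload(unsigned size) { (void)size;  return 0; } /* csio_hw_fw_dload() */
static int fw_restart(int reset)   { (void)reset; return 0; } /* csio_hw_fw_restart() */

static int fw_upgrade(unsigned size, int force, int reset)
{
        int ret = fw_halt(force);       /* quiesce the running FW */

        if (ret != 0 && !force)
                return ret;             /* FW refused and we won't force it */

        ret = fw_dload(size);           /* write the new image to flash */
        if (ret != 0)
                return ret;

        return fw_restart(reset);       /* bring the FW back up */
}

int main(void)
{
        printf("upgrade rv=%d\n", fw_upgrade(512, 0, 1));
        return 0;
}
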
1350 * @hw: HW module
1354 csio_get_device_params(struct csio_hw *hw)
1356 struct csio_wrm *wrm = csio_hw_to_wrm(hw);
1364 hw->pport[i].portid = -1;
1366 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1368 CSIO_INC_STATS(hw, n_err_nomem);
1386 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
1388 if (csio_mb_issue(hw, mbp)) {
1389 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
1390 mempool_free(mbp, hw->mb_mempool);
1394 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1397 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
1399 mempool_free(mbp, hw->mb_mempool);
1404 hw->port_vec = param[0];
1405 hw->vpd.cclk = param[1];
1410 if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
1411 !csio_is_hw_master(hw)) {
1412 hw->cfg_niq = param[5] - param[4] + 1;
1413 hw->cfg_neq = param[3] - param[2] + 1;
1414 csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
1415 hw->cfg_niq, hw->cfg_neq);
1418 hw->port_vec &= csio_port_mask;
1420 hw->num_pports = hweight32(hw->port_vec);
1422 csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
1423 hw->port_vec, hw->num_pports);
1425 for (i = 0; i < hw->num_pports; i++) {
1426 while ((hw->port_vec & (1 << j)) == 0)
1428 hw->pport[i].portid = j++;
1429 csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
1431 mempool_free(mbp, hw->mb_mempool);
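
The port-discovery loop above walks hw->port_vec, assigning each set bit's position as a port id, with hweight32() supplying the port count. A standalone rendering of the same bit-walk, using GCC/Clang's __builtin_popcount() in place of hweight32():

#include <stdio.h>

int main(void)
{
        unsigned port_vec = 0x5;        /* example: ports 0 and 2 present */
        int num_pports = __builtin_popcount(port_vec);  /* hweight32() */
        int i, j = 0;

        for (i = 0; i < num_pports; i++) {
                while ((port_vec & (1u << j)) == 0)
                        j++;
                printf("Found Port:%d\n", j++);
        }
        return 0;
}
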
1439 * @hw: HW module
1443 csio_config_device_caps(struct csio_hw *hw)
1449 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1451 CSIO_INC_STATS(hw, n_err_nomem);
1456 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
1458 if (csio_mb_issue(hw, mbp)) {
1459 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
1465 csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
1470 rv = csio_hw_validate_caps(hw, mbp);
1475 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
1481 csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
1484 if (csio_mb_issue(hw, mbp)) {
1485 csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
1491 csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
1497 mempool_free(mbp, hw->mb_mempool);
1772 * @hw: HW module.
1776 csio_enable_ports(struct csio_hw *hw)
1785 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1787 CSIO_INC_STATS(hw, n_err_nomem);
1791 for (i = 0; i < hw->num_pports; i++) {
1792 portid = hw->pport[i].portid;
1801 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
1802 hw->pfn, 0, 1, &param, &val, true,
1805 if (csio_mb_issue(hw, mbp)) {
1806 csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
1808 mempool_free(mbp, hw->mb_mempool);
1812 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1818 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
1821 if (csio_mb_issue(hw, mbp)) {
1822 csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
1824 mempool_free(mbp, hw->mb_mempool);
1828 csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
1831 csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
1833 mempool_free(mbp, hw->mb_mempool);
1837 csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);
1839 csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);
1842 csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
1845 if (csio_mb_issue(hw, mbp)) {
1846 csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
1848 mempool_free(mbp, hw->mb_mempool);
1854 csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
1856 mempool_free(mbp, hw->mb_mempool);
1862 mempool_free(mbp, hw->mb_mempool);
1869 * @hw: HW module
1873 csio_get_fcoe_resinfo(struct csio_hw *hw)
1875 struct csio_fcoe_res_info *res_info = &hw->fres_info;
1880 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1882 CSIO_INC_STATS(hw, n_err_nomem);
1887 csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
1889 if (csio_mb_issue(hw, mbp)) {
1890 csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
1891 mempool_free(mbp, hw->mb_mempool);
1898 csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
1900 mempool_free(mbp, hw->mb_mempool);
1917 csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
1919 mempool_free(mbp, hw->mb_mempool);
1925 csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
1931 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1933 CSIO_INC_STATS(hw, n_err_nomem);
1944 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
1946 if (csio_mb_issue(hw, mbp)) {
1947 csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
1948 mempool_free(mbp, hw->mb_mempool);
1952 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1955 csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
1957 mempool_free(mbp, hw->mb_mempool);
1961 mempool_free(mbp, hw->mb_mempool);
1968 csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
1972 struct pci_dev *pci_dev = hw->pdev;
1985 csio_err(hw, "could not find config file %s, err: %d\n",
2000 if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
2008 ret = csio_memory_write(hw, mtype, maddr,
2022 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
2025 csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
2052 csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
2066 rv = csio_do_reset(hw, true);
2076 spin_unlock_irq(&hw->lock);
2077 rv = csio_hw_flash_config(hw, fw_cfg_param, path);
2078 spin_lock_irq(&hw->lock);
2086 maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
2093 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2095 CSIO_INC_STATS(hw, n_err_nomem);
2105 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
2116 if (csio_mb_issue(hw, mbp)) {
2129 CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
2135 if (csio_mb_issue(hw, mbp)) {
2159 if (csio_mb_issue(hw, mbp)) {
2166 csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
2171 csio_warn(hw,
2177 rv = csio_hw_validate_caps(hw, mbp);
2181 mempool_free(mbp, hw->mb_mempool);
2189 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2192 rv = csio_get_device_params(hw);
2197 csio_wr_sge_init(hw);
2204 csio_post_event(&hw->sm, CSIO_HWE_INIT);
2206 csio_info(hw, "Successfully configure using Firmware "
2216 mempool_free(mbp, hw->mb_mempool);
2217 hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
2218 csio_warn(hw, "Configuration file error %d\n", rv);
2245 static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
2263 csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
2314 static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
2326 ret = csio_hw_read_flash(hw, FLASH_FW_START,
2332 csio_err(hw,
2352 csio_should_install_fs_fw(hw, card_fw_usable,
2355 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
2358 csio_err(hw,
2376 csio_err(hw, "Cannot find a usable firmware: "
2392 hw->fwrev = be32_to_cpu(card_fw->fw_ver);
2393 hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
2406 csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2412 struct pci_dev *pci_dev = hw->pdev;
2421 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
2423 csio_err(hw,
2425 CHELSIO_CHIP_VERSION(hw->chip_id));
2442 csio_err(hw, "could not find firmware image %s, err: %d\n",
2450 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2451 hw->fw_state, reset);
2460 static int csio_hw_check_fwver(struct csio_hw *hw)
2462 if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
2463 (hw->fwrev < CSIO_MIN_T6_FW)) {
2464 csio_hw_print_fw_version(hw, "T6 unsupported fw");
2473 * @hw: HW module
2477 csio_hw_configure(struct csio_hw *hw)
2483 rv = csio_hw_dev_ready(hw);
2485 CSIO_INC_STATS(hw, n_err_fatal);
2486 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2491 hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);
2494 rv = csio_hw_get_flash_params(hw);
2496 csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
2497 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2502 if (pci_is_pcie(hw->pdev))
2503 pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
2506 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
2508 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2512 csio_hw_print_fw_version(hw, "Firmware revision");
2514 rv = csio_do_hello(hw, &hw->fw_state);
2516 CSIO_INC_STATS(hw, n_err_fatal);
2517 csio_post_event(&hw->sm, CSIO_HWE_FATAL);
2522 rv = csio_hw_get_vpd_params(hw, &hw->vpd);
2526 csio_hw_get_fw_version(hw, &hw->fwrev);
2527 csio_hw_get_tp_version(hw, &hw->tp_vers);
2528 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2531 spin_unlock_irq(&hw->lock);
2532 rv = csio_hw_flash_fw(hw, &reset);
2533 spin_lock_irq(&hw->lock);
2538 rv = csio_hw_check_fwver(hw);
2545 rv = csio_hw_check_fwconfig(hw, param);
2547 csio_info(hw, "Firmware doesn't support "
2556 rv = csio_hw_use_fwconfig(hw, reset, param);
2558 csio_info(hw, "Could not initialize "
2563 csio_info(hw, "Could not initialize "
2569 rv = csio_hw_check_fwver(hw);
2573 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2575 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2578 rv = csio_get_device_params(hw);
2583 rv = csio_config_device_caps(hw);
2588 csio_wr_sge_init(hw);
2591 csio_post_event(&hw->sm, CSIO_HWE_INIT);
2602 * @hw: HW module
2606 csio_hw_initialize(struct csio_hw *hw)
2613 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2614 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2618 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
2620 if (csio_mb_issue(hw, mbp)) {
2621 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
2627 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
2632 mempool_free(mbp, hw->mb_mempool);
2635 rv = csio_get_fcoe_resinfo(hw);
2637 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
2641 spin_unlock_irq(&hw->lock);
2642 rv = csio_config_queues(hw);
2643 spin_lock_irq(&hw->lock);
2646 csio_err(hw, "Config of queues failed!: %d\n", rv);
2650 for (i = 0; i < hw->num_pports; i++)
2651 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
2653 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2654 rv = csio_enable_ports(hw);
2656 csio_err(hw, "Failed to enable ports: %d\n", rv);
2661 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
2665 mempool_free(mbp, hw->mb_mempool);
2674 * @hw: Pointer to HW module.
2679 csio_hw_intr_enable(struct csio_hw *hw)
2681 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
2683 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
2685 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2686 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2688 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2694 if (hw->intr_mode == CSIO_IM_MSIX)
2695 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2697 else if (hw->intr_mode == CSIO_IM_MSI)
2698 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2701 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
2704 csio_mb_intr_enable(hw);
2707 if (csio_is_hw_master(hw)) {
2712 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
2714 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
2723 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
2726 hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
2732 * @hw: Pointer to HW module.
2737 csio_hw_intr_disable(struct csio_hw *hw)
2741 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2742 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2744 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2746 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2749 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2751 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
2752 if (csio_is_hw_master(hw))
2753 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
2756 csio_mb_intr_disable(hw);
2761 csio_hw_fatal_err(struct csio_hw *hw)
2763 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
2764 csio_hw_intr_disable(hw);
2767 csio_fatal(hw, "HW Fatal error encountered!\n");
2775 * @hw: HW module
2780 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2782 hw->prev_evt = hw->cur_evt;
2783 hw->cur_evt = evt;
2784 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2788 csio_set_state(&hw->sm, csio_hws_configuring);
2789 csio_hw_configure(hw);
2793 CSIO_INC_STATS(hw, n_evt_unexp);
2800 * @hw: HW module
2805 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
2807 hw->prev_evt = hw->cur_evt;
2808 hw->cur_evt = evt;
2809 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2813 csio_set_state(&hw->sm, csio_hws_initializing);
2814 csio_hw_initialize(hw);
2818 csio_set_state(&hw->sm, csio_hws_ready);
2820 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2824 csio_set_state(&hw->sm, csio_hws_uninit);
2828 csio_do_bye(hw);
2831 CSIO_INC_STATS(hw, n_evt_unexp);
2838 * @hw: HW module
2843 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
2845 hw->prev_evt = hw->cur_evt;
2846 hw->cur_evt = evt;
2847 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2851 csio_set_state(&hw->sm, csio_hws_ready);
2854 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2857 csio_hw_intr_enable(hw);
2861 csio_set_state(&hw->sm, csio_hws_uninit);
2865 csio_do_bye(hw);
2869 CSIO_INC_STATS(hw, n_evt_unexp);
2876 * @hw: HW module
2881 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
2884 hw->evtflag = evt;
2886 hw->prev_evt = hw->cur_evt;
2887 hw->cur_evt = evt;
2888 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2896 csio_set_state(&hw->sm, csio_hws_quiescing);
2900 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
2902 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
2904 csio_hw_intr_disable(hw);
2905 csio_hw_mbm_cleanup(hw);
2906 csio_evtq_stop(hw);
2907 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
2908 csio_evtq_flush(hw);
2909 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
2910 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
2914 csio_set_state(&hw->sm, csio_hws_uninit);
2918 CSIO_INC_STATS(hw, n_evt_unexp);
2925 * @hw: HW module
2930 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2932 hw->prev_evt = hw->cur_evt;
2933 hw->cur_evt = evt;
2934 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2938 switch (hw->evtflag) {
2940 csio_set_state(&hw->sm, csio_hws_resetting);
2945 csio_set_state(&hw->sm, csio_hws_resetting);
2947 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2948 csio_wr_destroy_queues(hw, false);
2949 csio_do_reset(hw, false);
2950 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2954 csio_set_state(&hw->sm, csio_hws_removing);
2955 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2956 csio_wr_destroy_queues(hw, true);
2958 csio_do_bye(hw);
2962 csio_set_state(&hw->sm, csio_hws_quiesced);
2966 csio_set_state(&hw->sm, csio_hws_pcierr);
2967 csio_wr_destroy_queues(hw, false);
2971 CSIO_INC_STATS(hw, n_evt_unexp);
2978 CSIO_INC_STATS(hw, n_evt_unexp);
2985 * @hw: HW module
2990 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2992 hw->prev_evt = hw->cur_evt;
2993 hw->cur_evt = evt;
2994 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2998 csio_set_state(&hw->sm, csio_hws_configuring);
2999 csio_hw_configure(hw);
3003 CSIO_INC_STATS(hw, n_evt_unexp);
3010 * @hw: HW module
3015 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
3017 hw->prev_evt = hw->cur_evt;
3018 hw->cur_evt = evt;
3019 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3023 csio_evtq_start(hw);
3024 csio_set_state(&hw->sm, csio_hws_configuring);
3025 csio_hw_configure(hw);
3029 CSIO_INC_STATS(hw, n_evt_unexp);
3036 * @hw: HW module
3041 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
3043 hw->prev_evt = hw->cur_evt;
3044 hw->cur_evt = evt;
3045 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3049 if (!csio_is_hw_master(hw))
3056 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
3057 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
3063 CSIO_INC_STATS(hw, n_evt_unexp);
3071 * @hw: HW module
3076 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
3078 hw->prev_evt = hw->cur_evt;
3079 hw->cur_evt = evt;
3080 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3084 csio_evtq_start(hw);
3085 csio_set_state(&hw->sm, csio_hws_configuring);
3086 csio_hw_configure(hw);
3090 CSIO_INC_STATS(hw, n_evt_unexp);
3101 * @hw: HW instance
3113 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3118 unsigned int status = csio_rd_reg32(hw, reg);
3125 csio_fatal(hw, "Fatal %s (0x%x)\n",
3128 csio_info(hw, "%s (0x%x)\n",
3134 csio_wr_reg32(hw, status, reg);
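
csio_handle_intr_status() is the table-driven core of the per-block handlers that follow: it reads one cause register, walks an array of { mask, message, fatal } descriptors, logs every set bit, acknowledges only the decoded bits, and reports whether anything fatal was seen. A runnable sketch of that walker; the exact fields of the driver's intr_info entries are an assumption inferred from this usage.

#include <stdint.h>
#include <stdio.h>

struct intr_info {
        uint32_t    mask;       /* bits this entry covers */
        const char *msg;        /* what they mean */
        int         fatal;      /* escalate to csio_hw_fatal_err()? */
};

static uint32_t cause_reg = 0x3;        /* simulated cause register */

static uint32_t rd_reg32(void)       { return cause_reg; }
static void     wr_reg32(uint32_t v) { cause_reg &= ~v; }  /* write-1-to-clear */

static int handle_intr_status(const struct intr_info *acts)
{
        uint32_t status = rd_reg32(), mask = 0;
        int fatal = 0;

        for (; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        printf("Fatal %s (0x%x)\n", acts->msg,
                               status & acts->mask);
                } else {
                        printf("%s (0x%x)\n", acts->msg,
                               status & acts->mask);
                }
                mask |= acts->mask;
        }

        status &= mask;
        if (status)
                wr_reg32(status);       /* ack only the decoded bits */
        return fatal;
}

int main(void)
{
        static const struct intr_info table[] = {
                { 0x1, "parity error",  1 },
                { 0x2, "FIFO overflow", 0 },
                { 0, NULL, 0 }          /* sentinel */
        };

        printf("fatal=%d\n", handle_intr_status(table));
        return 0;
}
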
3141 static void csio_tp_intr_handler(struct csio_hw *hw)
3149 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
3150 csio_hw_fatal_err(hw);
3156 static void csio_sge_intr_handler(struct csio_hw *hw)
3186 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
3187 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
3189 csio_fatal(hw, "SGE parity error (%#llx)\n",
3191 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
3193 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
3196 v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
3198 if (csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info) ||
3200 csio_hw_fatal_err(hw);
3211 static void csio_cim_intr_handler(struct csio_hw *hw)
3257 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
3259 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
3262 csio_hw_fatal_err(hw);
3268 static void csio_ulprx_intr_handler(struct csio_hw *hw)
3276 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
3277 csio_hw_fatal_err(hw);
3283 static void csio_ulptx_intr_handler(struct csio_hw *hw)
3298 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3299 csio_hw_fatal_err(hw);
3305 static void csio_pmtx_intr_handler(struct csio_hw *hw)
3321 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3322 csio_hw_fatal_err(hw);
3328 static void csio_pmrx_intr_handler(struct csio_hw *hw)
3341 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3342 csio_hw_fatal_err(hw);
3348 static void csio_cplsw_intr_handler(struct csio_hw *hw)
3360 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
3361 csio_hw_fatal_err(hw);
3367 static void csio_le_intr_handler(struct csio_hw *hw)
3369 enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
3389 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
3392 csio_hw_fatal_err(hw);
3398 static void csio_mps_intr_handler(struct csio_hw *hw)
3444 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
3446 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
3448 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
3450 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3452 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3454 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3456 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
3459 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
3460 csio_rd_reg32(hw, MPS_INT_CAUSE_A); /* flush */
3462 csio_hw_fatal_err(hw);
3471 static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
3485 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
3487 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
3489 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
3491 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
3492 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3496 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
3498 csio_wr_reg32(hw, v, addr);
3500 csio_hw_fatal_err(hw);
3506 static void csio_ma_intr_handler(struct csio_hw *hw)
3508 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
3511 csio_fatal(hw, "MA parity error, parity status %#x\n",
3512 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
3514 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
3515 csio_fatal(hw,
3519 csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
3520 csio_hw_fatal_err(hw);
3526 static void csio_smb_intr_handler(struct csio_hw *hw)
3535 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
3536 csio_hw_fatal_err(hw);
3542 static void csio_ncsi_intr_handler(struct csio_hw *hw)
3552 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
3553 csio_hw_fatal_err(hw);
3559 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3561 uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3568 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3570 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3571 csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3572 csio_hw_fatal_err(hw);
3578 static void csio_pl_intr_handler(struct csio_hw *hw)
3586 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
3587 csio_hw_fatal_err(hw);
3592 * @hw: HW module
3599 csio_hw_slow_intr_handler(struct csio_hw *hw)
3601 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
3604 CSIO_INC_STATS(hw, n_plint_unexp);
3608 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
3610 CSIO_INC_STATS(hw, n_plint_cnt);
3613 csio_cim_intr_handler(hw);
3616 csio_mps_intr_handler(hw);
3619 csio_ncsi_intr_handler(hw);
3622 csio_pl_intr_handler(hw);
3625 csio_smb_intr_handler(hw);
3628 csio_xgmac_intr_handler(hw, 0);
3631 csio_xgmac_intr_handler(hw, 1);
3634 csio_xgmac_intr_handler(hw, 2);
3637 csio_xgmac_intr_handler(hw, 3);
3640 hw->chip_ops->chip_pcie_intr_handler(hw);
3643 csio_mem_intr_handler(hw, MEM_MC);
3646 csio_mem_intr_handler(hw, MEM_EDC0);
3649 csio_mem_intr_handler(hw, MEM_EDC1);
3652 csio_le_intr_handler(hw);
3655 csio_tp_intr_handler(hw);
3658 csio_ma_intr_handler(hw);
3661 csio_pmtx_intr_handler(hw);
3664 csio_pmrx_intr_handler(hw);
3667 csio_ulprx_intr_handler(hw);
3670 csio_cplsw_intr_handler(hw);
3673 csio_sge_intr_handler(hw);
3676 csio_ulptx_intr_handler(hw);
3679 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
3680 csio_rd_reg32(hw, PL_INT_CAUSE_A); /* flush */
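
The slow-path handler is a classic cause-register dispatcher: read PL_INT_CAUSE_A once, invoke the per-block handler for every set bit, then write the handled bits back to acknowledge them and read the register again to flush the posted write. A toy model of that shape, with hypothetical cause bits:

#include <stdint.h>
#include <stdio.h>

#define CIM_F (1u << 0)                 /* hypothetical cause bits */
#define SGE_F (1u << 1)
#define GLBL_INTR_MASK (CIM_F | SGE_F)

static uint32_t pl_int_cause = SGE_F;   /* simulated PL_INT_CAUSE */

static uint32_t rd_cause(void)        { return pl_int_cause; }
static void     ack_cause(uint32_t v) { pl_int_cause &= ~v; } /* W1C */

static void cim_handler(void) { puts("CIM interrupt"); }
static void sge_handler(void) { puts("SGE interrupt"); }

static int slow_intr_handler(void)
{
        uint32_t cause = rd_cause() & GLBL_INTR_MASK;

        if (!cause)
                return 0;               /* stray: the n_plint_unexp path */

        if (cause & CIM_F)
                cim_handler();
        if (cause & SGE_F)
                sge_handler();

        ack_cause(cause);               /* ack only what was handled */
        (void)rd_cause();               /* read back to flush */
        return 1;
}

int main(void)
{
        return slow_intr_handler() ? 0 : 1;
}
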
3698 struct csio_hw *hw = (struct csio_hw *)data;
3699 struct csio_mbm *mbm = &hw->mbm;
3706 spin_lock_irq(&hw->lock);
3708 spin_unlock_irq(&hw->lock);
3720 rv = csio_mb_issue(hw, mbp_next);
3726 spin_unlock_irq(&hw->lock);
3729 csio_mb_completions(hw, &cbfn_q);
3742 struct csio_hw *hw = mbm->hw;
3745 spin_lock_irq(&hw->lock);
3746 mbp = csio_mb_tmo_handler(hw);
3747 spin_unlock_irq(&hw->lock);
3751 mbp->mb_cbfn(hw, mbp);
3757 * @hw: HW module
3765 csio_hw_mbm_cleanup(struct csio_hw *hw)
3769 csio_mb_cancel_all(hw, &cbfn_q);
3771 spin_unlock_irq(&hw->lock);
3772 csio_mb_completions(hw, &cbfn_q);
3773 spin_lock_irq(&hw->lock);
3780 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3791 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3794 if (list_empty(&hw->evt_free_q)) {
3795 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3800 evt_entry = list_first_entry(&hw->evt_free_q,
3807 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3809 CSIO_DEC_STATS(hw, n_evt_freeq);
3810 CSIO_INC_STATS(hw, n_evt_activeq);
3816 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3831 spin_lock_irqsave(&hw->lock, flags);
3832 if (hw->flags & CSIO_HWF_FWEVT_STOP) {
3837 if (list_empty(&hw->evt_free_q)) {
3838 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3844 evt_entry = list_first_entry(&hw->evt_free_q,
3863 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3864 CSIO_DEC_STATS(hw, n_evt_freeq);
3865 CSIO_INC_STATS(hw, n_evt_activeq);
3867 spin_unlock_irqrestore(&hw->lock, flags);
3872 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3875 spin_lock_irq(&hw->lock);
3877 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3878 CSIO_DEC_STATS(hw, n_evt_activeq);
3879 CSIO_INC_STATS(hw, n_evt_freeq);
3880 spin_unlock_irq(&hw->lock);
3885 csio_evtq_flush(struct csio_hw *hw)
3889 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3890 spin_unlock_irq(&hw->lock);
3892 spin_lock_irq(&hw->lock);
3895 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3899 csio_evtq_stop(struct csio_hw *hw)
3901 hw->flags |= CSIO_HWF_FWEVT_STOP;
3905 csio_evtq_start(struct csio_hw *hw)
3907 hw->flags &= ~CSIO_HWF_FWEVT_STOP;
3911 csio_evtq_cleanup(struct csio_hw *hw)
3916 if (!list_empty(&hw->evt_active_q))
3917 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
3919 hw->stats.n_evt_activeq = 0;
3920 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3923 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
3925 CSIO_DEC_STATS(hw, n_evt_freeq);
3928 hw->stats.n_evt_freeq = 0;
3933 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3943 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3945 CSIO_INC_STATS(hw, n_cpl_unexp);
3954 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3960 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3961 CSIO_INC_STATS(hw, n_cpl_unexp);
3969 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3971 CSIO_INC_STATS(hw, n_evt_drop);
3977 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
3986 csio_dbg(hw, "event worker thread active evts#%d\n",
3987 hw->stats.n_evt_activeq);
3989 spin_lock_irq(&hw->lock);
3990 while (!list_empty(&hw->evt_active_q)) {
3991 list_splice_tail_init(&hw->evt_active_q, &evt_q);
3992 spin_unlock_irq(&hw->lock);
3998 spin_lock_irq(&hw->lock);
3999 if (hw->flags & CSIO_HWF_FWEVT_STOP)
4001 spin_unlock_irq(&hw->lock);
4003 CSIO_INC_STATS(hw, n_evt_drop);
4014 rv = csio_mb_fwevt_handler(hw,
4019 csio_fcoe_fwevt_handler(hw,
4023 csio_fcoe_fwevt_handler(hw,
4026 csio_warn(hw,
4029 CSIO_INC_STATS(hw, n_evt_drop);
4034 csio_mberr_worker(hw);
4043 csio_warn(hw, "Unhandled event %x on evtq\n",
4045 CSIO_INC_STATS(hw, n_evt_unexp);
4049 csio_free_evt(hw, evt_msg);
4052 spin_lock_irq(&hw->lock);
4054 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
4055 spin_unlock_irq(&hw->lock);
4059 csio_fwevtq_handler(struct csio_hw *hw)
4063 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
4064 CSIO_INC_STATS(hw, n_int_stray);
4068 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
4114 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
4116 spin_lock_irq(&mgmtm->hw->lock);
4129 io_req->io_cbfn(mgmtm->hw, io_req);
4140 spin_unlock_irq(&mgmtm->hw->lock);
4146 struct csio_hw *hw = mgmtm->hw;
4154 spin_unlock_irq(&hw->lock);
4156 spin_lock_irq(&hw->lock);
4168 io_req->io_cbfn(mgmtm->hw, io_req);
4176 * @hw: HW module
4188 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
4195 mgmtm->hw = hw;
4196 /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
4219 * @hw: Pointer to HW module.
4226 csio_hw_start(struct csio_hw *hw)
4228 spin_lock_irq(&hw->lock);
4229 csio_post_event(&hw->sm, CSIO_HWE_CFG);
4230 spin_unlock_irq(&hw->lock);
4232 if (csio_is_hw_ready(hw))
4234 else if (csio_match_state(hw, csio_hws_uninit))
4241 csio_hw_stop(struct csio_hw *hw)
4243 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
4245 if (csio_is_hw_removing(hw))
4256 * @hw: HW module.
4261 csio_hw_reset(struct csio_hw *hw)
4263 if (!csio_is_hw_master(hw))
4266 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
4267 csio_dbg(hw, "Max hw reset attempts reached..");
4271 hw->rst_retries++;
4272 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
4274 if (csio_is_hw_ready(hw)) {
4275 hw->rst_retries = 0;
4276 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
4284 * @hw: HW module.
4287 csio_hw_get_device_id(struct csio_hw *hw)
4290 if (csio_is_dev_id_cached(hw))
4294 pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
4295 &hw->params.pci.vendor_id);
4296 pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
4297 &hw->params.pci.device_id);
4299 csio_dev_id_cached(hw);
4300 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
4305 * csio_hw_set_description - Set the model, description of the hw.
4306 * @hw: HW module.
4311 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
4320 memcpy(hw->hw_ver,
4322 memcpy(hw->model_desc,
4327 memcpy(hw->model_desc, tempName, 32);
4334 * @hw: Pointer to HW module.
4339 csio_hw_init(struct csio_hw *hw)
4346 INIT_LIST_HEAD(&hw->sm.sm_list);
4347 csio_init_state(&hw->sm, csio_hws_uninit);
4348 spin_lock_init(&hw->lock);
4349 INIT_LIST_HEAD(&hw->sln_head);
4352 csio_hw_get_device_id(hw);
4354 strcpy(hw->name, CSIO_HW_NAME);
4357 hw->chip_ops = &t5_ops;
4361 ven_id = hw->params.pci.vendor_id;
4362 dev_id = hw->params.pci.device_id;
4364 csio_hw_set_description(hw, ven_id, dev_id);
4367 hw->params.log_level = (uint32_t) csio_dbg_level;
4369 csio_set_fwevt_intr_idx(hw, -1);
4370 csio_set_nondata_intr_idx(hw, -1);
4373 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
4376 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
4380 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
4384 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
4388 INIT_LIST_HEAD(&hw->evt_active_q);
4389 INIT_LIST_HEAD(&hw->evt_free_q);
4395 csio_err(hw, "Failed to initialize eventq");
4399 list_add_tail(&evt_entry->list, &hw->evt_free_q);
4400 CSIO_INC_STATS(hw, n_evt_freeq);
4403 hw->dev_num = dev_num;
4409 csio_evtq_cleanup(hw);
4410 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4412 csio_scsim_exit(csio_hw_to_scsim(hw));
4414 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4416 csio_mbm_exit(csio_hw_to_mbm(hw));
4423 * @hw: Pointer to HW module.
4427 csio_hw_exit(struct csio_hw *hw)
4429 csio_evtq_cleanup(hw);
4430 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4431 csio_scsim_exit(csio_hw_to_scsim(hw));
4432 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4433 csio_mbm_exit(csio_hw_to_mbm(hw));