Lines matching "hb", "pll", and "clock" in drivers/scsi/bfa/bfa_ioc.c
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
37 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
39 #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
42 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
44 #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
59 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
61 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
63 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
65 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
67 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
69 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
72 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
73 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
151 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
153 #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
156 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
160 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
162 #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
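The macros above fold two recurring mechanisms into one-liners: timer start/stop wrappers and dispatch through the per-ASIC ioc_hwif function-pointer table, which keeps the common IOC code independent of the CB/CT/CT2 register layouts. A minimal standalone sketch of that dispatch pattern (hypothetical type, field, and function names, not the driver's real ones) is:

#include <stdio.h>

/* hypothetical hardware-interface ops table, one instance per ASIC generation */
struct ioc_hwif {
	int  (*firmware_lock)(void *ioc);
	void (*notify_fail)(void *ioc);
};

struct ioc {
	const struct ioc_hwif *hwif;   /* selected when the PCI device is probed */
};

/* dispatch macros analogous to bfa_ioc_firmware_lock()/bfa_ioc_notify_fail() */
#define ioc_firmware_lock(__ioc)  ((__ioc)->hwif->firmware_lock(__ioc))
#define ioc_notify_fail(__ioc)    ((__ioc)->hwif->notify_fail(__ioc))

static int ct_fw_lock(void *ioc)      { (void)ioc; puts("ct lock"); return 1; }
static void ct_notify_fail(void *ioc) { (void)ioc; puts("ct fail"); }

static const struct ioc_hwif hwif_ct = {
	.firmware_lock = ct_fw_lock,
	.notify_fail   = ct_notify_fail,
};

int main(void)
{
	struct ioc ioc = { .hwif = &hwif_ct };

	if (ioc_firmware_lock(&ioc))
		ioc_notify_fail(&ioc);
	return 0;
}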
269 * Reset entry actions -- initialize state machine
274 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); in bfa_ioc_sm_reset_entry()
307 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); in bfa_ioc_sm_enabling_entry()
327 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
330 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_enabling()
334 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_enabling()
344 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_enabling()
390 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_getattr()
393 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); in bfa_ioc_sm_getattr()
435 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_acq_addr()
438 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); in bfa_ioc_sm_acq_addr()
457 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_op_entry()
459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); in bfa_ioc_sm_op_entry()
484 if (ioc->iocpf.auto_recover) in bfa_ioc_sm_op()
492 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_op()
504 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_sm_disabling_entry()
505 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); in bfa_ioc_sm_disabling_entry()
529 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); in bfa_ioc_sm_disabling()
562 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_disabled()
567 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_disabled()
600 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
603 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); in bfa_ioc_sm_fail_retry()
607 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail_retry()
620 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail_retry()
646 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_fail()
655 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); in bfa_ioc_sm_fail()
660 * HB failure notification, ignore. in bfa_ioc_sm_fail()
681 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_sm_hwfail()
685 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_sm_hwfail()
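The bfa_ioc_sm_* handlers above implement the IOC state machine: the current state is simply a function pointer, bfa_fsm_set_state() repoints it (and in the driver also runs the new state's _entry() action), and bfa_fsm_send_event() calls the current state with an event. A compilable sketch of that pattern, with hypothetical state and event names, is:

#include <stdio.h>

enum ioc_event { IOC_E_ENABLE, IOC_E_ENABLED, IOC_E_DISABLE };

struct fsm;
typedef void (*state_fn)(struct fsm *sm, enum ioc_event ev);

struct fsm {
	state_fn state;                /* current state is just a function pointer */
};

/* simplified stand-ins for bfa_fsm_set_state()/bfa_fsm_send_event() */
#define fsm_set_state(sm, fn)  ((sm)->state = (fn))
#define fsm_send_event(sm, ev) ((sm)->state((sm), (ev)))

static void sm_op(struct fsm *sm, enum ioc_event ev);

static void sm_enabling(struct fsm *sm, enum ioc_event ev)
{
	if (ev == IOC_E_ENABLED)
		fsm_set_state(sm, sm_op);           /* transition on event */
}

static void sm_op(struct fsm *sm, enum ioc_event ev)
{
	(void)sm;
	if (ev == IOC_E_DISABLE)
		printf("disabling\n");
}

int main(void)
{
	struct fsm sm;

	fsm_set_state(&sm, sm_enabling);
	fsm_send_event(&sm, IOC_E_ENABLED);         /* enabling -> op */
	fsm_send_event(&sm, IOC_E_DISABLE);
	return 0;
}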
702 * Reset entry actions -- initialize state machine
707 iocpf->fw_mismatch_notified = BFA_FALSE; in bfa_iocpf_sm_reset_entry()
708 iocpf->auto_recover = bfa_auto_recover; in bfa_iocpf_sm_reset_entry()
717 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_reset()
741 u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); in bfa_iocpf_sm_fwcheck_entry()
747 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); in bfa_iocpf_sm_fwcheck_entry()
752 bfa_trc(iocpf->ioc, fwstate); in bfa_iocpf_sm_fwcheck_entry()
753 bfa_trc(iocpf->ioc, fwhdr.exec); in bfa_iocpf_sm_fwcheck_entry()
754 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); in bfa_iocpf_sm_fwcheck_entry()
759 readl(iocpf->ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
760 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck_entry()
762 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fwcheck_entry()
771 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fwcheck()
783 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
787 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fwcheck()
822 if (iocpf->fw_mismatch_notified == BFA_FALSE) in bfa_iocpf_sm_mismatch_entry()
823 bfa_ioc_pf_fwmismatch(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
825 iocpf->fw_mismatch_notified = BFA_TRUE; in bfa_iocpf_sm_mismatch_entry()
826 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_mismatch_entry()
835 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_mismatch()
866 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_semwait_entry()
875 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_semwait()
885 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_semwait()
908 iocpf->poll_time = 0; in bfa_iocpf_sm_hwinit_entry()
909 bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); in bfa_iocpf_sm_hwinit_entry()
919 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_hwinit()
929 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
937 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_hwinit()
949 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
953 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); in bfa_iocpf_sm_enabling_entry()
954 bfa_ioc_send_enable(iocpf->ioc); in bfa_iocpf_sm_enabling_entry()
964 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_enabling()
971 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
982 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
990 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_enabling()
1002 bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); in bfa_iocpf_sm_ready_entry()
1008 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_ready()
1033 bfa_iocpf_timer_start(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1034 bfa_ioc_send_disable(iocpf->ioc); in bfa_iocpf_sm_disabling_entry()
1043 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling()
1060 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); in bfa_iocpf_sm_disabling()
1075 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_disabling_sync_entry()
1079 * IOC hb ack request is being removed.
1084 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabling_sync()
1091 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_disabling_sync()
1114 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_disabled_entry()
1115 bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); in bfa_iocpf_sm_disabled_entry()
1121 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_disabled()
1143 bfa_ioc_debug_save_ftrc(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1144 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_initfail_sync_entry()
1153 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail_sync()
1161 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); in bfa_iocpf_sm_initfail_sync()
1162 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_initfail_sync()
1193 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_initfail_entry()
1202 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_initfail()
1227 bfa_ioc_lpu_stop(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1232 bfa_ioc_mbox_flush(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1234 bfa_ioc_hw_sem_get(iocpf->ioc); in bfa_iocpf_sm_fail_sync_entry()
1240 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail_sync()
1248 if (!iocpf->auto_recover) { in bfa_iocpf_sm_fail_sync()
1250 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); in bfa_iocpf_sm_fail_sync()
1251 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1257 writel(1, ioc->ioc_regs.ioc_sem_reg); in bfa_iocpf_sm_fail_sync()
1284 bfa_trc(iocpf->ioc, 0); in bfa_iocpf_sm_fail_entry()
1293 struct bfa_ioc_s *ioc = iocpf->ioc; in bfa_iocpf_sm_fail()
1320 list_for_each(qe, &ioc->notify_q) { in bfa_ioc_event_notify()
1322 notify->cbfn(notify->cbarg, event); in bfa_ioc_event_notify()
1329 ioc->cbfn->disable_cbfn(ioc->bfa); in bfa_ioc_disable_comp()
1363 r32 = readl(ioc->ioc_regs.ioc_sem_reg); in bfa_ioc_hw_sem_get()
1366 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); in bfa_ioc_hw_sem_get()
1370 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); in bfa_ioc_hw_sem_get()
1387 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1392 * i2c workaround 12.5khz clock in bfa_ioc_lmem_init()
1395 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1402 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1414 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lmem_init()
1425 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1428 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_start()
1439 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1442 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); in bfa_ioc_lpu_stop()
1456 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_fwver_get()
1458 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_fwver_get()
1463 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_fwver_get()
1481 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { in bfa_ioc_fwver_cmp()
1483 bfa_trc(ioc, fwhdr->md5sum[i]); in bfa_ioc_fwver_cmp()
1484 bfa_trc(ioc, drv_fwhdr->md5sum[i]); in bfa_ioc_fwver_cmp()
1489 bfa_trc(ioc, fwhdr->md5sum[0]); in bfa_ioc_fwver_cmp()
1506 if (fwhdr.signature != drv_fwhdr->signature) { in bfa_ioc_fwver_valid()
1508 bfa_trc(ioc, drv_fwhdr->signature); in bfa_ioc_fwver_valid()
1529 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1531 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgflush()
1542 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_hwinit()
1575 * just re-enable IOC. in bfa_ioc_hwinit()
1584 * When using MSI-X any pending firmware ready event should in bfa_ioc_hwinit()
1585 * be flushed. Otherwise MSI-X interrupts are not delivered. in bfa_ioc_hwinit()
1588 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_hwinit()
1624 ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1627 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); in bfa_ioc_mbox_send()
1632 writel(1, ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
1633 (void) readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_send()
1644 enable_req.clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_enable()
1667 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); in bfa_ioc_send_getattr()
1677 hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_check()
1678 if (ioc->hb_count == hb_count) { in bfa_ioc_hb_check()
1682 ioc->hb_count = hb_count; in bfa_ioc_hb_check()
1692 ioc->hb_count = readl(ioc->ioc_regs.heartbeat); in bfa_ioc_hb_monitor()
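bfa_ioc_hb_check() samples the firmware heartbeat counter from a timer: if the counter has not advanced since the previous sample the IOC is declared failed, otherwise the sample is saved and the timer re-armed (bfa_ioc_hb_monitor() seeds the first sample). A standalone sketch of that check, using a plain variable in place of the memory-mapped heartbeat register, is:

#include <stdint.h>
#include <stdio.h>

/* hypothetical device context; hb_reg stands in for the memory-mapped
 * heartbeat counter that the firmware increments while it is alive */
struct dev {
	volatile uint32_t hb_reg;
	uint32_t last_hb;
};

/* called from a periodic timer, like bfa_ioc_hb_check() */
static int hb_check(struct dev *d)
{
	uint32_t now = d->hb_reg;          /* readl(heartbeat) in the driver */

	if (now == d->last_hb)
		return -1;                 /* counter stalled: firmware is dead */

	d->last_hb = now;                  /* progress seen, re-arm the timer */
	return 0;
}

int main(void)
{
	struct dev d = { .hb_reg = 5, .last_hb = 5 };

	printf("%d\n", hb_check(&d));      /* -1: no progress since last sample */
	d.hb_reg = 6;
	printf("%d\n", hb_check(&d));      /* 0: firmware advanced the counter */
	return 0;
}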
1718 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_ioc_download_fw()
1721 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1734 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, in bfa_ioc_download_fw()
1745 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1749 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_download_fw()
1750 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_download_fw()
1755 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, in bfa_ioc_download_fw()
1756 ioc->port0_mode, ioc->port1_mode); in bfa_ioc_download_fw()
1757 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, in bfa_ioc_download_fw()
1759 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF, in bfa_ioc_download_fw()
1761 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, in bfa_ioc_download_fw()
1772 struct bfi_ioc_attr_s *attr = ioc->attr; in bfa_ioc_getattr_reply()
1774 attr->adapter_prop = be32_to_cpu(attr->adapter_prop); in bfa_ioc_getattr_reply()
1775 attr->card_type = be32_to_cpu(attr->card_type); in bfa_ioc_getattr_reply()
1776 attr->maxfrsize = be16_to_cpu(attr->maxfrsize); in bfa_ioc_getattr_reply()
1777 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC); in bfa_ioc_getattr_reply()
1788 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_attach()
1791 INIT_LIST_HEAD(&mod->cmd_q); in bfa_ioc_mbox_attach()
1793 mod->mbhdlr[mc].cbfn = NULL; in bfa_ioc_mbox_attach()
1794 mod->mbhdlr[mc].cbarg = ioc->bfa; in bfa_ioc_mbox_attach()
1799 * Mbox poll timer -- restarts any pending mailbox requests.
1804 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_poll()
1811 if (list_empty(&mod->cmd_q)) in bfa_ioc_mbox_poll()
1817 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_poll()
1824 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_poll()
1825 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_poll()
1834 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_flush()
1837 while (!list_empty(&mod->cmd_q)) in bfa_ioc_mbox_flush()
1838 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_flush()
1857 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_read()
1864 * Hold semaphore to serialize pll init and fwtrc. in bfa_ioc_smem_read()
1866 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_read()
1871 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
1876 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_ioc_smem_read()
1886 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
1889 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_read()
1890 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_read()
1894 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
1895 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_read()
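bfa_ioc_smem_read() reaches shared memory through a page window: host_page_num_fn selects which SMEM page is visible, the offset wraps within the page, and the window register is rewritten each time the offset crosses a page boundary. A small simulation of that windowed access, with an invented page size and register model, is:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 32u   /* hypothetical window size; the hardware window is larger */

/* simulated device: only one page of shared memory is visible at a time */
static uint32_t smem[4 * PAGE_SZ / 4];
static uint32_t cur_page;

static void set_page(uint32_t pg)      { cur_page = pg; }   /* writel(pgnum) */
static uint32_t win_read(uint32_t off) { return smem[(cur_page * PAGE_SZ + off) / 4]; }

/* read 'words' 32-bit words starting at byte offset 'soff', like bfa_ioc_smem_read() */
static void smem_read(uint32_t *buf, uint32_t soff, uint32_t words)
{
	uint32_t loff = soff % PAGE_SZ;
	uint32_t i;

	set_page(soff / PAGE_SZ);
	for (i = 0; i < words; i++) {
		buf[i] = win_read(loff);
		loff += 4;
		if (loff == PAGE_SZ) {          /* crossed the window: move it */
			loff = 0;
			set_page(cur_page + 1);
		}
	}
}

int main(void)
{
	uint32_t out[8], i;

	for (i = 0; i < sizeof(smem) / 4; i++)
		smem[i] = i;
	smem_read(out, 24, 8);                  /* spans a page boundary */
	for (i = 0; i < 8; i++)
		printf("%u ", (unsigned)out[i]);
	putchar('\n');
	return 0;
}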
1914 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); in bfa_ioc_smem_clr()
1921 * Hold semaphore to serialize pll init and fwtrc. in bfa_ioc_smem_clr()
1923 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { in bfa_ioc_smem_clr()
1928 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
1933 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); in bfa_ioc_smem_clr()
1942 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
1945 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), in bfa_ioc_smem_clr()
1946 ioc->ioc_regs.host_page_num_fn); in bfa_ioc_smem_clr()
1951 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
1952 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_smem_clr()
1960 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_fail_notify()
1965 ioc->cbfn->hbfail_cbfn(ioc->bfa); in bfa_ioc_fail_notify()
1979 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_pf_fwmismatch()
1983 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); in bfa_ioc_pf_fwmismatch()
1997 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2001 ioc->pllinit = BFA_TRUE; in bfa_ioc_pll_init()
2005 readl(ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
2006 writel(1, ioc->ioc_regs.ioc_init_sem_reg); in bfa_ioc_pll_init()
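Several paths above (fwcheck, semwait, pll init, SMEM access) use the same register-based hardware semaphore: a read of the semaphore register acquires it when the returned value has bit 0 clear, writing 1 releases it, and on contention the driver re-arms a retry timer instead of spinning. A simulated version of that acquire/release protocol, with the register behaviour modelled by a plain variable, is sketched below:

#include <stdint.h>
#include <stdio.h>

/* simulated read-to-acquire semaphore register: a read returns 0 and latches
 * the lock when it was free, returns 1 when somebody already holds it;
 * writing 1 releases it (readl/writel on ioc_sem_reg in the driver) */
static uint32_t sem_busy;

static uint32_t sem_readl(void)
{
	if (sem_busy)
		return 1;
	sem_busy = 1;
	return 0;
}

static void sem_writel(uint32_t v)
{
	if (v == 1)
		sem_busy = 0;
}

static int sem_get(void)
{
	uint32_t r32 = sem_readl();

	if (!(r32 & 1))
		return 0;         /* locked: proceed (IOCPF_E_SEMLOCKED) */
	return -1;                /* busy: the driver re-arms a retry timer here */
}

int main(void)
{
	printf("first:  %d\n", sem_get());   /* 0, acquired */
	printf("second: %d\n", sem_get());   /* -1, already held */
	sem_writel(1);                       /* release, as in the error paths above */
	printf("third:  %d\n", sem_get());   /* 0 again */
	return 0;
}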
2027 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_boot()
2028 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_boot()
2030 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_boot()
2031 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_boot()
2059 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_is_initialized()
2073 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2082 r32 = readl(ioc->ioc_regs.lpu_mbox + in bfa_ioc_msgget()
2090 writel(1, ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2091 readl(ioc->ioc_regs.lpu_mbox_cmd); in bfa_ioc_msgget()
2100 struct bfa_iocpf_s *iocpf = &ioc->iocpf; in bfa_ioc_isr()
2106 switch (msg->mh.msg_id) { in bfa_ioc_isr()
2111 ioc->port_mode = ioc->port_mode_cfg = in bfa_ioc_isr()
2112 (enum bfa_mode_s)msg->fw_event.port_mode; in bfa_ioc_isr()
2113 ioc->ad_cap_bm = msg->fw_event.cap_bm; in bfa_ioc_isr()
2130 bfa_trc(ioc, msg->mh.msg_id); in bfa_ioc_isr()
2145 ioc->bfa = bfa; in bfa_ioc_attach()
2146 ioc->cbfn = cbfn; in bfa_ioc_attach()
2147 ioc->timer_mod = timer_mod; in bfa_ioc_attach()
2148 ioc->fcmode = BFA_FALSE; in bfa_ioc_attach()
2149 ioc->pllinit = BFA_FALSE; in bfa_ioc_attach()
2150 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_attach()
2151 ioc->iocpf.ioc = ioc; in bfa_ioc_attach()
2154 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_attach()
2167 INIT_LIST_HEAD(&ioc->notify_q); in bfa_ioc_detach()
2179 ioc->clscode = clscode; in bfa_ioc_pci_init()
2180 ioc->pcidev = *pcidev; in bfa_ioc_pci_init()
2185 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; in bfa_ioc_pci_init()
2186 ioc->asic_mode = BFI_ASIC_MODE_FC; in bfa_ioc_pci_init()
2188 switch (pcidev->device_id) { in bfa_ioc_pci_init()
2191 ioc->asic_gen = BFI_ASIC_GEN_CB; in bfa_ioc_pci_init()
2192 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2193 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2194 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2198 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2199 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2200 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2201 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2202 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2206 ioc->asic_gen = BFI_ASIC_GEN_CT; in bfa_ioc_pci_init()
2207 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2208 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2209 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2213 ioc->asic_gen = BFI_ASIC_GEN_CT2; in bfa_ioc_pci_init()
2215 pcidev->ssid == BFA_PCI_CT2_SSID_FC) { in bfa_ioc_pci_init()
2216 ioc->asic_mode = BFI_ASIC_MODE_FC16; in bfa_ioc_pci_init()
2217 ioc->fcmode = BFA_TRUE; in bfa_ioc_pci_init()
2218 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; in bfa_ioc_pci_init()
2219 ioc->ad_cap_bm = BFA_CM_HBA; in bfa_ioc_pci_init()
2221 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; in bfa_ioc_pci_init()
2222 ioc->asic_mode = BFI_ASIC_MODE_ETH; in bfa_ioc_pci_init()
2223 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { in bfa_ioc_pci_init()
2224 ioc->port_mode = in bfa_ioc_pci_init()
2225 ioc->port_mode_cfg = BFA_MODE_CNA; in bfa_ioc_pci_init()
2226 ioc->ad_cap_bm = BFA_CM_CNA; in bfa_ioc_pci_init()
2228 ioc->port_mode = in bfa_ioc_pci_init()
2229 ioc->port_mode_cfg = BFA_MODE_NIC; in bfa_ioc_pci_init()
2230 ioc->ad_cap_bm = BFA_CM_NIC; in bfa_ioc_pci_init()
2242 if (ioc->asic_gen == BFI_ASIC_GEN_CB) in bfa_ioc_pci_init()
2244 else if (ioc->asic_gen == BFI_ASIC_GEN_CT) in bfa_ioc_pci_init()
2247 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); in bfa_ioc_pci_init()
2268 ioc->attr_dma.kva = dm_kva; in bfa_ioc_mem_claim()
2269 ioc->attr_dma.pa = dm_pa; in bfa_ioc_mem_claim()
2270 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; in bfa_ioc_mem_claim()
2277 ioc->dbg_fwsave_once = BFA_TRUE; in bfa_ioc_enable()
2297 ioc->dbg_fwsave = dbg_fwsave; in bfa_ioc_debug_memclaim()
2298 ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0; in bfa_ioc_debug_memclaim()
2310 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_register()
2314 mod->mbhdlr[mc].cbfn = mcfuncs[mc]; in bfa_ioc_mbox_register()
2324 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_regisr()
2326 mod->mbhdlr[mc].cbfn = cbfn; in bfa_ioc_mbox_regisr()
2327 mod->mbhdlr[mc].cbarg = cbarg; in bfa_ioc_mbox_regisr()
2340 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_queue()
2346 if (!list_empty(&mod->cmd_q)) { in bfa_ioc_mbox_queue()
2347 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2354 stat = readl(ioc->ioc_regs.hfn_mbox_cmd); in bfa_ioc_mbox_queue()
2356 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2361 * mailbox is free -- queue command to firmware in bfa_ioc_mbox_queue()
2363 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); in bfa_ioc_mbox_queue()
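bfa_ioc_mbox_queue() keeps command ordering by writing to the hardware mailbox only when both the software pending queue is empty and the mailbox busy bit is clear; otherwise the command is appended and bfa_ioc_mbox_poll() resubmits the head once the mailbox goes idle. A standalone sketch of that queue-or-send logic, with hypothetical names and a fixed-size array instead of the driver's linked list, is:

#include <stdio.h>
#include <string.h>

#define CMDS_MAX 8

/* hypothetical single-entry hardware mailbox plus a software pending queue */
struct mbox {
	int  hw_busy;                  /* stands in for the hfn_mbox_cmd bit */
	char hw_slot[16];
	char queue[CMDS_MAX][16];
	int  qlen;
};

static void mbox_hw_send(struct mbox *mb, const char *cmd)
{
	strncpy(mb->hw_slot, cmd, sizeof(mb->hw_slot) - 1);
	mb->hw_busy = 1;               /* firmware clears this when it consumes it */
}

/* queue-or-send, like bfa_ioc_mbox_queue(); no overflow check in this sketch */
static void mbox_queue(struct mbox *mb, const char *cmd)
{
	if (mb->qlen || mb->hw_busy) {             /* keep ordering: append and wait */
		strncpy(mb->queue[mb->qlen++], cmd, 15);
		return;
	}
	mbox_hw_send(mb, cmd);                     /* mailbox free: send directly */
}

/* periodic poll, like bfa_ioc_mbox_poll(): resubmit the head when hw goes idle */
static void mbox_poll(struct mbox *mb)
{
	if (mb->hw_busy || !mb->qlen)
		return;
	mbox_hw_send(mb, mb->queue[0]);
	mb->qlen--;
	memmove(mb->queue[0], mb->queue[1], (size_t)mb->qlen * sizeof(mb->queue[0]));
}

int main(void)
{
	struct mbox mb = { 0 };

	mbox_queue(&mb, "enable");     /* sent immediately */
	mbox_queue(&mb, "getattr");    /* queued behind the busy mailbox */
	mb.hw_busy = 0;                /* pretend firmware acknowledged the first one */
	mbox_poll(&mb);
	printf("hw_slot=%s qlen=%d\n", mb.hw_slot, mb.qlen);
	return 0;
}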
2372 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; in bfa_ioc_mbox_isr()
2386 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) in bfa_ioc_mbox_isr()
2389 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); in bfa_ioc_mbox_isr()
2404 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_error_isr()
2434 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || in bfa_ioc_fw_mismatch()
2435 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); in bfa_ioc_fw_mismatch()
2447 * Check if adapter is disabled -- both IOCs should be in a disabled state.
2458 ioc_state = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_adapter_is_disabled()
2462 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { in bfa_ioc_adapter_is_disabled()
2463 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_adapter_is_disabled()
2477 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); in bfa_ioc_reset_fwstate()
2478 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); in bfa_ioc_reset_fwstate()
2488 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_attr()
2490 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); in bfa_ioc_get_adapter_attr()
2491 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); in bfa_ioc_get_adapter_attr()
2492 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); in bfa_ioc_get_adapter_attr()
2493 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); in bfa_ioc_get_adapter_attr()
2494 memcpy(&ad_attr->vpd, &ioc_attr->vpd, in bfa_ioc_get_adapter_attr()
2497 ad_attr->nports = bfa_ioc_get_nports(ioc); in bfa_ioc_get_adapter_attr()
2498 ad_attr->max_speed = bfa_ioc_speed_sup(ioc); in bfa_ioc_get_adapter_attr()
2500 bfa_ioc_get_adapter_model(ioc, ad_attr->model); in bfa_ioc_get_adapter_attr()
2502 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); in bfa_ioc_get_adapter_attr()
2504 ad_attr->card_type = ioc_attr->card_type; in bfa_ioc_get_adapter_attr()
2505 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); in bfa_ioc_get_adapter_attr()
2507 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) in bfa_ioc_get_adapter_attr()
2508 ad_attr->prototype = 1; in bfa_ioc_get_adapter_attr()
2510 ad_attr->prototype = 0; in bfa_ioc_get_adapter_attr()
2512 ad_attr->pwwn = ioc->attr->pwwn; in bfa_ioc_get_adapter_attr()
2513 ad_attr->mac = bfa_ioc_get_mac(ioc); in bfa_ioc_get_adapter_attr()
2515 ad_attr->pcie_gen = ioc_attr->pcie_gen; in bfa_ioc_get_adapter_attr()
2516 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; in bfa_ioc_get_adapter_attr()
2517 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; in bfa_ioc_get_adapter_attr()
2518 ad_attr->asic_rev = ioc_attr->asic_rev; in bfa_ioc_get_adapter_attr()
2520 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); in bfa_ioc_get_adapter_attr()
2522 ad_attr->cna_capable = bfa_ioc_is_cna(ioc); in bfa_ioc_get_adapter_attr()
2523 ad_attr->trunk_capable = (ad_attr->nports > 1) && in bfa_ioc_get_adapter_attr()
2524 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz; in bfa_ioc_get_adapter_attr()
2530 if (ioc->clscode == BFI_PCIFN_CLASS_ETH) in bfa_ioc_get_type()
2533 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC); in bfa_ioc_get_type()
2535 return (ioc->attr->port_mode == BFI_PORT_MODE_FC) in bfa_ioc_get_type()
2544 (void *)ioc->attr->brcd_serialnum, in bfa_ioc_get_adapter_serial_num()
2552 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); in bfa_ioc_get_adapter_fw_ver()
2565 chip_rev[3] = '-'; in bfa_ioc_get_pci_chip_rev()
2566 chip_rev[4] = ioc->attr->asic_rev; in bfa_ioc_get_pci_chip_rev()
2574 memcpy(optrom_ver, ioc->attr->optrom_version, in bfa_ioc_get_adapter_optrom_ver()
2593 ioc_attr = ioc->attr; in bfa_ioc_get_adapter_model()
2595 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", in bfa_ioc_get_adapter_model()
2596 BFA_MFG_NAME, ioc_attr->card_type); in bfa_ioc_get_adapter_model()
2603 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); in bfa_ioc_get_state()
2608 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); in bfa_ioc_get_state()
2644 ioc_attr->state = bfa_ioc_get_state(ioc); in bfa_ioc_get_attr()
2645 ioc_attr->port_id = ioc->port_id; in bfa_ioc_get_attr()
2646 ioc_attr->port_mode = ioc->port_mode; in bfa_ioc_get_attr()
2647 ioc_attr->port_mode_cfg = ioc->port_mode_cfg; in bfa_ioc_get_attr()
2648 ioc_attr->cap_bm = ioc->ad_cap_bm; in bfa_ioc_get_attr()
2650 ioc_attr->ioc_type = bfa_ioc_get_type(ioc); in bfa_ioc_get_attr()
2652 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); in bfa_ioc_get_attr()
2654 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; in bfa_ioc_get_attr()
2655 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; in bfa_ioc_get_attr()
2656 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); in bfa_ioc_get_attr()
2666 return ioc->attr->fcoe_mac; in bfa_ioc_get_mac()
2668 return ioc->attr->mac; in bfa_ioc_get_mac()
2676 m = ioc->attr->mfg_mac; in bfa_ioc_get_mfg_mac()
2677 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) in bfa_ioc_get_mfg_mac()
2678 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); in bfa_ioc_get_mfg_mac()
2680 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), in bfa_ioc_get_mfg_mac()
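bfa_ioc_get_mfg_mac() derives a per-function MAC from the manufacturing base MAC: old card models simply bump the last octet by the PCI function number, while newer ones add the offset across the last three octets. A sketch of that increment, under the assumption that the helper carries across the low three octets as a 24-bit counter, is:

#include <stdint.h>
#include <stdio.h>

#define MAC_LEN 6

/* add 'inc' to the low 24 bits of a MAC, carrying across the last three
 * octets -- the same idea as bfa_mfg_increment_wwn_mac() */
static void mac_increment(uint8_t mac[MAC_LEN], unsigned inc)
{
	uint32_t low = ((uint32_t)mac[3] << 16) | ((uint32_t)mac[4] << 8) | mac[5];

	low = (low + inc) & 0xffffff;
	mac[3] = low >> 16;
	mac[4] = (low >> 8) & 0xff;
	mac[5] = low & 0xff;
}

int main(void)
{
	uint8_t mac[MAC_LEN] = { 0x00, 0x05, 0x1e, 0x12, 0xff, 0xfe };

	mac_increment(mac, 3);   /* e.g. a per-PCI-function offset */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}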
2692 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_ioc_aen_post()
2703 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2706 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn; in bfa_ioc_aen_post()
2707 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2710 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc); in bfa_ioc_aen_post()
2718 aen_entry->aen_data.ioc.ioc_type = ioc_type; in bfa_ioc_aen_post()
2719 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_ioc_aen_post()
2731 if (ioc->dbg_fwsave_len == 0) in bfa_ioc_debug_fwsave()
2735 if (tlen > ioc->dbg_fwsave_len) in bfa_ioc_debug_fwsave()
2736 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_fwsave()
2738 memcpy(trcdata, ioc->dbg_fwsave, tlen); in bfa_ioc_debug_fwsave()
2771 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, in bfa_ioc_send_fwsync()
2773 req->clscode = cpu_to_be16(ioc->clscode); in bfa_ioc_send_fwsync()
2796 fwsync_iter--; in bfa_ioc_fwsync()
2827 dlen = smem_len - loff; in bfa_ioc_debug_fwcore()
2857 if (ioc->stats_busy) { in bfa_ioc_fw_stats_get()
2858 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_get()
2861 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_get()
2866 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_get()
2878 if (ioc->stats_busy) { in bfa_ioc_fw_stats_clear()
2879 bfa_trc(ioc, ioc->stats_busy); in bfa_ioc_fw_stats_clear()
2882 ioc->stats_busy = BFA_TRUE; in bfa_ioc_fw_stats_clear()
2887 ioc->stats_busy = BFA_FALSE; in bfa_ioc_fw_stats_clear()
2899 if (ioc->dbg_fwsave_once) { in bfa_ioc_debug_save_ftrc()
2900 ioc->dbg_fwsave_once = BFA_FALSE; in bfa_ioc_debug_save_ftrc()
2901 if (ioc->dbg_fwsave_len) { in bfa_ioc_debug_save_ftrc()
2902 tlen = ioc->dbg_fwsave_len; in bfa_ioc_debug_save_ftrc()
2903 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); in bfa_ioc_debug_save_ftrc()
2915 ioc->stats.hb_count = ioc->hb_count; in bfa_ioc_recover()
2924 if (ioc->attr->nwwn == 0) in bfa_ioc_check_attr_wwns()
2926 if (ioc->attr->pwwn == 0) in bfa_ioc_check_attr_wwns()
2939 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); in bfa_iocpf_timeout()
2953 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); in bfa_ioc_poll_fwinit()
2958 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); in bfa_ioc_poll_fwinit()
2962 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) in bfa_ioc_poll_fwinit()
2965 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; in bfa_ioc_poll_fwinit()
2984 struct list_head *qh = &mod->timer_q; in bfa_timer_beat()
2997 if (elem->timeout <= BFA_TIMER_FREQ) { in bfa_timer_beat()
2998 elem->timeout = 0; in bfa_timer_beat()
2999 list_del(&elem->qe); in bfa_timer_beat()
3000 list_add_tail(&elem->qe, &timedout_q); in bfa_timer_beat()
3002 elem->timeout -= BFA_TIMER_FREQ; in bfa_timer_beat()
3013 elem->timercb(elem->arg); in bfa_timer_beat()
3026 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); in bfa_timer_begin()
3028 timer->timeout = timeout; in bfa_timer_begin()
3029 timer->timercb = timercb; in bfa_timer_begin()
3030 timer->arg = arg; in bfa_timer_begin()
3032 list_add_tail(&timer->qe, &mod->timer_q); in bfa_timer_begin()
3041 WARN_ON(list_empty(&timer->qe)); in bfa_timer_stop()
3043 list_del(&timer->qe); in bfa_timer_stop()
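bfa_timer_beat()/bfa_timer_begin()/bfa_timer_stop() implement a simple software timer list: every beat each armed entry's remaining timeout is reduced by BFA_TIMER_FREQ, expired entries are collected onto a local list first, and their callbacks run after the scan so a callback may safely re-arm itself. A compilable sketch of the same idea, using an array of slots instead of the driver's list and firing callbacks inline rather than collecting them first, is:

#include <stdio.h>

#define TIMER_FREQ 100   /* hypothetical beat period in ms (BFA_TIMER_FREQ stand-in) */
#define NTIMERS    4

struct timer {
	int  active;
	int  timeout;                  /* ms remaining */
	void (*cb)(void *arg);
	void *arg;
};

static struct timer timers[NTIMERS];

static void timer_begin(int i, void (*cb)(void *), void *arg, int timeout)
{
	timers[i] = (struct timer){ .active = 1, .timeout = timeout,
				    .cb = cb, .arg = arg };
}

static void timer_stop(int i) { timers[i].active = 0; }

/* called once per tick, like bfa_timer_beat(): expire, then fire callbacks */
static void timer_beat(void)
{
	int i;

	for (i = 0; i < NTIMERS; i++) {
		if (!timers[i].active)
			continue;
		if (timers[i].timeout <= TIMER_FREQ) {
			timers[i].active = 0;          /* expired */
			timers[i].cb(timers[i].arg);
		} else {
			timers[i].timeout -= TIMER_FREQ;
		}
	}
}

static void say(void *arg) { puts((const char *)arg); }

int main(void)
{
	timer_begin(0, say, "ioc timeout", 200);
	timer_beat();        /* 200 -> 100 */
	timer_beat();        /* fires the callback */
	timer_stop(0);       /* already inactive here; harmless */
	return 0;
}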
3058 cfg_inst = &cfg->inst[i]; in bfa_ablk_config_swap()
3060 be16 = cfg_inst->pf_cfg[j].pers; in bfa_ablk_config_swap()
3061 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16); in bfa_ablk_config_swap()
3062 be16 = cfg_inst->pf_cfg[j].num_qpairs; in bfa_ablk_config_swap()
3063 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16); in bfa_ablk_config_swap()
3064 be16 = cfg_inst->pf_cfg[j].num_vectors; in bfa_ablk_config_swap()
3065 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16); in bfa_ablk_config_swap()
3066 be32 = cfg_inst->pf_cfg[j].bw; in bfa_ablk_config_swap()
3067 cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32); in bfa_ablk_config_swap()
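bfa_ablk_config_swap() converts the firmware-written adapter-block configuration from big-endian to host order field by field, using the 16-bit swap for the u16 fields and the 32-bit swap for the u32 bandwidth. A standalone sketch with an invented record layout, using ntohs()/ntohl() as portable stand-ins for be16_to_cpu()/be32_to_cpu(), is:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohs/ntohl/htons/htonl */

/* hypothetical per-PF config record as the firmware lays it out (big endian) */
struct pf_cfg {
	uint16_t pers;
	uint16_t num_qpairs;
	uint16_t num_vectors;
	uint32_t bw;
};

/* convert every field in place, like bfa_ablk_config_swap() */
static void pf_cfg_swap(struct pf_cfg *c)
{
	c->pers        = ntohs(c->pers);
	c->num_qpairs  = ntohs(c->num_qpairs);
	c->num_vectors = ntohs(c->num_vectors);
	c->bw          = ntohl(c->bw);     /* the 32-bit field needs the 32-bit swap */
}

int main(void)
{
	struct pf_cfg c = { .pers = htons(3), .num_qpairs = htons(4),
			    .num_vectors = htons(2), .bw = htonl(10000) };

	pf_cfg_swap(&c);
	printf("pers=%u qpairs=%u vectors=%u bw=%u\n",
	       (unsigned)c.pers, (unsigned)c.num_qpairs,
	       (unsigned)c.num_vectors, (unsigned)c.bw);
	return 0;
}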
3079 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK); in bfa_ablk_isr()
3080 bfa_trc(ablk->ioc, msg->mh.msg_id); in bfa_ablk_isr()
3082 switch (msg->mh.msg_id) { in bfa_ablk_isr()
3084 if (rsp->status == BFA_STATUS_OK) { in bfa_ablk_isr()
3085 memcpy(ablk->cfg, ablk->dma_addr.kva, in bfa_ablk_isr()
3087 bfa_ablk_config_swap(ablk->cfg); in bfa_ablk_isr()
3088 ablk->cfg = NULL; in bfa_ablk_isr()
3095 ablk->ioc->port_mode_cfg = rsp->port_mode; in bfa_ablk_isr()
3101 /* No-op */ in bfa_ablk_isr()
3105 *(ablk->pcifn) = rsp->pcifn; in bfa_ablk_isr()
3106 ablk->pcifn = NULL; in bfa_ablk_isr()
3113 ablk->busy = BFA_FALSE; in bfa_ablk_isr()
3114 if (ablk->cbfn) { in bfa_ablk_isr()
3115 cbfn = ablk->cbfn; in bfa_ablk_isr()
3116 ablk->cbfn = NULL; in bfa_ablk_isr()
3117 cbfn(ablk->cbarg, rsp->status); in bfa_ablk_isr()
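The tail of bfa_ablk_isr() shows the completion convention used throughout this file: clear the busy flag and detach the stored callback before invoking it, so the callback is free to submit the next request. A minimal sketch of that pattern, with hypothetical names, is:

#include <stdio.h>

typedef void (*cbfn_t)(void *arg, int status);

/* hypothetical async request block: one outstanding op, completion callback */
struct ablk {
	int    busy;
	cbfn_t cbfn;
	void  *cbarg;
};

/* completion path, like the tail of bfa_ablk_isr(): clear busy and detach the
 * callback before invoking it, so the callback may queue a new request */
static void ablk_complete(struct ablk *a, int status)
{
	cbfn_t cbfn;

	a->busy = 0;
	if (a->cbfn) {
		cbfn = a->cbfn;
		a->cbfn = NULL;
		cbfn(a->cbarg, status);
	}
}

static void done(void *arg, int status)
{
	printf("%s finished, status %d\n", (const char *)arg, status);
}

int main(void)
{
	struct ablk a = { .busy = 1, .cbfn = done, .cbarg = "query" };

	ablk_complete(&a, 0);
	return 0;
}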
3126 bfa_trc(ablk->ioc, event); in bfa_ablk_notify()
3130 WARN_ON(ablk->busy != BFA_FALSE); in bfa_ablk_notify()
3136 ablk->pcifn = NULL; in bfa_ablk_notify()
3137 if (ablk->busy) { in bfa_ablk_notify()
3138 if (ablk->cbfn) in bfa_ablk_notify()
3139 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED); in bfa_ablk_notify()
3140 ablk->cbfn = NULL; in bfa_ablk_notify()
3141 ablk->busy = BFA_FALSE; in bfa_ablk_notify()
3160 ablk->dma_addr.kva = dma_kva; in bfa_ablk_memclaim()
3161 ablk->dma_addr.pa = dma_pa; in bfa_ablk_memclaim()
3167 ablk->ioc = ioc; in bfa_ablk_attach()
3169 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk); in bfa_ablk_attach()
3170 bfa_q_qe_init(&ablk->ioc_notify); in bfa_ablk_attach()
3171 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk); in bfa_ablk_attach()
3172 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q); in bfa_ablk_attach()
3183 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_query()
3184 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_query()
3188 if (ablk->busy) { in bfa_ablk_query()
3189 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_query()
3193 ablk->cfg = ablk_cfg; in bfa_ablk_query()
3194 ablk->cbfn = cbfn; in bfa_ablk_query()
3195 ablk->cbarg = cbarg; in bfa_ablk_query()
3196 ablk->busy = BFA_TRUE; in bfa_ablk_query()
3198 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg; in bfa_ablk_query()
3199 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY, in bfa_ablk_query()
3200 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_query()
3201 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa); in bfa_ablk_query()
3202 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_query()
3214 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_create()
3215 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_create()
3219 if (ablk->busy) { in bfa_ablk_pf_create()
3220 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_create()
3224 ablk->pcifn = pcifn; in bfa_ablk_pf_create()
3225 ablk->cbfn = cbfn; in bfa_ablk_pf_create()
3226 ablk->cbarg = cbarg; in bfa_ablk_pf_create()
3227 ablk->busy = BFA_TRUE; in bfa_ablk_pf_create()
3229 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_create()
3230 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE, in bfa_ablk_pf_create()
3231 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_create()
3232 m->pers = cpu_to_be16((u16)personality); in bfa_ablk_pf_create()
3233 m->bw = cpu_to_be32(bw); in bfa_ablk_pf_create()
3234 m->port = port; in bfa_ablk_pf_create()
3235 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_create()
3246 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_delete()
3247 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_delete()
3251 if (ablk->busy) { in bfa_ablk_pf_delete()
3252 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_delete()
3256 ablk->cbfn = cbfn; in bfa_ablk_pf_delete()
3257 ablk->cbarg = cbarg; in bfa_ablk_pf_delete()
3258 ablk->busy = BFA_TRUE; in bfa_ablk_pf_delete()
3260 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_delete()
3261 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE, in bfa_ablk_pf_delete()
3262 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_delete()
3263 m->pcifn = (u8)pcifn; in bfa_ablk_pf_delete()
3264 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_delete()
3275 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_adapter_config()
3276 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_adapter_config()
3280 if (ablk->busy) { in bfa_ablk_adapter_config()
3281 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_adapter_config()
3285 ablk->cbfn = cbfn; in bfa_ablk_adapter_config()
3286 ablk->cbarg = cbarg; in bfa_ablk_adapter_config()
3287 ablk->busy = BFA_TRUE; in bfa_ablk_adapter_config()
3289 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_adapter_config()
3290 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG, in bfa_ablk_adapter_config()
3291 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_adapter_config()
3292 m->mode = (u8)mode; in bfa_ablk_adapter_config()
3293 m->max_pf = (u8)max_pf; in bfa_ablk_adapter_config()
3294 m->max_vf = (u8)max_vf; in bfa_ablk_adapter_config()
3295 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_adapter_config()
3306 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_port_config()
3307 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_port_config()
3311 if (ablk->busy) { in bfa_ablk_port_config()
3312 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_port_config()
3316 ablk->cbfn = cbfn; in bfa_ablk_port_config()
3317 ablk->cbarg = cbarg; in bfa_ablk_port_config()
3318 ablk->busy = BFA_TRUE; in bfa_ablk_port_config()
3320 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg; in bfa_ablk_port_config()
3321 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG, in bfa_ablk_port_config()
3322 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_port_config()
3323 m->port = (u8)port; in bfa_ablk_port_config()
3324 m->mode = (u8)mode; in bfa_ablk_port_config()
3325 m->max_pf = (u8)max_pf; in bfa_ablk_port_config()
3326 m->max_vf = (u8)max_vf; in bfa_ablk_port_config()
3327 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_port_config()
3338 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_pf_update()
3339 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_pf_update()
3343 if (ablk->busy) { in bfa_ablk_pf_update()
3344 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_pf_update()
3348 ablk->cbfn = cbfn; in bfa_ablk_pf_update()
3349 ablk->cbarg = cbarg; in bfa_ablk_pf_update()
3350 ablk->busy = BFA_TRUE; in bfa_ablk_pf_update()
3352 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg; in bfa_ablk_pf_update()
3353 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE, in bfa_ablk_pf_update()
3354 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_pf_update()
3355 m->pcifn = (u8)pcifn; in bfa_ablk_pf_update()
3356 m->bw = cpu_to_be32(bw); in bfa_ablk_pf_update()
3357 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_pf_update()
3367 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_en()
3368 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_en()
3372 if (ablk->busy) { in bfa_ablk_optrom_en()
3373 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_en()
3377 ablk->cbfn = cbfn; in bfa_ablk_optrom_en()
3378 ablk->cbarg = cbarg; in bfa_ablk_optrom_en()
3379 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_en()
3381 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_en()
3382 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE, in bfa_ablk_optrom_en()
3383 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_en()
3384 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_en()
3394 if (!bfa_ioc_is_operational(ablk->ioc)) { in bfa_ablk_optrom_dis()
3395 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE); in bfa_ablk_optrom_dis()
3399 if (ablk->busy) { in bfa_ablk_optrom_dis()
3400 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY); in bfa_ablk_optrom_dis()
3404 ablk->cbfn = cbfn; in bfa_ablk_optrom_dis()
3405 ablk->cbarg = cbarg; in bfa_ablk_optrom_dis()
3406 ablk->busy = BFA_TRUE; in bfa_ablk_optrom_dis()
3408 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg; in bfa_ablk_optrom_dis()
3409 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE, in bfa_ablk_optrom_dis()
3410 bfa_ioc_portid(ablk->ioc)); in bfa_ablk_optrom_dis()
3411 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb); in bfa_ablk_optrom_dis()
3429 bfa_trc(sfp, sfp->lock); in bfa_cb_sfp_show()
3430 if (sfp->cbfn) in bfa_cb_sfp_show()
3431 sfp->cbfn(sfp->cbarg, sfp->status); in bfa_cb_sfp_show()
3432 sfp->lock = 0; in bfa_cb_sfp_show()
3433 sfp->cbfn = NULL; in bfa_cb_sfp_show()
3439 bfa_trc(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3440 if (sfp->media) { in bfa_cb_sfp_state_query()
3442 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3443 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3444 sfp->status); in bfa_cb_sfp_state_query()
3445 sfp->media = NULL; in bfa_cb_sfp_state_query()
3448 if (sfp->portspeed) { in bfa_cb_sfp_state_query()
3449 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); in bfa_cb_sfp_state_query()
3450 if (sfp->state_query_cbfn) in bfa_cb_sfp_state_query()
3451 sfp->state_query_cbfn(sfp->state_query_cbarg, in bfa_cb_sfp_state_query()
3452 sfp->status); in bfa_cb_sfp_state_query()
3453 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_cb_sfp_state_query()
3456 sfp->state_query_lock = 0; in bfa_cb_sfp_state_query()
3457 sfp->state_query_cbfn = NULL; in bfa_cb_sfp_state_query()
3469 bfa_trc(sfp, sfp->lock); in bfa_sfp_notify()
3470 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_notify()
3475 if (sfp->lock) { in bfa_sfp_notify()
3476 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3480 if (sfp->state_query_lock) { in bfa_sfp_notify()
3481 sfp->status = BFA_STATUS_IOC_FAILURE; in bfa_sfp_notify()
3497 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad; in bfa_sfp_scn_aen_post()
3501 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) | in bfa_sfp_scn_aen_post()
3502 ((u64)rsp->event)); in bfa_sfp_scn_aen_post()
3508 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc); in bfa_sfp_scn_aen_post()
3509 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn; in bfa_sfp_scn_aen_post()
3510 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc); in bfa_sfp_scn_aen_post()
3512 switch (rsp->event) { in bfa_sfp_scn_aen_post()
3527 aen_entry->aen_data.port.level = rsp->pomlvl; in bfa_sfp_scn_aen_post()
3530 bfa_trc(sfp, rsp->event); in bfa_sfp_scn_aen_post()
3535 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq, in bfa_sfp_scn_aen_post()
3545 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata_send()
3547 bfa_trc(sfp, req->memtype); in bfa_sfp_getdata_send()
3550 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW, in bfa_sfp_getdata_send()
3551 bfa_ioc_portid(sfp->ioc)); in bfa_sfp_getdata_send()
3554 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd); in bfa_sfp_getdata_send()
3563 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_getdata()
3565 WARN_ON(sfp->lock != 0); in bfa_sfp_getdata()
3566 bfa_trc(sfp, sfp->state); in bfa_sfp_getdata()
3568 sfp->lock = 1; in bfa_sfp_getdata()
3569 sfp->memtype = memtype; in bfa_sfp_getdata()
3570 req->memtype = memtype; in bfa_sfp_getdata()
3573 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa); in bfa_sfp_getdata()
3586 switch (rsp->event) { in bfa_sfp_scn()
3588 sfp->state = BFA_SFP_STATE_INSERTED; in bfa_sfp_scn()
3589 sfp->data_valid = 0; in bfa_sfp_scn()
3593 sfp->state = BFA_SFP_STATE_REMOVED; in bfa_sfp_scn()
3594 sfp->data_valid = 0; in bfa_sfp_scn()
3598 sfp->state = BFA_SFP_STATE_FAILED; in bfa_sfp_scn()
3599 sfp->data_valid = 0; in bfa_sfp_scn()
3603 sfp->state = BFA_SFP_STATE_UNSUPPORT; in bfa_sfp_scn()
3605 if (!sfp->lock) in bfa_sfp_scn()
3612 sfp->state = BFA_SFP_STATE_VALID; in bfa_sfp_scn()
3613 if (!sfp->lock) in bfa_sfp_scn()
3617 bfa_trc(sfp, rsp->event); in bfa_sfp_scn()
3630 if (!sfp->lock) { in bfa_sfp_show_comp()
3634 bfa_trc(sfp, sfp->lock); in bfa_sfp_show_comp()
3638 bfa_trc(sfp, rsp->status); in bfa_sfp_show_comp()
3639 if (rsp->status == BFA_STATUS_OK) { in bfa_sfp_show_comp()
3640 sfp->data_valid = 1; in bfa_sfp_show_comp()
3641 if (sfp->state == BFA_SFP_STATE_VALID) in bfa_sfp_show_comp()
3642 sfp->status = BFA_STATUS_OK; in bfa_sfp_show_comp()
3643 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_show_comp()
3644 sfp->status = BFA_STATUS_SFP_UNSUPP; in bfa_sfp_show_comp()
3646 bfa_trc(sfp, sfp->state); in bfa_sfp_show_comp()
3648 sfp->data_valid = 0; in bfa_sfp_show_comp()
3649 sfp->status = rsp->status; in bfa_sfp_show_comp()
3653 bfa_trc(sfp, sfp->memtype); in bfa_sfp_show_comp()
3654 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) { in bfa_sfp_show_comp()
3655 bfa_trc(sfp, sfp->data_valid); in bfa_sfp_show_comp()
3656 if (sfp->data_valid) { in bfa_sfp_show_comp()
3658 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base); in bfa_sfp_show_comp()
3659 memcpy(des, sfp->dbuf_kva, size); in bfa_sfp_show_comp()
3666 sfp->lock = 0; in bfa_sfp_show_comp()
3668 bfa_trc(sfp, sfp->state_query_lock); in bfa_sfp_show_comp()
3669 if (sfp->state_query_lock) { in bfa_sfp_show_comp()
3670 sfp->state = rsp->state; in bfa_sfp_show_comp()
3682 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg; in bfa_sfp_state_query()
3685 WARN_ON(sfp->state != BFA_SFP_STATE_INIT); in bfa_sfp_state_query()
3686 WARN_ON(sfp->state_query_lock != 0); in bfa_sfp_state_query()
3687 bfa_trc(sfp, sfp->state); in bfa_sfp_state_query()
3689 sfp->state_query_lock = 1; in bfa_sfp_state_query()
3690 req->memtype = 0; in bfa_sfp_state_query()
3692 if (!sfp->lock) in bfa_sfp_state_query()
3699 enum bfa_defs_sfp_media_e *media = sfp->media; in bfa_sfp_media_get()
3703 if (sfp->state == BFA_SFP_STATE_UNSUPPORT) in bfa_sfp_media_get()
3705 else if (sfp->state == BFA_SFP_STATE_VALID) { in bfa_sfp_media_get()
3707 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_media_get()
3708 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 | in bfa_sfp_media_get()
3709 (sfpmem->srlid_base.xcvr[5] >> 1); in bfa_sfp_media_get()
3711 e10g.b = sfpmem->srlid_base.xcvr[0]; in bfa_sfp_media_get()
3739 bfa_trc(sfp, sfp->state); in bfa_sfp_media_get()
3745 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva; in bfa_sfp_speed_valid()
3746 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr; in bfa_sfp_speed_valid()
3747 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3; in bfa_sfp_speed_valid()
3748 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g; in bfa_sfp_speed_valid()
3780 switch (msg->mh.msg_id) { in bfa_sfp_intr()
3790 bfa_trc(sfp, msg->mh.msg_id); in bfa_sfp_intr()
3811 sfp->dev = dev; in bfa_sfp_attach()
3812 sfp->ioc = ioc; in bfa_sfp_attach()
3813 sfp->trcmod = trcmod; in bfa_sfp_attach()
3815 sfp->cbfn = NULL; in bfa_sfp_attach()
3816 sfp->cbarg = NULL; in bfa_sfp_attach()
3817 sfp->sfpmem = NULL; in bfa_sfp_attach()
3818 sfp->lock = 0; in bfa_sfp_attach()
3819 sfp->data_valid = 0; in bfa_sfp_attach()
3820 sfp->state = BFA_SFP_STATE_INIT; in bfa_sfp_attach()
3821 sfp->state_query_lock = 0; in bfa_sfp_attach()
3822 sfp->state_query_cbfn = NULL; in bfa_sfp_attach()
3823 sfp->state_query_cbarg = NULL; in bfa_sfp_attach()
3824 sfp->media = NULL; in bfa_sfp_attach()
3825 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; in bfa_sfp_attach()
3826 sfp->is_elb = BFA_FALSE; in bfa_sfp_attach()
3828 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp); in bfa_sfp_attach()
3829 bfa_q_qe_init(&sfp->ioc_notify); in bfa_sfp_attach()
3830 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp); in bfa_sfp_attach()
3831 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q); in bfa_sfp_attach()
3840 sfp->dbuf_kva = dm_kva; in bfa_sfp_memclaim()
3841 sfp->dbuf_pa = dm_pa; in bfa_sfp_memclaim()
3842 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s)); in bfa_sfp_memclaim()
3851 * @param[in] sfp - bfa sfp module
3853 * @param[out] sfpmem - sfp eeprom data
3861 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_show()
3866 if (sfp->lock) { in bfa_sfp_show()
3871 sfp->cbfn = cbfn; in bfa_sfp_show()
3872 sfp->cbarg = cbarg; in bfa_sfp_show()
3873 sfp->sfpmem = sfpmem; in bfa_sfp_show()
3882 * @param[in] sfp - bfa sfp module
3884 * @param[out] media - sfp media type
3891 if (!bfa_ioc_is_operational(sfp->ioc)) { in bfa_sfp_media()
3896 sfp->media = media; in bfa_sfp_media()
3897 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_media()
3898 if (sfp->state_query_lock) { in bfa_sfp_media()
3902 sfp->state_query_cbfn = cbfn; in bfa_sfp_media()
3903 sfp->state_query_cbarg = cbarg; in bfa_sfp_media()
3916 * @param[in] sfp - bfa sfp module
3917 * @param[in] portspeed - port speed from user
3926 if (!bfa_ioc_is_operational(sfp->ioc)) in bfa_sfp_speed()
3930 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type)) in bfa_sfp_speed()
3934 sfp->portspeed = portspeed; in bfa_sfp_speed()
3935 if (sfp->state == BFA_SFP_STATE_INIT) { in bfa_sfp_speed()
3936 if (sfp->state_query_lock) { in bfa_sfp_speed()
3940 sfp->state_query_cbfn = cbfn; in bfa_sfp_speed()
3941 sfp->state_query_cbarg = cbarg; in bfa_sfp_speed()
3947 if (sfp->state == BFA_SFP_STATE_REMOVED || in bfa_sfp_speed()
3948 sfp->state == BFA_SFP_STATE_FAILED) { in bfa_sfp_speed()
3949 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
3953 if (sfp->state == BFA_SFP_STATE_INSERTED) { in bfa_sfp_speed()
3954 bfa_trc(sfp, sfp->state); in bfa_sfp_speed()
3959 if (sfp->is_elb) in bfa_sfp_speed()
3982 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; in bfa_flash_aen_audit_post()
3989 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn; in bfa_flash_aen_audit_post()
3990 aen_entry->aen_data.audit.partition_inst = inst; in bfa_flash_aen_audit_post()
3991 aen_entry->aen_data.audit.partition_type = type; in bfa_flash_aen_audit_post()
3994 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq, in bfa_flash_aen_audit_post()
4001 flash->op_busy = 0; in bfa_flash_cb()
4002 if (flash->cbfn) in bfa_flash_cb()
4003 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_cb()
4015 if (flash->op_busy) { in bfa_flash_notify()
4016 flash->status = BFA_STATUS_IOC_FAILURE; in bfa_flash_notify()
4017 flash->cbfn(flash->cbarg, flash->status); in bfa_flash_notify()
4018 flash->op_busy = 0; in bfa_flash_notify()
4030 * @param[in] cbarg - callback argument
4037 (struct bfi_flash_query_req_s *) flash->mb.msg; in bfa_flash_query_send()
4039 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, in bfa_flash_query_send()
4040 bfa_ioc_portid(flash->ioc)); in bfa_flash_query_send()
4041 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s), in bfa_flash_query_send()
4042 flash->dbuf_pa); in bfa_flash_query_send()
4043 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_query_send()
4049 * @param[in] cbarg - callback argument
4055 (struct bfi_flash_write_req_s *) flash->mb.msg; in bfa_flash_write_send()
4058 msg->type = be32_to_cpu(flash->type); in bfa_flash_write_send()
4059 msg->instance = flash->instance; in bfa_flash_write_send()
4060 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_write_send()
4061 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_write_send()
4062 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_write_send()
4063 msg->length = be32_to_cpu(len); in bfa_flash_write_send()
4066 msg->last = (len == flash->residue) ? 1 : 0; in bfa_flash_write_send()
4068 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, in bfa_flash_write_send()
4069 bfa_ioc_portid(flash->ioc)); in bfa_flash_write_send()
4070 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_write_send()
4071 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); in bfa_flash_write_send()
4072 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_write_send()
4074 flash->residue -= len; in bfa_flash_write_send()
4075 flash->offset += len; in bfa_flash_write_send()
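bfa_flash_write_send() moves the image in chunks of at most BFA_FLASH_DMA_BUF_SZ bytes: each request copies min(residue, buffer size) bytes into the DMA buffer, flags the final chunk when len equals the remaining residue, and advances offset/residue; the WRITE_RSP handler then issues the next chunk until residue reaches zero. A standalone sketch of that chunking, with a tiny invented buffer size, is:

#include <stdio.h>
#include <string.h>

#define DMA_BUF_SZ 8   /* hypothetical chunk size; the driver's DMA buffer is much larger */

struct xfer {
	const char *ubuf;    /* caller's buffer */
	size_t residue;      /* bytes still to send */
	size_t offset;       /* bytes already sent */
	char dbuf[DMA_BUF_SZ];
};

/* emit one chunk, like bfa_flash_write_send(); returns 1 on the last chunk */
static int write_send(struct xfer *x)
{
	size_t len = x->residue < DMA_BUF_SZ ? x->residue : DMA_BUF_SZ;
	int last = (len == x->residue);

	memcpy(x->dbuf, x->ubuf + x->offset, len);   /* stage into the DMA buffer */
	printf("send off=%zu len=%zu last=%d\n", x->offset, len, last);

	x->residue -= len;
	x->offset  += len;
	return last;
}

int main(void)
{
	const char data[20] = "flash image payload";
	struct xfer x = { .ubuf = data, .residue = sizeof(data), .offset = 0 };

	/* the driver sends the next chunk from the WRITE_RSP handler;
	 * here we just loop until write_send() reports the last chunk */
	while (!write_send(&x))
		;
	return 0;
}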
4081 * @param[in] cbarg - callback argument
4088 (struct bfi_flash_read_req_s *) flash->mb.msg; in bfa_flash_read_send()
4091 msg->type = be32_to_cpu(flash->type); in bfa_flash_read_send()
4092 msg->instance = flash->instance; in bfa_flash_read_send()
4093 msg->offset = be32_to_cpu(flash->addr_off + flash->offset); in bfa_flash_read_send()
4094 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? in bfa_flash_read_send()
4095 flash->residue : BFA_FLASH_DMA_BUF_SZ; in bfa_flash_read_send()
4096 msg->length = be32_to_cpu(len); in bfa_flash_read_send()
4097 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, in bfa_flash_read_send()
4098 bfa_ioc_portid(flash->ioc)); in bfa_flash_read_send()
4099 bfa_alen_set(&msg->alen, len, flash->dbuf_pa); in bfa_flash_read_send()
4100 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_read_send()
4106 * @param[in] cbarg - callback argument
4113 (struct bfi_flash_erase_req_s *) flash->mb.msg; in bfa_flash_erase_send()
4115 msg->type = be32_to_cpu(flash->type); in bfa_flash_erase_send()
4116 msg->instance = flash->instance; in bfa_flash_erase_send()
4117 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ, in bfa_flash_erase_send()
4118 bfa_ioc_portid(flash->ioc)); in bfa_flash_erase_send()
4119 bfa_ioc_mbox_queue(flash->ioc, &flash->mb); in bfa_flash_erase_send()
4125 * @param[in] flasharg - flash structure
4126 * @param[in] msg - message structure
4144 bfa_trc(flash, msg->mh.msg_id); in bfa_flash_intr()
4146 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) { in bfa_flash_intr()
4152 switch (msg->mh.msg_id) { in bfa_flash_intr()
4154 status = be32_to_cpu(m.query->status); in bfa_flash_intr()
4160 attr = (struct bfa_flash_attr_s *) flash->ubuf; in bfa_flash_intr()
4161 f = (struct bfa_flash_attr_s *) flash->dbuf_kva; in bfa_flash_intr()
4162 attr->status = be32_to_cpu(f->status); in bfa_flash_intr()
4163 attr->npart = be32_to_cpu(f->npart); in bfa_flash_intr()
4164 bfa_trc(flash, attr->status); in bfa_flash_intr()
4165 bfa_trc(flash, attr->npart); in bfa_flash_intr()
4166 for (i = 0; i < attr->npart; i++) { in bfa_flash_intr()
4167 attr->part[i].part_type = in bfa_flash_intr()
4168 be32_to_cpu(f->part[i].part_type); in bfa_flash_intr()
4169 attr->part[i].part_instance = in bfa_flash_intr()
4170 be32_to_cpu(f->part[i].part_instance); in bfa_flash_intr()
4171 attr->part[i].part_off = in bfa_flash_intr()
4172 be32_to_cpu(f->part[i].part_off); in bfa_flash_intr()
4173 attr->part[i].part_size = in bfa_flash_intr()
4174 be32_to_cpu(f->part[i].part_size); in bfa_flash_intr()
4175 attr->part[i].part_len = in bfa_flash_intr()
4176 be32_to_cpu(f->part[i].part_len); in bfa_flash_intr()
4177 attr->part[i].part_status = in bfa_flash_intr()
4178 be32_to_cpu(f->part[i].part_status); in bfa_flash_intr()
4181 flash->status = status; in bfa_flash_intr()
4185 status = be32_to_cpu(m.erase->status); in bfa_flash_intr()
4187 flash->status = status; in bfa_flash_intr()
4191 status = be32_to_cpu(m.write->status); in bfa_flash_intr()
4193 if (status != BFA_STATUS_OK || flash->residue == 0) { in bfa_flash_intr()
4194 flash->status = status; in bfa_flash_intr()
4197 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4202 status = be32_to_cpu(m.read->status); in bfa_flash_intr()
4205 flash->status = status; in bfa_flash_intr()
4208 u32 len = be32_to_cpu(m.read->length); in bfa_flash_intr()
4209 bfa_trc(flash, flash->offset); in bfa_flash_intr()
4211 memcpy(flash->ubuf + flash->offset, in bfa_flash_intr()
4212 flash->dbuf_kva, len); in bfa_flash_intr()
4213 flash->residue -= len; in bfa_flash_intr()
4214 flash->offset += len; in bfa_flash_intr()
4215 if (flash->residue == 0) { in bfa_flash_intr()
4216 flash->status = status; in bfa_flash_intr()
4225 status = be32_to_cpu(m.event->status); in bfa_flash_intr()
4228 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR); in bfa_flash_intr()
4231 param = be32_to_cpu(m.event->param); in bfa_flash_intr()
4233 bfa_ioc_aen_post(flash->ioc, in bfa_flash_intr()
4246 * @param[in] mincfg - minimal cfg variable
4260 * @param[in] flash - flash structure
4261 * @param[in] ioc - ioc structure
4262 * @param[in] dev - device structure
4263 * @param[in] trcmod - trace module
4264 * @param[in] logmod - log module
4270 flash->ioc = ioc; in bfa_flash_attach()
4271 flash->trcmod = trcmod; in bfa_flash_attach()
4272 flash->cbfn = NULL; in bfa_flash_attach()
4273 flash->cbarg = NULL; in bfa_flash_attach()
4274 flash->op_busy = 0; in bfa_flash_attach()
4276 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); in bfa_flash_attach()
4277 bfa_q_qe_init(&flash->ioc_notify); in bfa_flash_attach()
4278 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); in bfa_flash_attach()
4279 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); in bfa_flash_attach()
4283 flash->dbuf_kva = NULL; in bfa_flash_attach()
4284 flash->dbuf_pa = 0; in bfa_flash_attach()
4291 * @param[in] flash - flash structure
4292 * @param[in] dm_kva - pointer to virtual memory address
4293 * @param[in] dm_pa - physical memory address
4294 * @param[in] mincfg - minimal cfg variable
4303 flash->dbuf_kva = dm_kva; in bfa_flash_memclaim()
4304 flash->dbuf_pa = dm_pa; in bfa_flash_memclaim()
4305 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); in bfa_flash_memclaim()
4313 * @param[in] flash - flash structure
4314 * @param[in] attr - flash attribute structure
4315 * @param[in] cbfn - callback function
4316 * @param[in] cbarg - callback argument
4326 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_get_attr()
4329 if (flash->op_busy) { in bfa_flash_get_attr()
4330 bfa_trc(flash, flash->op_busy); in bfa_flash_get_attr()
4334 flash->op_busy = 1; in bfa_flash_get_attr()
4335 flash->cbfn = cbfn; in bfa_flash_get_attr()
4336 flash->cbarg = cbarg; in bfa_flash_get_attr()
4337 flash->ubuf = (u8 *) attr; in bfa_flash_get_attr()
4346 * @param[in] flash - flash structure
4347 * @param[in] type - flash partition type
4348 * @param[in] instance - flash partition instance
4349 * @param[in] cbfn - callback function
4350 * @param[in] cbarg - callback argument
4362 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_erase_part()
4365 if (flash->op_busy) { in bfa_flash_erase_part()
4366 bfa_trc(flash, flash->op_busy); in bfa_flash_erase_part()
4370 flash->op_busy = 1; in bfa_flash_erase_part()
4371 flash->cbfn = cbfn; in bfa_flash_erase_part()
4372 flash->cbarg = cbarg; in bfa_flash_erase_part()
4373 flash->type = type; in bfa_flash_erase_part()
4374 flash->instance = instance; in bfa_flash_erase_part()
4377 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE, in bfa_flash_erase_part()
4385 * @param[in] flash - flash structure
4386 * @param[in] type - flash partition type
4387 * @param[in] instance - flash partition instance
4388 * @param[in] buf - update data buffer
4389 * @param[in] len - data buffer length
4390 * @param[in] offset - offset relative to the partition starting address
4391 * @param[in] cbfn - callback function
4392 * @param[in] cbarg - callback argument
4407 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_update_part()
4411 * 'len' must be on a word (4-byte) boundary in bfa_flash_update_part()
4420 if (flash->op_busy) { in bfa_flash_update_part()
4421 bfa_trc(flash, flash->op_busy); in bfa_flash_update_part()
4425 flash->op_busy = 1; in bfa_flash_update_part()
4426 flash->cbfn = cbfn; in bfa_flash_update_part()
4427 flash->cbarg = cbarg; in bfa_flash_update_part()
4428 flash->type = type; in bfa_flash_update_part()
4429 flash->instance = instance; in bfa_flash_update_part()
4430 flash->residue = len; in bfa_flash_update_part()
4431 flash->offset = 0; in bfa_flash_update_part()
4432 flash->addr_off = offset; in bfa_flash_update_part()
4433 flash->ubuf = buf; in bfa_flash_update_part()
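A hedged caller sketch for the update path: per the length check above, the image length has to be word (4-byte) aligned, and a BFA_STATUS_OK return only means the request was queued; the real result arrives in the callback. drv_flash_update_done and drv are illustrative names, and the callback prototype is assumed from the (cbarg, status) completion calls visible elsewhere in this file.

	/* illustrative completion handler; prototype assumed */
	static void
	drv_flash_update_done(void *cbarg, bfa_status_t status)
	{
		/* 'status' is the result of the whole chunked update */
	}

	if (len & 0x03)				/* caller-side alignment guard */
		return BFA_STATUS_FAILED;	/* generic error code, as used below */

	status = bfa_flash_update_part(flash, BFA_FLASH_PART_DRV, 0,
				       buf, len, 0,
				       drv_flash_update_done, drv);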
4442 * @param[in] flash - flash structure
4443 * @param[in] type - flash partition type
4444 * @param[in] instance - flash partition instance
4445 * @param[in] buf - read data buffer
4446 * @param[in] len - data buffer length
4447 * @param[in] offset - offset relative to the partition starting address
4448 * @param[in] cbfn - callback function
4449 * @param[in] cbarg - callback argument
4464 if (!bfa_ioc_is_operational(flash->ioc)) in bfa_flash_read_part()
4468	 * 'len' must be on a word (4-byte) boundary in bfa_flash_read_part()
4474 if (flash->op_busy) { in bfa_flash_read_part()
4475 bfa_trc(flash, flash->op_busy); in bfa_flash_read_part()
4479 flash->op_busy = 1; in bfa_flash_read_part()
4480 flash->cbfn = cbfn; in bfa_flash_read_part()
4481 flash->cbarg = cbarg; in bfa_flash_read_part()
4482 flash->type = type; in bfa_flash_read_part()
4483 flash->instance = instance; in bfa_flash_read_part()
4484 flash->residue = len; in bfa_flash_read_part()
4485 flash->offset = 0; in bfa_flash_read_part()
4486 flash->addr_off = offset; in bfa_flash_read_part()
4487 flash->ubuf = buf; in bfa_flash_read_part()
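The read path mirrors the update path; bfa_dconf_sm_uninit() further down in this listing is an in-tree caller that pulls the driver-config partition into memory at init time. A hedged sketch of the same kind of call (read_done_cb and the buffer variables are illustrative):

	/* read 'len' word-aligned bytes from offset 0 of the DRV partition;
	 * completion (and the real status) is delivered through read_done_cb
	 */
	status = bfa_flash_read_part(BFA_FLASH(bfa), BFA_FLASH_PART_DRV,
				     instance, buf, len, 0,
				     read_done_cb, cbarg);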
4507 bfa_trc(diag, diag->block); in bfa_diag_notify()
4508 bfa_trc(diag, diag->fwping.lock); in bfa_diag_notify()
4509 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_notify()
4514 if (diag->fwping.lock) { in bfa_diag_notify()
4515 diag->fwping.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4516 diag->fwping.cbfn(diag->fwping.cbarg, in bfa_diag_notify()
4517 diag->fwping.status); in bfa_diag_notify()
4518 diag->fwping.lock = 0; in bfa_diag_notify()
4521 if (diag->tsensor.lock) { in bfa_diag_notify()
4522 diag->tsensor.status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4523 diag->tsensor.cbfn(diag->tsensor.cbarg, in bfa_diag_notify()
4524 diag->tsensor.status); in bfa_diag_notify()
4525 diag->tsensor.lock = 0; in bfa_diag_notify()
4528 if (diag->block) { in bfa_diag_notify()
4529 if (diag->timer_active) { in bfa_diag_notify()
4530 bfa_timer_stop(&diag->timer); in bfa_diag_notify()
4531 diag->timer_active = 0; in bfa_diag_notify()
4534 diag->status = BFA_STATUS_IOC_FAILURE; in bfa_diag_notify()
4535 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_notify()
4536 diag->block = 0; in bfa_diag_notify()
4549 struct bfa_ioc_s *ioc = diag->ioc; in bfa_diag_memtest_done()
4550 struct bfa_diag_memtest_result *res = diag->result; in bfa_diag_memtest_done()
4554 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); in bfa_diag_memtest_done()
4557 writel(pgnum, ioc->ioc_regs.host_page_num_fn); in bfa_diag_memtest_done()
4563 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); in bfa_diag_memtest_done()
4570 res->status = swab32(res->status); in bfa_diag_memtest_done()
4571 bfa_trc(diag, res->status); in bfa_diag_memtest_done()
4573 if (res->status == BFI_BOOT_MEMTEST_RES_SIG) in bfa_diag_memtest_done()
4574 diag->status = BFA_STATUS_OK; in bfa_diag_memtest_done()
4576 diag->status = BFA_STATUS_MEMTEST_FAILED; in bfa_diag_memtest_done()
4577 res->addr = swab32(res->addr); in bfa_diag_memtest_done()
4578 res->exp = swab32(res->exp); in bfa_diag_memtest_done()
4579 res->act = swab32(res->act); in bfa_diag_memtest_done()
4580 res->err_status = swab32(res->err_status); in bfa_diag_memtest_done()
4581 res->err_status1 = swab32(res->err_status1); in bfa_diag_memtest_done()
4582 res->err_addr = swab32(res->err_addr); in bfa_diag_memtest_done()
4583 bfa_trc(diag, res->addr); in bfa_diag_memtest_done()
4584 bfa_trc(diag, res->exp); in bfa_diag_memtest_done()
4585 bfa_trc(diag, res->act); in bfa_diag_memtest_done()
4586 bfa_trc(diag, res->err_status); in bfa_diag_memtest_done()
4587 bfa_trc(diag, res->err_status1); in bfa_diag_memtest_done()
4588 bfa_trc(diag, res->err_addr); in bfa_diag_memtest_done()
4590 diag->timer_active = 0; in bfa_diag_memtest_done()
4591 diag->cbfn(diag->cbarg, diag->status); in bfa_diag_memtest_done()
4592 diag->block = 0; in bfa_diag_memtest_done()
4608 bfa_trc(diag, diag->fwping.dbuf_pa); in diag_fwping_send()
4612 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data; in diag_fwping_send()
4615 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg; in diag_fwping_send()
4618 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ, in diag_fwping_send()
4619 diag->fwping.dbuf_pa); in diag_fwping_send()
4621 fwping_req->count = cpu_to_be32(diag->fwping.count); in diag_fwping_send()
4623 fwping_req->data = diag->fwping.data; in diag_fwping_send()
4626 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING, in diag_fwping_send()
4627 bfa_ioc_portid(diag->ioc)); in diag_fwping_send()
4630 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd); in diag_fwping_send()
4637 u32 rsp_data = diag_rsp->data; in diag_fwping_comp()
4638 u8 rsp_dma_status = diag_rsp->dma_status; in diag_fwping_comp()
4645 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) : in diag_fwping_comp()
4646 diag->fwping.data; in diag_fwping_comp()
4648 if (diag->fwping.data != rsp_data) { in diag_fwping_comp()
4650 diag->fwping.result->dmastatus = in diag_fwping_comp()
4652 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4653 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4654 diag->fwping.status); in diag_fwping_comp()
4655 diag->fwping.lock = 0; in diag_fwping_comp()
4660 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) { in diag_fwping_comp()
4664 *((u32 *)diag->fwping.dbuf_kva + i)); in diag_fwping_comp()
4665 diag->fwping.result->dmastatus = in diag_fwping_comp()
4667 diag->fwping.status = BFA_STATUS_DATACORRUPTED; in diag_fwping_comp()
4668 diag->fwping.cbfn(diag->fwping.cbarg, in diag_fwping_comp()
4669 diag->fwping.status); in diag_fwping_comp()
4670 diag->fwping.lock = 0; in diag_fwping_comp()
4674 diag->fwping.result->dmastatus = BFA_STATUS_OK; in diag_fwping_comp()
4675 diag->fwping.status = BFA_STATUS_OK; in diag_fwping_comp()
4676 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4677 diag->fwping.lock = 0; in diag_fwping_comp()
4679 diag->fwping.status = BFA_STATUS_HDMA_FAILED; in diag_fwping_comp()
4680 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status); in diag_fwping_comp()
4681 diag->fwping.lock = 0; in diag_fwping_comp()
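Restating the verification done above: the firmware is expected to hand back either the seed pattern or its bitwise complement depending on how many pings remain, so the completion handler recomputes the expected word before scanning the DMA buffer. Under that reading of the code (the loop bound is an assumption, since the loop header is elided here):

	/* expected pattern alternates with the low bit of the remaining count */
	u32 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
						diag->fwping.data;

	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)	/* assumed bound */
		if (*((u32 *)diag->fwping.dbuf_kva + i) != pat)
			/* mismatch => BFA_STATUS_DATACORRUPTED */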
4694 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg; in diag_tempsensor_send()
4695 bfa_trc(diag, msg->temp); in diag_tempsensor_send()
4697 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR, in diag_tempsensor_send()
4698 bfa_ioc_portid(diag->ioc)); in diag_tempsensor_send()
4700 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd); in diag_tempsensor_send()
4706 if (!diag->tsensor.lock) { in diag_tempsensor_comp()
4708 bfa_trc(diag, diag->tsensor.lock); in diag_tempsensor_comp()
4716 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp); in diag_tempsensor_comp()
4717 diag->tsensor.temp->ts_junc = rsp->ts_junc; in diag_tempsensor_comp()
4718 diag->tsensor.temp->ts_brd = rsp->ts_brd; in diag_tempsensor_comp()
4719 diag->tsensor.temp->status = BFA_STATUS_OK; in diag_tempsensor_comp()
4721 if (rsp->ts_brd) { in diag_tempsensor_comp()
4722 if (rsp->status == BFA_STATUS_OK) { in diag_tempsensor_comp()
4723 diag->tsensor.temp->brd_temp = in diag_tempsensor_comp()
4724 be16_to_cpu(rsp->brd_temp); in diag_tempsensor_comp()
4726 bfa_trc(diag, rsp->status); in diag_tempsensor_comp()
4727 diag->tsensor.temp->brd_temp = 0; in diag_tempsensor_comp()
4728 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY; in diag_tempsensor_comp()
4731 bfa_trc(diag, rsp->ts_junc); in diag_tempsensor_comp()
4732 bfa_trc(diag, rsp->temp); in diag_tempsensor_comp()
4733 bfa_trc(diag, rsp->ts_brd); in diag_tempsensor_comp()
4734 bfa_trc(diag, rsp->brd_temp); in diag_tempsensor_comp()
4735 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status); in diag_tempsensor_comp()
4736 diag->tsensor.lock = 0; in diag_tempsensor_comp()
4747 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg; in diag_ledtest_send()
4749 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST, in diag_ledtest_send()
4750 bfa_ioc_portid(diag->ioc)); in diag_ledtest_send()
4756 if (ledtest->freq) in diag_ledtest_send()
4757 ledtest->freq = 500 / ledtest->freq; in diag_ledtest_send()
4759 if (ledtest->freq == 0) in diag_ledtest_send()
4760 ledtest->freq = 1; in diag_ledtest_send()
4762 bfa_trc(diag, ledtest->freq); in diag_ledtest_send()
4763 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */ in diag_ledtest_send()
4764 msg->cmd = (u8) ledtest->cmd; in diag_ledtest_send()
4765 msg->color = (u8) ledtest->color; in diag_ledtest_send()
4766 msg->portid = bfa_ioc_portid(diag->ioc); in diag_ledtest_send()
4767 msg->led = ledtest->led; in diag_ledtest_send()
4768 msg->freq = cpu_to_be16(ledtest->freq); in diag_ledtest_send()
4771 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd); in diag_ledtest_send()
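The frequency handling above converts the caller's value into what looks like a tick count on a 500-per-second base and clamps it to a minimum of 1 before byte-swapping it into the request. That reading of the units is an assumption, but the arithmetic itself is:

	/* assumed: firmware expresses the LED toggle period in 1/500 s ticks */
	u16 ticks = freq_hz ? 500 / freq_hz : 0;
	if (ticks == 0)
		ticks = 1;	/* clamp: 0 Hz or anything >= 500 Hz becomes 1 tick */
	msg->freq = cpu_to_be16(ticks);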
4777 bfa_trc(diag, diag->ledtest.lock); in diag_ledtest_comp()
4778 diag->ledtest.lock = BFA_FALSE; in diag_ledtest_comp()
4790 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg; in diag_portbeacon_send()
4792 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON, in diag_portbeacon_send()
4793 bfa_ioc_portid(diag->ioc)); in diag_portbeacon_send()
4794 msg->beacon = beacon; in diag_portbeacon_send()
4795 msg->period = cpu_to_be32(sec); in diag_portbeacon_send()
4797 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd); in diag_portbeacon_send()
4803 bfa_trc(diag, diag->beacon.state); in diag_portbeacon_comp()
4804 diag->beacon.state = BFA_FALSE; in diag_portbeacon_comp()
4805 if (diag->cbfn_beacon) in diag_portbeacon_comp()
4806 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e); in diag_portbeacon_comp()
4817 switch (msg->mh.msg_id) { in bfa_diag_intr()
4831 bfa_trc(diag, msg->mh.msg_id); in bfa_diag_intr()
4839 * @param[in] *diag - diag data struct
4840	 * @param[in] *memtest - mem test params input from upper layer
4841 * @param[in] pattern - mem test pattern
4842 * @param[in] *result - mem test result
4843	 * @param[in] cbfn - mem test callback function
4844	 * @param[in] cbarg - callback function arg
4855 if (!bfa_ioc_adapter_is_disabled(diag->ioc)) in bfa_diag_memtest()
4859 if (diag->block) { in bfa_diag_memtest()
4860 bfa_trc(diag, diag->block); in bfa_diag_memtest()
4863 diag->block = 1; in bfa_diag_memtest()
4865 diag->result = result; in bfa_diag_memtest()
4866 diag->cbfn = cbfn; in bfa_diag_memtest()
4867 diag->cbarg = cbarg; in bfa_diag_memtest()
4870 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); in bfa_diag_memtest()
4872 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, in bfa_diag_memtest()
4874 diag->timer_active = 1; in bfa_diag_memtest()
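Memtest is the one diag request that needs the adapter down first (the bfa_ioc_adapter_is_disabled() check above); it then reboots the IOC with the memtest firmware image and arms a completion timer. A hedged caller sketch, with memtest_done, params and drv as illustrative names:

	static void
	memtest_done(void *cbarg, bfa_status_t status)
	{
		/* BFA_STATUS_OK or BFA_STATUS_MEMTEST_FAILED per the handler above */
	}

	bfa_ioc_disable(ioc);		/* must have completed before the call */
	status = bfa_diag_memtest(diag, &params, pattern,
				  &memtest_result, memtest_done, drv);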
4881 * @param[in] *diag - diag data struct
4882	 * @param[in] cnt - DMA loop count for testing PCIe
4883	 * @param[in] data - data pattern to pass to the firmware
4884	 * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
4885	 * @param[in] cbfn - callback function
4886	 * @param[in] *cbarg - callback function arg
4898 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_fwping()
4901 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) && in bfa_diag_fwping()
4902 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH)) in bfa_diag_fwping()
4906 if (diag->block || diag->fwping.lock) { in bfa_diag_fwping()
4907 bfa_trc(diag, diag->block); in bfa_diag_fwping()
4908 bfa_trc(diag, diag->fwping.lock); in bfa_diag_fwping()
4913 diag->fwping.lock = 1; in bfa_diag_fwping()
4914 diag->fwping.cbfn = cbfn; in bfa_diag_fwping()
4915 diag->fwping.cbarg = cbarg; in bfa_diag_fwping()
4916 diag->fwping.result = result; in bfa_diag_fwping()
4917 diag->fwping.data = data; in bfa_diag_fwping()
4918 diag->fwping.count = cnt; in bfa_diag_fwping()
4921 diag->fwping.result->data = 0; in bfa_diag_fwping()
4922 diag->fwping.result->status = BFA_STATUS_OK; in bfa_diag_fwping()
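A hedged caller sketch for fwping: the request DMAs the chosen 32-bit pattern to the firmware 'cnt' times and is refused while another diag operation is in flight or on CT2 Ethernet functions (fwping_done, fwping_result and drv are illustrative names):

	status = bfa_diag_fwping(diag, 10, 0xdeadbeef,
				 &fwping_result, fwping_done, drv);
	if (status != BFA_STATUS_OK)
		return status;	/* busy, not operational, or unsupported */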
4932 * @param[in] *diag - diag data struct
4933	 * @param[in] *result - pointer to bfa_diag_temp_t data struct
4934	 * @param[in] cbfn - callback function
4935	 * @param[in] *cbarg - callback function arg
4945 if (diag->block || diag->tsensor.lock) { in bfa_diag_tsensor_query()
4946 bfa_trc(diag, diag->block); in bfa_diag_tsensor_query()
4947 bfa_trc(diag, diag->tsensor.lock); in bfa_diag_tsensor_query()
4951 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_tsensor_query()
4955 diag->tsensor.lock = 1; in bfa_diag_tsensor_query()
4956 diag->tsensor.temp = result; in bfa_diag_tsensor_query()
4957 diag->tsensor.cbfn = cbfn; in bfa_diag_tsensor_query()
4958 diag->tsensor.cbarg = cbarg; in bfa_diag_tsensor_query()
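Once the callback fires, the fields filled in by diag_tempsensor_comp() above are the ones a consumer would look at. A hedged sketch of reading the result the caller passed in (only the field names visible above are relied on; the enclosing type is not asserted):

	if (result->status == BFA_STATUS_OK) {
		pr_info("junction temperature: %u\n", result->temp);
		if (result->ts_brd)
			pr_info("board temperature: %u\n", result->brd_temp);
	}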
4969 * @param[in] *diag - diag data struct
4970	 * @param[in] *ledtest - pointer to ledtest data structure
4977 bfa_trc(diag, ledtest->cmd); in bfa_diag_ledtest()
4979 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_ledtest()
4982 if (diag->beacon.state) in bfa_diag_ledtest()
4985 if (diag->ledtest.lock) in bfa_diag_ledtest()
4989 diag->ledtest.lock = BFA_TRUE; in bfa_diag_ledtest()
4998 * @param[in] *diag - diag data struct
4999 * @param[in] beacon - port beaconing 1:ON 0:OFF
5000 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5001 * @param[in] sec - beaconing duration in seconds
5013 if (!bfa_ioc_is_operational(diag->ioc)) in bfa_diag_beacon_port()
5016 if (diag->ledtest.lock) in bfa_diag_beacon_port()
5019	 if (diag->beacon.state && beacon) /* beacon already on */ in bfa_diag_beacon_port()
5022 diag->beacon.state = beacon; in bfa_diag_beacon_port()
5023 diag->beacon.link_e2e = link_e2e_beacon; in bfa_diag_beacon_port()
5024 if (diag->cbfn_beacon) in bfa_diag_beacon_port()
5025 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon); in bfa_diag_beacon_port()
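A hedged one-line caller sketch matching the parameter list documented above (port beaconing on, end-to-end link beaconing off, for 30 seconds); the request is refused while an LED test holds ledtest.lock, and re-enabling an already-lit beacon is a no-op:

	status = bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, 30);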
5049 diag->dev = dev; in bfa_diag_attach()
5050 diag->ioc = ioc; in bfa_diag_attach()
5051 diag->trcmod = trcmod; in bfa_diag_attach()
5053 diag->block = 0; in bfa_diag_attach()
5054 diag->cbfn = NULL; in bfa_diag_attach()
5055 diag->cbarg = NULL; in bfa_diag_attach()
5056 diag->result = NULL; in bfa_diag_attach()
5057 diag->cbfn_beacon = cbfn_beacon; in bfa_diag_attach()
5059 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag); in bfa_diag_attach()
5060 bfa_q_qe_init(&diag->ioc_notify); in bfa_diag_attach()
5061 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag); in bfa_diag_attach()
5062 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q); in bfa_diag_attach()
5068 diag->fwping.dbuf_kva = dm_kva; in bfa_diag_memclaim()
5069 diag->fwping.dbuf_pa = dm_pa; in bfa_diag_memclaim()
5070 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ); in bfa_diag_memclaim()
5091 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING); in bfa_phy_present()
5104 if (phy->op_busy) { in bfa_phy_notify()
5105 phy->status = BFA_STATUS_IOC_FAILURE; in bfa_phy_notify()
5106 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_notify()
5107 phy->op_busy = 0; in bfa_phy_notify()
5119 * @param[in] cbarg - callback argument
5126 (struct bfi_phy_query_req_s *) phy->mb.msg; in bfa_phy_query_send()
5128 msg->instance = phy->instance; in bfa_phy_query_send()
5129 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ, in bfa_phy_query_send()
5130 bfa_ioc_portid(phy->ioc)); in bfa_phy_query_send()
5131 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa); in bfa_phy_query_send()
5132 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_query_send()
5138 * @param[in] cbarg - callback argument
5145 (struct bfi_phy_write_req_s *) phy->mb.msg; in bfa_phy_write_send()
5150 msg->instance = phy->instance; in bfa_phy_write_send()
5151 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_write_send()
5152 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_write_send()
5153 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_write_send()
5154 msg->length = cpu_to_be32(len); in bfa_phy_write_send()
5157 msg->last = (len == phy->residue) ? 1 : 0; in bfa_phy_write_send()
5159 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ, in bfa_phy_write_send()
5160 bfa_ioc_portid(phy->ioc)); in bfa_phy_write_send()
5161 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_write_send()
5163 buf = (u16 *) (phy->ubuf + phy->offset); in bfa_phy_write_send()
5164 dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_write_send()
5169 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_write_send()
5171 phy->residue -= len; in bfa_phy_write_send()
5172 phy->offset += len; in bfa_phy_write_send()
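The write helper above is re-invoked from the completion path until residue reaches zero; each pass stages at most BFA_PHY_DMA_BUF_SZ bytes, flags the final chunk, and advances the bookkeeping. A hedged restatement of that chunking (the 16-bit copy loop is elided from this listing, so its exact form is an assumption):

	u32 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->last = (len == phy->residue) ? 1 : 0;	/* final chunk marker */

	for (i = 0; i < len / 2; i++)	/* assumed loop bound */
		dbuf[i] = buf[i];	/* stage user data into the DMA buffer */

	phy->residue -= len;
	phy->offset  += len;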
5178 * @param[in] cbarg - callback argument
5185 (struct bfi_phy_read_req_s *) phy->mb.msg; in bfa_phy_read_send()
5188 msg->instance = phy->instance; in bfa_phy_read_send()
5189 msg->offset = cpu_to_be32(phy->addr_off + phy->offset); in bfa_phy_read_send()
5190 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ? in bfa_phy_read_send()
5191 phy->residue : BFA_PHY_DMA_BUF_SZ; in bfa_phy_read_send()
5192 msg->length = cpu_to_be32(len); in bfa_phy_read_send()
5193 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ, in bfa_phy_read_send()
5194 bfa_ioc_portid(phy->ioc)); in bfa_phy_read_send()
5195 bfa_alen_set(&msg->alen, len, phy->dbuf_pa); in bfa_phy_read_send()
5196 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_read_send()
5202 * @param[in] cbarg - callback argument
5209 (struct bfi_phy_stats_req_s *) phy->mb.msg; in bfa_phy_stats_send()
5211 msg->instance = phy->instance; in bfa_phy_stats_send()
5212 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ, in bfa_phy_stats_send()
5213 bfa_ioc_portid(phy->ioc)); in bfa_phy_stats_send()
5214 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa); in bfa_phy_stats_send()
5215 bfa_ioc_mbox_queue(phy->ioc, &phy->mb); in bfa_phy_stats_send()
5221 * @param[in] mincfg - minimal cfg variable
5236 * @param[in] phy - phy structure
5237 * @param[in] ioc - ioc structure
5238 * @param[in] dev - device structure
5239 * @param[in] trcmod - trace module
5240 * @param[in] logmod - log module
5246 phy->ioc = ioc; in bfa_phy_attach()
5247 phy->trcmod = trcmod; in bfa_phy_attach()
5248 phy->cbfn = NULL; in bfa_phy_attach()
5249 phy->cbarg = NULL; in bfa_phy_attach()
5250 phy->op_busy = 0; in bfa_phy_attach()
5252 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy); in bfa_phy_attach()
5253 bfa_q_qe_init(&phy->ioc_notify); in bfa_phy_attach()
5254 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy); in bfa_phy_attach()
5255 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q); in bfa_phy_attach()
5259 phy->dbuf_kva = NULL; in bfa_phy_attach()
5260 phy->dbuf_pa = 0; in bfa_phy_attach()
5267 * @param[in] phy - phy structure
5268 * @param[in] dm_kva - pointer to virtual memory address
5269 * @param[in] dm_pa - physical memory address
5270 * @param[in] mincfg - minimal cfg variable
5279 phy->dbuf_kva = dm_kva; in bfa_phy_memclaim()
5280 phy->dbuf_pa = dm_pa; in bfa_phy_memclaim()
5281 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ); in bfa_phy_memclaim()
5298 * @param[in] phy - phy structure
5299 * @param[in] attr - phy attribute structure
5300 * @param[in] cbfn - callback function
5301 * @param[in] cbarg - callback argument
5315 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_attr()
5318 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_attr()
5319 bfa_trc(phy, phy->op_busy); in bfa_phy_get_attr()
5323 phy->op_busy = 1; in bfa_phy_get_attr()
5324 phy->cbfn = cbfn; in bfa_phy_get_attr()
5325 phy->cbarg = cbarg; in bfa_phy_get_attr()
5326 phy->instance = instance; in bfa_phy_get_attr()
5327 phy->ubuf = (uint8_t *) attr; in bfa_phy_get_attr()
5336 * @param[in] phy - phy structure
5337 * @param[in] instance - phy image instance
5338 * @param[in] stats - pointer to phy stats
5339 * @param[in] cbfn - callback function
5340 * @param[in] cbarg - callback argument
5355 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_get_stats()
5358 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_get_stats()
5359 bfa_trc(phy, phy->op_busy); in bfa_phy_get_stats()
5363 phy->op_busy = 1; in bfa_phy_get_stats()
5364 phy->cbfn = cbfn; in bfa_phy_get_stats()
5365 phy->cbarg = cbarg; in bfa_phy_get_stats()
5366 phy->instance = instance; in bfa_phy_get_stats()
5367 phy->ubuf = (u8 *) stats; in bfa_phy_get_stats()
5376 * @param[in] phy - phy structure
5377 * @param[in] instance - phy image instance
5378 * @param[in] buf - update data buffer
5379 * @param[in] len - data buffer length
5380 * @param[in] offset - offset relative to starting address
5381 * @param[in] cbfn - callback function
5382 * @param[in] cbarg - callback argument
5399 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_update()
5402	 /* 'len' must be on a word (4-byte) boundary */ in bfa_phy_update()
5406 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_update()
5407 bfa_trc(phy, phy->op_busy); in bfa_phy_update()
5411 phy->op_busy = 1; in bfa_phy_update()
5412 phy->cbfn = cbfn; in bfa_phy_update()
5413 phy->cbarg = cbarg; in bfa_phy_update()
5414 phy->instance = instance; in bfa_phy_update()
5415 phy->residue = len; in bfa_phy_update()
5416 phy->offset = 0; in bfa_phy_update()
5417 phy->addr_off = offset; in bfa_phy_update()
5418 phy->ubuf = buf; in bfa_phy_update()
5427 * @param[in] phy - phy structure
5428 * @param[in] instance - phy image instance
5429 * @param[in] buf - read data buffer
5430 * @param[in] len - data buffer length
5431 * @param[in] offset - offset relative to starting address
5432 * @param[in] cbfn - callback function
5433 * @param[in] cbarg - callback argument
5450 if (!bfa_ioc_is_operational(phy->ioc)) in bfa_phy_read()
5453	 /* 'len' must be on a word (4-byte) boundary */ in bfa_phy_read()
5457 if (phy->op_busy || bfa_phy_busy(phy->ioc)) { in bfa_phy_read()
5458 bfa_trc(phy, phy->op_busy); in bfa_phy_read()
5462 phy->op_busy = 1; in bfa_phy_read()
5463 phy->cbfn = cbfn; in bfa_phy_read()
5464 phy->cbarg = cbarg; in bfa_phy_read()
5465 phy->instance = instance; in bfa_phy_read()
5466 phy->residue = len; in bfa_phy_read()
5467 phy->offset = 0; in bfa_phy_read()
5468 phy->addr_off = offset; in bfa_phy_read()
5469 phy->ubuf = buf; in bfa_phy_read()
5478 * @param[in] phyarg - phy structure
5479 * @param[in] msg - message structure
5496 bfa_trc(phy, msg->mh.msg_id); in bfa_phy_intr()
5498 if (!phy->op_busy) { in bfa_phy_intr()
5504 switch (msg->mh.msg_id) { in bfa_phy_intr()
5506 status = be32_to_cpu(m.query->status); in bfa_phy_intr()
5511 (struct bfa_phy_attr_s *) phy->ubuf; in bfa_phy_intr()
5512 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5514 bfa_trc(phy, attr->status); in bfa_phy_intr()
5515 bfa_trc(phy, attr->length); in bfa_phy_intr()
5518 phy->status = status; in bfa_phy_intr()
5519 phy->op_busy = 0; in bfa_phy_intr()
5520 if (phy->cbfn) in bfa_phy_intr()
5521 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5524 status = be32_to_cpu(m.stats->status); in bfa_phy_intr()
5529 (struct bfa_phy_stats_s *) phy->ubuf; in bfa_phy_intr()
5530 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva, in bfa_phy_intr()
5532 bfa_trc(phy, stats->status); in bfa_phy_intr()
5535 phy->status = status; in bfa_phy_intr()
5536 phy->op_busy = 0; in bfa_phy_intr()
5537 if (phy->cbfn) in bfa_phy_intr()
5538 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5541 status = be32_to_cpu(m.write->status); in bfa_phy_intr()
5544 if (status != BFA_STATUS_OK || phy->residue == 0) { in bfa_phy_intr()
5545 phy->status = status; in bfa_phy_intr()
5546 phy->op_busy = 0; in bfa_phy_intr()
5547 if (phy->cbfn) in bfa_phy_intr()
5548 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5550 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5555 status = be32_to_cpu(m.read->status); in bfa_phy_intr()
5559 phy->status = status; in bfa_phy_intr()
5560 phy->op_busy = 0; in bfa_phy_intr()
5561 if (phy->cbfn) in bfa_phy_intr()
5562 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5564 u32 len = be32_to_cpu(m.read->length); in bfa_phy_intr()
5565 u16 *buf = (u16 *)(phy->ubuf + phy->offset); in bfa_phy_intr()
5566 u16 *dbuf = (u16 *)phy->dbuf_kva; in bfa_phy_intr()
5569 bfa_trc(phy, phy->offset); in bfa_phy_intr()
5575 phy->residue -= len; in bfa_phy_intr()
5576 phy->offset += len; in bfa_phy_intr()
5578 if (phy->residue == 0) { in bfa_phy_intr()
5579 phy->status = status; in bfa_phy_intr()
5580 phy->op_busy = 0; in bfa_phy_intr()
5581 if (phy->cbfn) in bfa_phy_intr()
5582 phy->cbfn(phy->cbarg, phy->status); in bfa_phy_intr()
5638 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_uninit()
5642 if (dconf->min_cfg) { in bfa_dconf_sm_uninit()
5643 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_sm_uninit()
5647 dconf->flashdone = BFA_FALSE; in bfa_dconf_sm_uninit()
5648 bfa_trc(dconf->bfa, dconf->flashdone); in bfa_dconf_sm_uninit()
5649 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), in bfa_dconf_sm_uninit()
5650 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_sm_uninit()
5651 dconf->dconf, in bfa_dconf_sm_uninit()
5653 bfa_dconf_init_cb, dconf->bfa); in bfa_dconf_sm_uninit()
5655 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); in bfa_dconf_sm_uninit()
5661 dconf->flashdone = BFA_TRUE; in bfa_dconf_sm_uninit()
5667 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_uninit()
5678 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5688 dconf->flashdone = BFA_TRUE; in bfa_dconf_sm_flash_read()
5689 bfa_trc(dconf->bfa, dconf->flashdone); in bfa_dconf_sm_flash_read()
5694 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_flash_read()
5704 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_ready()
5708 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_ready()
5713 dconf->flashdone = BFA_TRUE; in bfa_dconf_sm_ready()
5714 bfa_trc(dconf->bfa, dconf->flashdone); in bfa_dconf_sm_ready()
5721 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_ready()
5732 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_dirty()
5740 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5741 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5745 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5746 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_dirty()
5754 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_dirty()
5758 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_dirty()
5769 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_final_sync()
5774 bfa_timer_stop(&dconf->timer); in bfa_dconf_sm_final_sync()
5777 dconf->flashdone = BFA_TRUE; in bfa_dconf_sm_final_sync()
5778 bfa_trc(dconf->bfa, dconf->flashdone); in bfa_dconf_sm_final_sync()
5779 bfa_ioc_disable(&dconf->bfa->ioc); in bfa_dconf_sm_final_sync()
5782 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_final_sync()
5789 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_sync()
5796 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
5801 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_sync()
5809 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_sync()
5817 bfa_trc(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
5821 bfa_timer_start(dconf->bfa, &dconf->timer, in bfa_dconf_sm_iocdown_dirty()
5826 dconf->flashdone = BFA_TRUE; in bfa_dconf_sm_iocdown_dirty()
5832 bfa_sm_fault(dconf->bfa, event); in bfa_dconf_sm_iocdown_dirty()
5845 if (cfg->drvcfg.min_cfg) in bfa_dconf_meminfo()
5859 dconf->bfad = bfad; in bfa_dconf_attach()
5860 dconf->bfa = bfa; in bfa_dconf_attach()
5861 dconf->instance = bfa->ioc.port_id; in bfa_dconf_attach()
5862 bfa_trc(bfa, dconf->instance); in bfa_dconf_attach()
5864 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf); in bfa_dconf_attach()
5865 if (cfg->drvcfg.min_cfg) { in bfa_dconf_attach()
5867 dconf->min_cfg = BFA_TRUE; in bfa_dconf_attach()
5872 dconf->flashdone = BFA_TRUE; in bfa_dconf_attach()
5874 dconf->min_cfg = BFA_FALSE; in bfa_dconf_attach()
5888 dconf->flashdone = BFA_TRUE; in bfa_dconf_init_cb()
5889 bfa_trc(bfa, dconf->flashdone); in bfa_dconf_init_cb()
5893 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) in bfa_dconf_init_cb()
5894 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE; in bfa_dconf_init_cb()
5895 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) in bfa_dconf_init_cb()
5896 dconf->dconf->hdr.version = BFI_DCONF_VERSION; in bfa_dconf_init_cb()
5938 bfa_trc(dconf->bfa, 0); in bfa_dconf_flash_write()
5940 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa), in bfa_dconf_flash_write()
5941 BFA_FLASH_PART_DRV, dconf->instance, in bfa_dconf_flash_write()
5942 dconf->dconf, sizeof(struct bfa_dconf_s), 0, in bfa_dconf_flash_write()
5946 bfa_trc(dconf->bfa, bfa_status); in bfa_dconf_flash_write()
5955 bfa_trc(dconf->bfa, 0); in bfa_dconf_update()
5959 if (dconf->min_cfg) { in bfa_dconf_update()
5960 bfa_trc(dconf->bfa, dconf->min_cfg); in bfa_dconf_update()
5980 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE; in bfa_dconf_modexit()
5981 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone); in bfa_dconf_modexit()