Lines Matching +full:parallel +full:- +full:memories

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2022 HabanaLabs, Ltd.
27 * - Range registers
28 * - MMU
31 * - Range registers (protect the first 512MB)
34 * - Range registers
35 * - Protection bits
40 * - DMA is not secured.
41 * - PQ and CQ are secured.
42 * - CP is secured: The driver needs to parse CB but WREG should be allowed
52 * - Clear SRAM on context switch (happens on context switch when device is idle)
54 * - MMU page tables area clear (happens on init)
56 * QMAN DMA 2-7, TPC, MME, NIC:
62 #define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb"
63 #define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb"
408 [SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
411 [SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
414 [SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
423 mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
425 mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
426 [SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
520 struct asic_fixed_properties *prop = &hdev->asic_prop; in set_default_power_values()
522 if (hdev->card_type == cpucp_card_type_pmc) { in set_default_power_values()
523 prop->max_power_default = MAX_POWER_DEFAULT_PMC; in set_default_power_values()
525 if (prop->fw_security_enabled) in set_default_power_values()
526 prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC; in set_default_power_values()
528 prop->dc_power_default = DC_POWER_DEFAULT_PMC; in set_default_power_values()
530 prop->max_power_default = MAX_POWER_DEFAULT_PCI; in set_default_power_values()
531 prop->dc_power_default = DC_POWER_DEFAULT_PCI; in set_default_power_values()
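
The card-type branch above picks both power defaults, with the PMC DC value further split on firmware security. A minimal standalone sketch of that selection; the wattage numbers are placeholders, not the real MAX_POWER_DEFAULT_*/DC_POWER_DEFAULT_* constants:

#include <stdbool.h>
#include <stdio.h>

enum card_type { CARD_PCI, CARD_PMC };

/* Placeholder values; the real constants come from the GAUDI headers. */
static void pick_power_defaults(enum card_type type, bool fw_secured,
				unsigned int *max_power, unsigned int *dc_power)
{
	if (type == CARD_PMC) {
		*max_power = 350000;
		*dc_power = fw_secured ? 97000 : 60000;
	} else {
		*max_power = 200000;
		*dc_power = 60000;
	}
}

int main(void)
{
	unsigned int maxp, dcp;

	pick_power_defaults(CARD_PMC, true, &maxp, &dcp);
	printf("max=%u dc=%u\n", maxp, dcp);
	return 0;
}
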
537 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_set_fixed_properties()
541 prop->max_queues = GAUDI_QUEUE_ID_SIZE; in gaudi_set_fixed_properties()
542 prop->hw_queues_props = kcalloc(prop->max_queues, in gaudi_set_fixed_properties()
546 if (!prop->hw_queues_props) in gaudi_set_fixed_properties()
547 return -ENOMEM; in gaudi_set_fixed_properties()
549 for (i = 0 ; i < prop->max_queues ; i++) { in gaudi_set_fixed_properties()
551 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; in gaudi_set_fixed_properties()
552 prop->hw_queues_props[i].driver_only = 0; in gaudi_set_fixed_properties()
553 prop->hw_queues_props[i].supports_sync_stream = 1; in gaudi_set_fixed_properties()
554 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
558 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; in gaudi_set_fixed_properties()
559 prop->hw_queues_props[i].driver_only = 1; in gaudi_set_fixed_properties()
560 prop->hw_queues_props[i].supports_sync_stream = 0; in gaudi_set_fixed_properties()
561 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
564 prop->hw_queues_props[i].type = QUEUE_TYPE_INT; in gaudi_set_fixed_properties()
565 prop->hw_queues_props[i].driver_only = 0; in gaudi_set_fixed_properties()
566 prop->hw_queues_props[i].supports_sync_stream = 0; in gaudi_set_fixed_properties()
567 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
571 prop->hw_queues_props[i].collective_mode = in gaudi_set_fixed_properties()
575 prop->cache_line_size = DEVICE_CACHE_LINE_SIZE; in gaudi_set_fixed_properties()
576 prop->cfg_base_address = CFG_BASE; in gaudi_set_fixed_properties()
577 prop->device_dma_offset_for_host_access = HOST_PHYS_BASE; in gaudi_set_fixed_properties()
578 prop->host_base_address = HOST_PHYS_BASE; in gaudi_set_fixed_properties()
579 prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE; in gaudi_set_fixed_properties()
580 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; in gaudi_set_fixed_properties()
581 prop->completion_mode = HL_COMPLETION_MODE_JOB; in gaudi_set_fixed_properties()
582 prop->collective_first_sob = 0; in gaudi_set_fixed_properties()
583 prop->collective_first_mon = 0; in gaudi_set_fixed_properties()
586 prop->sync_stream_first_sob = in gaudi_set_fixed_properties()
593 prop->sync_stream_first_mon = in gaudi_set_fixed_properties()
597 prop->dram_base_address = DRAM_PHYS_BASE; in gaudi_set_fixed_properties()
598 prop->dram_size = GAUDI_HBM_SIZE_32GB; in gaudi_set_fixed_properties()
599 prop->dram_end_address = prop->dram_base_address + prop->dram_size; in gaudi_set_fixed_properties()
600 prop->dram_user_base_address = DRAM_BASE_ADDR_USER; in gaudi_set_fixed_properties()
602 prop->sram_base_address = SRAM_BASE_ADDR; in gaudi_set_fixed_properties()
603 prop->sram_size = SRAM_SIZE; in gaudi_set_fixed_properties()
604 prop->sram_end_address = prop->sram_base_address + prop->sram_size; in gaudi_set_fixed_properties()
605 prop->sram_user_base_address = in gaudi_set_fixed_properties()
606 prop->sram_base_address + SRAM_USER_BASE_OFFSET; in gaudi_set_fixed_properties()
608 prop->mmu_cache_mng_addr = MMU_CACHE_MNG_ADDR; in gaudi_set_fixed_properties()
609 prop->mmu_cache_mng_size = MMU_CACHE_MNG_SIZE; in gaudi_set_fixed_properties()
611 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR; in gaudi_set_fixed_properties()
612 if (hdev->pldm) in gaudi_set_fixed_properties()
613 prop->mmu_pgt_size = 0x800000; /* 8MB */ in gaudi_set_fixed_properties()
615 prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; in gaudi_set_fixed_properties()
616 prop->mmu_pte_size = HL_PTE_SIZE; in gaudi_set_fixed_properties()
617 prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; in gaudi_set_fixed_properties()
618 prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; in gaudi_set_fixed_properties()
619 prop->dram_page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
620 prop->device_mem_alloc_default_page_size = prop->dram_page_size; in gaudi_set_fixed_properties()
621 prop->dram_supports_virtual_memory = false; in gaudi_set_fixed_properties()
623 prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT; in gaudi_set_fixed_properties()
624 prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT; in gaudi_set_fixed_properties()
625 prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT; in gaudi_set_fixed_properties()
626 prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT; in gaudi_set_fixed_properties()
627 prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT; in gaudi_set_fixed_properties()
628 prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK; in gaudi_set_fixed_properties()
629 prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK; in gaudi_set_fixed_properties()
630 prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK; in gaudi_set_fixed_properties()
631 prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK; in gaudi_set_fixed_properties()
632 prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK; in gaudi_set_fixed_properties()
633 prop->pmmu.start_addr = VA_HOST_SPACE_START; in gaudi_set_fixed_properties()
634 prop->pmmu.end_addr = in gaudi_set_fixed_properties()
635 (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1; in gaudi_set_fixed_properties()
636 prop->pmmu.page_size = PAGE_SIZE_4KB; in gaudi_set_fixed_properties()
637 prop->pmmu.num_hops = MMU_ARCH_5_HOPS; in gaudi_set_fixed_properties()
638 prop->pmmu.last_mask = LAST_MASK; in gaudi_set_fixed_properties()
639 /* TODO: will be duplicated until implementing per-MMU props */ in gaudi_set_fixed_properties()
640 prop->pmmu.hop_table_size = prop->mmu_hop_table_size; in gaudi_set_fixed_properties()
641 prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size; in gaudi_set_fixed_properties()
644 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); in gaudi_set_fixed_properties()
645 prop->pmmu_huge.page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
648 memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu)); in gaudi_set_fixed_properties()
649 prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2); in gaudi_set_fixed_properties()
650 prop->dmmu.end_addr = VA_HOST_SPACE_END; in gaudi_set_fixed_properties()
651 prop->dmmu.page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
653 prop->cfg_size = CFG_SIZE; in gaudi_set_fixed_properties()
654 prop->max_asid = MAX_ASID; in gaudi_set_fixed_properties()
655 prop->num_of_events = GAUDI_EVENT_SIZE; in gaudi_set_fixed_properties()
656 prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE; in gaudi_set_fixed_properties()
657 prop->tpc_enabled_mask = TPC_ENABLED_MASK; in gaudi_set_fixed_properties()
661 prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT; in gaudi_set_fixed_properties()
662 prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE; in gaudi_set_fixed_properties()
664 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE; in gaudi_set_fixed_properties()
665 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI; in gaudi_set_fixed_properties()
667 strscpy_pad(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, in gaudi_set_fixed_properties()
670 prop->max_pending_cs = GAUDI_MAX_PENDING_CS; in gaudi_set_fixed_properties()
672 prop->first_available_user_sob[HL_GAUDI_WS_DCORE] = in gaudi_set_fixed_properties()
673 prop->sync_stream_first_sob + in gaudi_set_fixed_properties()
675 prop->first_available_user_mon[HL_GAUDI_WS_DCORE] = in gaudi_set_fixed_properties()
676 prop->sync_stream_first_mon + in gaudi_set_fixed_properties()
679 prop->first_available_user_interrupt = USHRT_MAX; in gaudi_set_fixed_properties()
680 prop->tpc_interrupt_id = USHRT_MAX; in gaudi_set_fixed_properties()
683 prop->eq_interrupt_id = 0; in gaudi_set_fixed_properties()
686 prop->first_available_cq[i] = USHRT_MAX; in gaudi_set_fixed_properties()
688 prop->fw_cpu_boot_dev_sts0_valid = false; in gaudi_set_fixed_properties()
689 prop->fw_cpu_boot_dev_sts1_valid = false; in gaudi_set_fixed_properties()
690 prop->hard_reset_done_by_fw = false; in gaudi_set_fixed_properties()
691 prop->gic_interrupts_enable = true; in gaudi_set_fixed_properties()
693 prop->server_type = HL_SERVER_TYPE_UNKNOWN; in gaudi_set_fixed_properties()
695 prop->clk_pll_index = HL_GAUDI_MME_PLL; in gaudi_set_fixed_properties()
696 prop->max_freq_value = GAUDI_MAX_CLK_FREQ; in gaudi_set_fixed_properties()
698 prop->use_get_power_for_reset_history = true; in gaudi_set_fixed_properties()
700 prop->configurable_stop_on_err = true; in gaudi_set_fixed_properties()
702 prop->set_max_power_on_device_init = true; in gaudi_set_fixed_properties()
704 prop->dma_mask = 48; in gaudi_set_fixed_properties()
706 prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL; in gaudi_set_fixed_properties()
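
The pmmu hop_shifts/hop_masks programmed above drive the page walk: each hop's PTE index is extracted from the virtual address as (va & mask) >> shift. A standalone sketch with illustrative values (a 9-bit index per hop over a 4KB page, so shifts 48/39/30/21/12); the real MMU_V1_1_HOPx_SHIFT/MASK constants live in the ASIC headers:

#include <stdint.h>
#include <stdio.h>

#define MMU_HOPS 5

static const unsigned int hop_shift[MMU_HOPS] = { 48, 39, 30, 21, 12 };

static uint64_t hop_pte_index(uint64_t va, int hop)
{
	uint64_t mask = 0x1ffULL << hop_shift[hop];	/* 9-bit index, for illustration */

	return (va & mask) >> hop_shift[hop];
}

int main(void)
{
	uint64_t va = 0x0001234567899000ULL;

	for (int hop = 0; hop < MMU_HOPS; hop++)
		printf("hop%d index: 0x%llx\n", hop,
		       (unsigned long long)hop_pte_index(va, hop));
	return 0;
}
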
721 hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] + in gaudi_pci_bars_map()
722 (CFG_BASE - SPI_FLASH_BASE_ADDR); in gaudi_pci_bars_map()
729 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_set_hbm_bar_base()
734 if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr)) in gaudi_set_hbm_bar_base()
737 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_set_hbm_bar_base()
740 /* Inbound Region 2 - Bar 4 - Point to HBM */ in gaudi_set_hbm_bar_base()
749 old_addr = gaudi->hbm_bar_cur_addr; in gaudi_set_hbm_bar_base()
750 gaudi->hbm_bar_cur_addr = addr; in gaudi_set_hbm_bar_base()
762 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_init_iatu()
765 /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */ in gaudi_init_iatu()
773 /* Inbound Region 1 - Bar 2 - Point to SPI FLASH */ in gaudi_init_iatu()
781 /* Inbound Region 2 - Bar 4 - Point to HBM */ in gaudi_init_iatu()
789 /* Outbound Region 0 - Point to Host */ in gaudi_init_iatu()
805 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_early_init()
806 struct pci_dev *pdev = hdev->pdev; in gaudi_early_init()
813 dev_err(hdev->dev, "Failed setting fixed properties\n"); in gaudi_early_init()
821 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n", in gaudi_early_init()
823 rc = -ENODEV; in gaudi_early_init()
830 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n", in gaudi_early_init()
832 rc = -ENODEV; in gaudi_early_init()
836 prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); in gaudi_early_init()
837 hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID); in gaudi_early_init()
840 if (hdev->asic_prop.fw_security_enabled) { in gaudi_early_init()
841 hdev->asic_prop.iatu_done_by_fw = true; in gaudi_early_init()
844 * GIC-security-bit can ONLY be set by CPUCP, so in this stage in gaudi_early_init()
847 hdev->asic_prop.gic_interrupts_enable = false; in gaudi_early_init()
859 hdev->asic_prop.iatu_done_by_fw = true; in gaudi_early_init()
867 * version to determine whether we run with a security-enabled firmware in gaudi_early_init()
871 if (hdev->reset_on_preboot_fail) in gaudi_early_init()
873 hdev->asic_funcs->hw_fini(hdev, true, false); in gaudi_early_init()
878 dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n"); in gaudi_early_init()
879 rc = hdev->asic_funcs->hw_fini(hdev, true, false); in gaudi_early_init()
881 dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc); in gaudi_early_init()
891 kfree(hdev->asic_prop.hw_queues_props); in gaudi_early_init()
897 kfree(hdev->asic_prop.hw_queues_props); in gaudi_early_fini()
904 * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
912 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_fetch_psoc_frequency()
916 if ((hdev->fw_components & FW_TYPE_LINUX) && in gaudi_fetch_psoc_frequency()
917 (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) { in gaudi_fetch_psoc_frequency()
918 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_fetch_psoc_frequency()
920 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_fetch_psoc_frequency()
952 dev_warn(hdev->dev, "Received invalid div select value: %#x\n", div_sel); in gaudi_fetch_psoc_frequency()
957 prop->psoc_timestamp_frequency = freq; in gaudi_fetch_psoc_frequency()
958 prop->psoc_pci_pll_nr = nr; in gaudi_fetch_psoc_frequency()
959 prop->psoc_pci_pll_nf = nf; in gaudi_fetch_psoc_frequency()
960 prop->psoc_pci_pll_od = od; in gaudi_fetch_psoc_frequency()
961 prop->psoc_pci_pll_div_factor = div_fctr; in gaudi_fetch_psoc_frequency()
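
The nr/nf/od/div_fctr values cached above come from the PCI PLL registers. A sketch of how a frequency could be derived from them, assuming the conventional fractional-PLL relation pll = ref * (nf + 1) / ((nr + 1) * (od + 1)), optionally divided by (div_fctr + 1) when a divided output is selected; the exact div_sel decoding is not shown in this excerpt:

#include <stdio.h>

#define PLL_REF_CLK_MHZ 50	/* illustrative reference clock */

static unsigned long pll_freq_mhz(unsigned int nr, unsigned int nf,
				  unsigned int od, unsigned int div_fctr,
				  int divided)
{
	unsigned long pll = (unsigned long)PLL_REF_CLK_MHZ * (nf + 1) /
			    ((nr + 1) * (od + 1));

	return divided ? pll / (div_fctr + 1) : pll;
}

int main(void)
{
	/* 50 * 32 / (1 * 2) = 800 MHz, divided by 4 -> 200 MHz */
	printf("pll = %lu MHz\n", pll_freq_mhz(0, 31, 1, 3, 1));
	return 0;
}
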
969 struct asic_fixed_properties *prop = &hdev->asic_prop; in _gaudi_init_tpc_mem()
980 return -EFAULT; in _gaudi_init_tpc_mem()
982 init_tpc_mem_pkt = cb->kernel_address; in _gaudi_init_tpc_mem()
986 init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size); in _gaudi_init_tpc_mem()
993 init_tpc_mem_pkt->ctl = cpu_to_le32(ctl); in _gaudi_init_tpc_mem()
995 init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr); in _gaudi_init_tpc_mem()
999 round_up(prop->sram_user_base_address, SZ_8K)); in _gaudi_init_tpc_mem()
1000 init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr); in _gaudi_init_tpc_mem()
1004 dev_err(hdev->dev, "Failed to allocate a new job\n"); in _gaudi_init_tpc_mem()
1005 rc = -ENOMEM; in _gaudi_init_tpc_mem()
1009 job->id = 0; in _gaudi_init_tpc_mem()
1010 job->user_cb = cb; in _gaudi_init_tpc_mem()
1011 atomic_inc(&job->user_cb->cs_cnt); in _gaudi_init_tpc_mem()
1012 job->user_cb_size = cb_size; in _gaudi_init_tpc_mem()
1013 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in _gaudi_init_tpc_mem()
1014 job->patched_cb = job->user_cb; in _gaudi_init_tpc_mem()
1015 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); in _gaudi_init_tpc_mem()
1031 hl_userptr_delete_list(hdev, &job->userptr_list); in _gaudi_init_tpc_mem()
1034 atomic_dec(&cb->cs_cnt); in _gaudi_init_tpc_mem()
1038 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in _gaudi_init_tpc_mem()
1044 * gaudi_init_tpc_mem() - Initialize TPC memories.
1047 * Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
1060 rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev); in gaudi_init_tpc_mem()
1061 if (rc == -EINTR && count-- > 0) { in gaudi_init_tpc_mem()
1067 dev_err(hdev->dev, "Failed to load firmware file %s\n", in gaudi_init_tpc_mem()
1072 fw_size = fw->size; in gaudi_init_tpc_mem()
1075 dev_err(hdev->dev, in gaudi_init_tpc_mem()
1078 rc = -ENOMEM; in gaudi_init_tpc_mem()
1082 memcpy(cpu_addr, fw->data, fw_size); in gaudi_init_tpc_mem()
1086 hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle); in gaudi_init_tpc_mem()
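
The request_firmware() call above is retried while it returns -EINTR, with a bounded attempt count. A standalone sketch of that retry shape; load_once() is a stub standing in for the firmware request:

#include <errno.h>
#include <stdio.h>

/* Stub that fails with -EINTR a couple of times before succeeding. */
static int load_once(void)
{
	static int attempts;

	return (attempts++ < 2) ? -EINTR : 0;
}

static int load_with_retry(int retries)
{
	int rc;

	do {
		rc = load_once();
		/* Only an interrupted wait is worth retrying; any other
		 * error is final. */
	} while (rc == -EINTR && retries-- > 0);

	return rc;
}

int main(void)
{
	printf("rc = %d\n", load_with_retry(10));
	return 0;
}
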
1095 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_collective_map_sobs()
1096 struct gaudi_collective_properties *prop = &gaudi->collective_props; in gaudi_collective_map_sobs()
1102 stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream]; in gaudi_collective_map_sobs()
1103 sob_id = prop->hw_sob_group[sob_group_id].base_sob_id; in gaudi_collective_map_sobs()
1107 q = &hdev->kernel_queues[queue_id + (4 * i)]; in gaudi_collective_map_sobs()
1108 q->sync_stream_prop.collective_sob_id = sob_id + i; in gaudi_collective_map_sobs()
1115 q = &hdev->kernel_queues[queue_id]; in gaudi_collective_map_sobs()
1116 q->sync_stream_prop.collective_sob_id = in gaudi_collective_map_sobs()
1120 q = &hdev->kernel_queues[queue_id]; in gaudi_collective_map_sobs()
1121 q->sync_stream_prop.collective_sob_id = in gaudi_collective_map_sobs()
1129 struct hl_device *hdev = hw_sob_group->hdev; in gaudi_sob_group_hw_reset()
1134 (hw_sob_group->base_sob_id * 4) + (i * 4)), 0); in gaudi_sob_group_hw_reset()
1136 kref_init(&hw_sob_group->kref); in gaudi_sob_group_hw_reset()
1143 struct hl_device *hdev = hw_sob_group->hdev; in gaudi_sob_group_reset_error()
1145 dev_crit(hdev->dev, in gaudi_sob_group_reset_error()
1147 hw_sob_group->base_sob_id); in gaudi_sob_group_reset_error()
1155 prop = &gaudi->collective_props; in gaudi_collective_mstr_sob_mask_set()
1157 memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask)); in gaudi_collective_mstr_sob_mask_set()
1160 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) in gaudi_collective_mstr_sob_mask_set()
1161 prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |= in gaudi_collective_mstr_sob_mask_set()
1164 prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |= in gaudi_collective_mstr_sob_mask_set()
1174 gaudi = hdev->asic_specific; in gaudi_collective_init()
1175 prop = &gaudi->collective_props; in gaudi_collective_init()
1176 sob_id = hdev->asic_prop.collective_first_sob; in gaudi_collective_init()
1184 prop->hw_sob_group[i].hdev = hdev; in gaudi_collective_init()
1185 prop->hw_sob_group[i].base_sob_id = sob_id; in gaudi_collective_init()
1187 gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref); in gaudi_collective_init()
1191 prop->next_sob_group_val[i] = 1; in gaudi_collective_init()
1192 prop->curr_sob_group_idx[i] = 0; in gaudi_collective_init()
1203 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_reset_sob_group()
1204 struct gaudi_collective_properties *cprop = &gaudi->collective_props; in gaudi_reset_sob_group()
1206 kref_put(&cprop->hw_sob_group[sob_group].kref, in gaudi_reset_sob_group()
1219 gaudi = hdev->asic_specific; in gaudi_collective_master_init_job()
1220 cprop = &gaudi->collective_props; in gaudi_collective_master_init_job()
1221 queue_id = job->hw_queue_id; in gaudi_collective_master_init_job()
1222 prop = &hdev->kernel_queues[queue_id].sync_stream_prop; in gaudi_collective_master_init_job()
1225 cprop->hw_sob_group[sob_group_offset].base_sob_id; in gaudi_collective_master_init_job()
1226 master_monitor = prop->collective_mstr_mon_id[0]; in gaudi_collective_master_init_job()
1228 cprop->hw_sob_group[sob_group_offset].queue_id = queue_id; in gaudi_collective_master_init_job()
1230 dev_dbg(hdev->dev, in gaudi_collective_master_init_job()
1232 master_sob_base, cprop->mstr_sob_mask[0], in gaudi_collective_master_init_job()
1233 cprop->next_sob_group_val[stream], in gaudi_collective_master_init_job()
1236 wait_prop.data = (void *) job->patched_cb; in gaudi_collective_master_init_job()
1238 wait_prop.sob_mask = cprop->mstr_sob_mask[0]; in gaudi_collective_master_init_job()
1239 wait_prop.sob_val = cprop->next_sob_group_val[stream]; in gaudi_collective_master_init_job()
1246 master_monitor = prop->collective_mstr_mon_id[1]; in gaudi_collective_master_init_job()
1248 dev_dbg(hdev->dev, in gaudi_collective_master_init_job()
1250 master_sob_base, cprop->mstr_sob_mask[1], in gaudi_collective_master_init_job()
1251 cprop->next_sob_group_val[stream], in gaudi_collective_master_init_job()
1255 wait_prop.sob_mask = cprop->mstr_sob_mask[1]; in gaudi_collective_master_init_job()
1268 queue_id = job->hw_queue_id; in gaudi_collective_slave_init_job()
1269 prop = &hdev->kernel_queues[queue_id].sync_stream_prop; in gaudi_collective_slave_init_job()
1271 if (job->cs->encaps_signals) { in gaudi_collective_slave_init_job()
1276 hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job, in gaudi_collective_slave_init_job()
1279 dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n", in gaudi_collective_slave_init_job()
1280 job->cs->sequence, in gaudi_collective_slave_init_job()
1281 cs_cmpl->hw_sob->sob_id, in gaudi_collective_slave_init_job()
1282 cs_cmpl->sob_val); in gaudi_collective_slave_init_job()
1286 wait_prop.data = (void *) job->user_cb; in gaudi_collective_slave_init_job()
1287 wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; in gaudi_collective_slave_init_job()
1289 wait_prop.sob_val = cs_cmpl->sob_val; in gaudi_collective_slave_init_job()
1290 wait_prop.mon_id = prop->collective_slave_mon_id; in gaudi_collective_slave_init_job()
1294 dev_dbg(hdev->dev, in gaudi_collective_slave_init_job()
1296 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, in gaudi_collective_slave_init_job()
1297 prop->collective_slave_mon_id, queue_id); in gaudi_collective_slave_init_job()
1301 dev_dbg(hdev->dev, in gaudi_collective_slave_init_job()
1303 prop->collective_sob_id, queue_id); in gaudi_collective_slave_init_job()
1305 cb_size += gaudi_gen_signal_cb(hdev, job->user_cb, in gaudi_collective_slave_init_job()
1306 prop->collective_sob_id, cb_size, false); in gaudi_collective_slave_init_job()
1312 container_of(cs->signal_fence, struct hl_cs_compl, base_fence); in gaudi_collective_wait_init_cs()
1314 container_of(cs->fence, struct hl_cs_compl, base_fence); in gaudi_collective_wait_init_cs()
1315 struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl; in gaudi_collective_wait_init_cs()
1323 ctx = cs->ctx; in gaudi_collective_wait_init_cs()
1324 hdev = ctx->hdev; in gaudi_collective_wait_init_cs()
1325 gaudi = hdev->asic_specific; in gaudi_collective_wait_init_cs()
1326 cprop = &gaudi->collective_props; in gaudi_collective_wait_init_cs()
1328 if (cs->encaps_signals) { in gaudi_collective_wait_init_cs()
1329 cs_cmpl->hw_sob = handle->hw_sob; in gaudi_collective_wait_init_cs()
1336 cs_cmpl->sob_val = 0; in gaudi_collective_wait_init_cs()
1339 cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; in gaudi_collective_wait_init_cs()
1340 cs_cmpl->sob_val = signal_cs_cmpl->sob_val; in gaudi_collective_wait_init_cs()
1355 spin_lock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1357 if (completion_done(&cs->signal_fence->completion)) { in gaudi_collective_wait_init_cs()
1358 spin_unlock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1359 return -EINVAL; in gaudi_collective_wait_init_cs()
1362 kref_get(&cs_cmpl->hw_sob->kref); in gaudi_collective_wait_init_cs()
1364 spin_unlock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1367 job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); in gaudi_collective_wait_init_cs()
1368 stream = job->hw_queue_id % 4; in gaudi_collective_wait_init_cs()
1370 stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream]; in gaudi_collective_wait_init_cs()
1372 list_for_each_entry(job, &cs->job_list, cs_node) { in gaudi_collective_wait_init_cs()
1373 queue_id = job->hw_queue_id; in gaudi_collective_wait_init_cs()
1375 if (hdev->kernel_queues[queue_id].collective_mode == in gaudi_collective_wait_init_cs()
1383 cs_cmpl->sob_group = sob_group_offset; in gaudi_collective_wait_init_cs()
1386 kref_get(&cprop->hw_sob_group[sob_group_offset].kref); in gaudi_collective_wait_init_cs()
1387 cprop->next_sob_group_val[stream]++; in gaudi_collective_wait_init_cs()
1389 if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) { in gaudi_collective_wait_init_cs()
1395 kref_put(&cprop->hw_sob_group[sob_group_offset].kref, in gaudi_collective_wait_init_cs()
1397 cprop->next_sob_group_val[stream] = 1; in gaudi_collective_wait_init_cs()
1399 cprop->curr_sob_group_idx[stream] = in gaudi_collective_wait_init_cs()
1400 (cprop->curr_sob_group_idx[stream] + 1) & in gaudi_collective_wait_init_cs()
1401 (HL_RSVD_SOBS - 1); in gaudi_collective_wait_init_cs()
1405 dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n", in gaudi_collective_wait_init_cs()
1406 cprop->curr_sob_group_idx[stream], stream); in gaudi_collective_wait_init_cs()
1410 hl_fence_put(cs->signal_fence); in gaudi_collective_wait_init_cs()
1411 cs->signal_fence = NULL; in gaudi_collective_wait_init_cs()
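
next_sob_group_val grows monotonically and, on hitting HL_MAX_SOB_VAL, the code drops its reference to the exhausted group, restarts the value at 1 and advances the group index with a power-of-two mask. A standalone sketch of that wraparound; RSVD_SOB_GROUPS and MAX_SOB_VAL are placeholder values:

#include <stdio.h>

#define RSVD_SOB_GROUPS 4	/* stands in for HL_RSVD_SOBS; power of two */
#define MAX_SOB_VAL (1 << 15)	/* stands in for HL_MAX_SOB_VAL */

struct stream_sync {
	unsigned int next_val;	/* value the next wait will expect */
	unsigned int curr_grp;	/* round-robin SOB group index */
};

static void advance_sob(struct stream_sync *s)
{
	if (++s->next_val == MAX_SOB_VAL) {
		/* Counters exhausted: start again from 1 on the next group.
		 * A power-of-two group count makes the wrap a mask, not a
		 * modulo. */
		s->next_val = 1;
		s->curr_grp = (s->curr_grp + 1) & (RSVD_SOB_GROUPS - 1);
	}
}

int main(void)
{
	struct stream_sync s = { .next_val = 1, .curr_grp = 0 };

	for (int i = 0; i < MAX_SOB_VAL; i++)
		advance_sob(&s);
	printf("val=%u grp=%u\n", s.next_val, s.curr_grp);
	return 0;
}
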
1424 return cacheline_end - user_cb_size + additional_commands; in gaudi_get_patched_cb_extra_size()
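
The return above sizes the extra tail of a patched CB: padding out to the next device cache-line boundary plus room for trailing completion packets. A sketch under the assumption that cacheline_end is the user CB size rounded up to the cache line and additional_commands covers two protection packets; both constants are placeholders:

#include <stdio.h>

#define CACHE_LINE 128		/* illustrative device cache-line size */
#define PROT_PKT_SIZE 16	/* illustrative protection-packet size */

/* Round x up to the next multiple of a power-of-two alignment. */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
	return (x + align - 1) & ~(align - 1);
}

static unsigned int patched_cb_extra(unsigned int user_cb_size)
{
	unsigned int cacheline_end = round_up_pow2(user_cb_size, CACHE_LINE);
	unsigned int additional = 2 * PROT_PKT_SIZE;

	return cacheline_end - user_cb_size + additional;
}

int main(void)
{
	printf("extra for 100 bytes: %u\n", patched_cb_extra(100));
	return 0;
}
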
1441 cntr = &hdev->aggregated_cs_counters; in gaudi_collective_wait_create_job()
1466 hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id]; in gaudi_collective_wait_create_job()
1467 job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true); in gaudi_collective_wait_create_job()
1469 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1470 atomic64_inc(&cntr->out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1471 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_collective_wait_create_job()
1472 return -ENOMEM; in gaudi_collective_wait_create_job()
1478 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1479 atomic64_inc(&cntr->out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1481 return -EFAULT; in gaudi_collective_wait_create_job()
1484 job->id = 0; in gaudi_collective_wait_create_job()
1485 job->cs = cs; in gaudi_collective_wait_create_job()
1486 job->user_cb = cb; in gaudi_collective_wait_create_job()
1487 atomic_inc(&job->user_cb->cs_cnt); in gaudi_collective_wait_create_job()
1488 job->user_cb_size = cb_size; in gaudi_collective_wait_create_job()
1489 job->hw_queue_id = queue_id; in gaudi_collective_wait_create_job()
1495 if (cs->encaps_signals) in gaudi_collective_wait_create_job()
1496 job->encaps_sig_wait_offset = encaps_signal_offset; in gaudi_collective_wait_create_job()
1500 * We call hl_cb_destroy() for two reasons - we don't need in gaudi_collective_wait_create_job()
1505 job->patched_cb = job->user_cb; in gaudi_collective_wait_create_job()
1507 job->patched_cb = NULL; in gaudi_collective_wait_create_job()
1509 job->job_cb_size = job->user_cb_size; in gaudi_collective_wait_create_job()
1510 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_collective_wait_create_job()
1513 if (hw_queue_prop->type == QUEUE_TYPE_EXT) in gaudi_collective_wait_create_job()
1516 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in gaudi_collective_wait_create_job()
1518 list_add_tail(&job->cs_node, &cs->job_list); in gaudi_collective_wait_create_job()
1530 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_collective_wait_create_jobs()
1538 hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id]; in gaudi_collective_wait_create_jobs()
1539 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { in gaudi_collective_wait_create_jobs()
1540 dev_err(hdev->dev, in gaudi_collective_wait_create_jobs()
1543 return -EINVAL; in gaudi_collective_wait_create_jobs()
1549 dev_err(hdev->dev, in gaudi_collective_wait_create_jobs()
1552 return -EINVAL; in gaudi_collective_wait_create_jobs()
1568 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the in gaudi_collective_wait_create_jobs()
1572 * all wait for the user to signal sob 'cs_cmpl->sob_val'. in gaudi_collective_wait_create_jobs()
1582 if (gaudi->hw_cap_initialized & in gaudi_collective_wait_create_jobs()
1612 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_late_init()
1615 rc = gaudi->cpucp_info_get(hdev); in gaudi_late_init()
1617 dev_err(hdev->dev, "Failed to get cpucp info\n"); in gaudi_late_init()
1621 if ((hdev->card_type == cpucp_card_type_pci) && in gaudi_late_init()
1622 (hdev->nic_ports_mask & 0x3)) { in gaudi_late_init()
1623 dev_info(hdev->dev, in gaudi_late_init()
1625 hdev->nic_ports_mask &= ~0x3; in gaudi_late_init()
1639 gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1); in gaudi_late_init()
1644 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); in gaudi_late_init()
1649 rc = hdev->asic_funcs->scrub_device_mem(hdev); in gaudi_late_init()
1655 dev_err(hdev->dev, "Failed to fetch psoc frequency\n"); in gaudi_late_init()
1661 dev_err(hdev->dev, "Failed to clear MMU page tables range\n"); in gaudi_late_init()
1667 dev_err(hdev->dev, "Failed to initialize TPC memories\n"); in gaudi_late_init()
1673 dev_err(hdev->dev, "Failed to init collective\n"); in gaudi_late_init()
1704 * The device CPU works with 40-bit addresses, while bit 39 must be set in gaudi_alloc_cpu_accessible_dma_mem()
1717 rc = -ENOMEM; in gaudi_alloc_cpu_accessible_dma_mem()
1721 end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1; in gaudi_alloc_cpu_accessible_dma_mem()
1728 dev_err(hdev->dev, in gaudi_alloc_cpu_accessible_dma_mem()
1730 rc = -EFAULT; in gaudi_alloc_cpu_accessible_dma_mem()
1734 hdev->cpu_accessible_dma_mem = virt_addr_arr[i]; in gaudi_alloc_cpu_accessible_dma_mem()
1735 hdev->cpu_accessible_dma_address = dma_addr_arr[i]; in gaudi_alloc_cpu_accessible_dma_mem()
1736 hdev->cpu_pci_msb_addr = in gaudi_alloc_cpu_accessible_dma_mem()
1737 GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address); in gaudi_alloc_cpu_accessible_dma_mem()
1739 if (!hdev->asic_prop.fw_security_enabled) in gaudi_alloc_cpu_accessible_dma_mem()
1740 GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); in gaudi_alloc_cpu_accessible_dma_mem()
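
A sketch of the address split hinted at by the comment above: the device CPU sees a 40-bit address whose bit 39 must be set, so the host DMA address's upper bits are saved for a dedicated MSB register while the CPU-visible address keeps bits 38:0 plus BIT(39). The 49:39 bit range is an assumption for illustration, not necessarily the real GAUDI_CPU_PCI_MSB_ADDR layout:

#include <stdint.h>
#include <stdio.h>

static uint64_t cpu_pci_msb(uint64_t addr)
{
	return (addr >> 39) & 0x7ff;	/* bits 49:39, assumed layout */
}

static uint64_t pci_to_cpu_addr(uint64_t addr)
{
	return (addr & ((1ULL << 39) - 1)) | (1ULL << 39);
}

int main(void)
{
	uint64_t dma = 0x12345678ABCULL << 8;

	printf("msb=0x%llx cpu_addr=0x%llx\n",
	       (unsigned long long)cpu_pci_msb(dma),
	       (unsigned long long)pci_to_cpu_addr(dma));
	return 0;
}
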
1752 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_free_internal_qmans_pq_mem()
1757 q = &gaudi->internal_qmans[i]; in gaudi_free_internal_qmans_pq_mem()
1758 if (!q->pq_kernel_addr) in gaudi_free_internal_qmans_pq_mem()
1760 hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr); in gaudi_free_internal_qmans_pq_mem()
1766 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_alloc_internal_qmans_pq_mem()
1774 q = &gaudi->internal_qmans[i]; in gaudi_alloc_internal_qmans_pq_mem()
1778 q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1781 q->pq_size = MME_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1784 q->pq_size = TPC_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1787 q->pq_size = NIC_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1790 dev_err(hdev->dev, "Bad internal queue index %d\n", i); in gaudi_alloc_internal_qmans_pq_mem()
1791 rc = -EINVAL; in gaudi_alloc_internal_qmans_pq_mem()
1795 q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr, in gaudi_alloc_internal_qmans_pq_mem()
1797 if (!q->pq_kernel_addr) { in gaudi_alloc_internal_qmans_pq_mem()
1798 rc = -ENOMEM; in gaudi_alloc_internal_qmans_pq_mem()
1812 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_set_pci_memory_regions()
1816 region = &hdev->pci_mem_region[PCI_REGION_CFG]; in gaudi_set_pci_memory_regions()
1817 region->region_base = CFG_BASE; in gaudi_set_pci_memory_regions()
1818 region->region_size = CFG_SIZE; in gaudi_set_pci_memory_regions()
1819 region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR; in gaudi_set_pci_memory_regions()
1820 region->bar_size = CFG_BAR_SIZE; in gaudi_set_pci_memory_regions()
1821 region->bar_id = CFG_BAR_ID; in gaudi_set_pci_memory_regions()
1822 region->used = 1; in gaudi_set_pci_memory_regions()
1825 region = &hdev->pci_mem_region[PCI_REGION_SRAM]; in gaudi_set_pci_memory_regions()
1826 region->region_base = SRAM_BASE_ADDR; in gaudi_set_pci_memory_regions()
1827 region->region_size = SRAM_SIZE; in gaudi_set_pci_memory_regions()
1828 region->offset_in_bar = 0; in gaudi_set_pci_memory_regions()
1829 region->bar_size = SRAM_BAR_SIZE; in gaudi_set_pci_memory_regions()
1830 region->bar_id = SRAM_BAR_ID; in gaudi_set_pci_memory_regions()
1831 region->used = 1; in gaudi_set_pci_memory_regions()
1834 region = &hdev->pci_mem_region[PCI_REGION_DRAM]; in gaudi_set_pci_memory_regions()
1835 region->region_base = DRAM_PHYS_BASE; in gaudi_set_pci_memory_regions()
1836 region->region_size = hdev->asic_prop.dram_size; in gaudi_set_pci_memory_regions()
1837 region->offset_in_bar = 0; in gaudi_set_pci_memory_regions()
1838 region->bar_size = prop->dram_pci_bar_size; in gaudi_set_pci_memory_regions()
1839 region->bar_id = HBM_BAR_ID; in gaudi_set_pci_memory_regions()
1840 region->used = 1; in gaudi_set_pci_memory_regions()
1843 region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM]; in gaudi_set_pci_memory_regions()
1844 region->region_base = PSOC_SCRATCHPAD_ADDR; in gaudi_set_pci_memory_regions()
1845 region->region_size = PSOC_SCRATCHPAD_SIZE; in gaudi_set_pci_memory_regions()
1846 region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR; in gaudi_set_pci_memory_regions()
1847 region->bar_size = CFG_BAR_SIZE; in gaudi_set_pci_memory_regions()
1848 region->bar_id = CFG_BAR_ID; in gaudi_set_pci_memory_regions()
1849 region->used = 1; in gaudi_set_pci_memory_regions()
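
Each region descriptor above pairs a device-side base with its offset inside a BAR, which makes address translation a bounds check plus simple arithmetic. A standalone sketch; the region numbers are placeholders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pci_mem_region_sketch {
	uint64_t region_base;	/* device-side base address */
	uint64_t region_size;
	uint64_t offset_in_bar;	/* where the region starts inside its BAR */
	int bar_id;
};

/* Translate a device address into (bar_id, offset-within-BAR); false when
 * the address falls outside the region. */
static bool region_addr_to_bar(const struct pci_mem_region_sketch *r,
			       uint64_t addr, int *bar_id, uint64_t *bar_off)
{
	if (addr < r->region_base || addr >= r->region_base + r->region_size)
		return false;

	*bar_id = r->bar_id;
	*bar_off = addr - r->region_base + r->offset_in_bar;
	return true;
}

int main(void)
{
	struct pci_mem_region_sketch cfg = {
		.region_base = 0x7ffc000000ULL, .region_size = 0x4000000,
		.offset_in_bar = 0x2000000, .bar_id = 2,
	};
	int bar;
	uint64_t off;

	if (region_addr_to_bar(&cfg, 0x7ffc001000ULL, &bar, &off))
		printf("bar=%d off=0x%llx\n", bar, (unsigned long long)off);
	return 0;
}
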
1861 return -ENOMEM; in gaudi_sw_init()
1866 dev_err(hdev->dev, in gaudi_sw_init()
1869 rc = -EINVAL; in gaudi_sw_init()
1873 gaudi->events[event_id++] = in gaudi_sw_init()
1878 gaudi->cpucp_info_get = gaudi_cpucp_info_get; in gaudi_sw_init()
1880 hdev->asic_specific = gaudi; in gaudi_sw_init()
1883 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), in gaudi_sw_init()
1884 &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0); in gaudi_sw_init()
1885 if (!hdev->dma_pool) { in gaudi_sw_init()
1886 dev_err(hdev->dev, "failed to create DMA pool\n"); in gaudi_sw_init()
1887 rc = -ENOMEM; in gaudi_sw_init()
1895 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); in gaudi_sw_init()
1896 if (!hdev->cpu_accessible_dma_pool) { in gaudi_sw_init()
1897 dev_err(hdev->dev, in gaudi_sw_init()
1899 rc = -ENOMEM; in gaudi_sw_init()
1903 rc = gen_pool_add(hdev->cpu_accessible_dma_pool, in gaudi_sw_init()
1904 (uintptr_t) hdev->cpu_accessible_dma_mem, in gaudi_sw_init()
1905 HL_CPU_ACCESSIBLE_MEM_SIZE, -1); in gaudi_sw_init()
1907 dev_err(hdev->dev, in gaudi_sw_init()
1909 rc = -EFAULT; in gaudi_sw_init()
1917 spin_lock_init(&gaudi->hw_queues_lock); in gaudi_sw_init()
1919 hdev->supports_sync_stream = true; in gaudi_sw_init()
1920 hdev->supports_coresight = true; in gaudi_sw_init()
1921 hdev->supports_staged_submission = true; in gaudi_sw_init()
1922 hdev->supports_wait_for_multi_cs = true; in gaudi_sw_init()
1924 hdev->asic_funcs->set_pci_memory_regions(hdev); in gaudi_sw_init()
1925 hdev->stream_master_qid_arr = in gaudi_sw_init()
1926 hdev->asic_funcs->get_stream_master_qid_arr(); in gaudi_sw_init()
1927 hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE; in gaudi_sw_init()
1932 gen_pool_destroy(hdev->cpu_accessible_dma_pool); in gaudi_sw_init()
1934 if (!hdev->asic_prop.fw_security_enabled) in gaudi_sw_init()
1935 GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, in gaudi_sw_init()
1936 hdev->cpu_pci_msb_addr); in gaudi_sw_init()
1937 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, in gaudi_sw_init()
1938 hdev->cpu_accessible_dma_address); in gaudi_sw_init()
1940 dma_pool_destroy(hdev->dma_pool); in gaudi_sw_init()
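
A condensed kernel-style sketch of the pool pairing set up above: a dma_pool for small coherent blocks plus a gen_pool carving allocations out of one large CPU-accessible buffer, with error unwinding in reverse order. This is not the driver's exact code; hl_asic_dma_alloc_coherent() is replaced by plain dma_alloc_coherent() and the sizes are placeholders:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/genalloc.h>
#include <linux/log2.h>
#include <linux/sizes.h>

#define BLK_SIZE   256		/* placeholder small-block size */
#define CPU_MEM_SZ SZ_2M	/* placeholder CPU-accessible size */

static int pools_init(struct device *dev, struct dma_pool **blk_pool,
		      struct gen_pool **cpu_pool, void **cpu_mem,
		      dma_addr_t *cpu_dma)
{
	int rc;

	*blk_pool = dma_pool_create("blk", dev, BLK_SIZE, 8, 0);
	if (!*blk_pool)
		return -ENOMEM;

	*cpu_mem = dma_alloc_coherent(dev, CPU_MEM_SZ, cpu_dma, GFP_KERNEL);
	if (!*cpu_mem) {
		rc = -ENOMEM;
		goto free_blk_pool;
	}

	*cpu_pool = gen_pool_create(ilog2(32), -1);	/* 32-byte granules */
	if (!*cpu_pool) {
		rc = -ENOMEM;
		goto free_cpu_mem;
	}

	rc = gen_pool_add(*cpu_pool, (unsigned long)*cpu_mem, CPU_MEM_SZ, -1);
	if (rc)
		goto free_cpu_pool;

	return 0;

free_cpu_pool:
	gen_pool_destroy(*cpu_pool);
free_cpu_mem:
	dma_free_coherent(dev, CPU_MEM_SZ, *cpu_mem, *cpu_dma);
free_blk_pool:
	dma_pool_destroy(*blk_pool);
	return rc;
}
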
1948 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_sw_fini()
1952 gen_pool_destroy(hdev->cpu_accessible_dma_pool); in gaudi_sw_fini()
1954 if (!hdev->asic_prop.fw_security_enabled) in gaudi_sw_fini()
1955 GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, in gaudi_sw_fini()
1956 hdev->cpu_pci_msb_addr); in gaudi_sw_fini()
1958 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, in gaudi_sw_fini()
1959 hdev->cpu_accessible_dma_address); in gaudi_sw_fini()
1961 dma_pool_destroy(hdev->dma_pool); in gaudi_sw_fini()
1973 if (hdev->disabled) in gaudi_irq_handler_single()
1976 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) in gaudi_irq_handler_single()
1977 hl_irq_handler_cq(irq, &hdev->completion_queue[i]); in gaudi_irq_handler_single()
1979 hl_irq_handler_eq(irq, &hdev->event_queue); in gaudi_irq_handler_single()
1994 dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n", in gaudi_pci_irq_vector()
2000 return pci_irq_vector(hdev->pdev, msi_vec); in gaudi_pci_irq_vector()
2007 dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n"); in gaudi_enable_msi_single()
2013 dev_err(hdev->dev, in gaudi_enable_msi_single()
2021 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_enable_msi()
2024 if (gaudi->hw_cap_initialized & HW_CAP_MSI) in gaudi_enable_msi()
2027 rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI); in gaudi_enable_msi()
2029 dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc); in gaudi_enable_msi()
2037 gaudi->hw_cap_initialized |= HW_CAP_MSI; in gaudi_enable_msi()
2042 pci_free_irq_vectors(hdev->pdev); in gaudi_enable_msi()
2048 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_sync_irqs()
2050 if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) in gaudi_sync_irqs()
2059 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_msi()
2061 if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) in gaudi_disable_msi()
2066 pci_free_irq_vectors(hdev->pdev); in gaudi_disable_msi()
2068 gaudi->hw_cap_initialized &= ~HW_CAP_MSI; in gaudi_disable_msi()
2073 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_scrambler_sram()
2075 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_scrambler_sram()
2078 if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & in gaudi_init_scrambler_sram()
2082 if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER) in gaudi_init_scrambler_sram()
2136 gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER; in gaudi_init_scrambler_sram()
2141 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_scrambler_hbm()
2143 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_scrambler_hbm()
2146 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_scrambler_hbm()
2150 if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER) in gaudi_init_scrambler_hbm()
2204 gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER; in gaudi_init_scrambler_hbm()
2209 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_e2e()
2212 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_e2e()
2461 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_hbm_cred()
2464 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_hbm_cred()
2540 writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i); in gaudi_init_golden_registers()
2552 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_pci_dma_qman()
2605 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_pci_dma_qman()
2607 le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl); in gaudi_init_pci_dma_qman()
2611 if (hdev->stop_on_err) in gaudi_init_pci_dma_qman()
2642 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_dma_core()
2651 /* WA for H/W bug H3-2116 */ in gaudi_init_dma_core()
2655 if (hdev->stop_on_err) in gaudi_init_dma_core()
2660 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_dma_core()
2662 le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl); in gaudi_init_dma_core()
2689 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_pci_dma_qmans()
2693 if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA) in gaudi_init_pci_dma_qmans()
2713 q = &hdev->kernel_queues[q_idx]; in gaudi_init_pci_dma_qmans()
2714 q->cq_id = cq_id++; in gaudi_init_pci_dma_qmans()
2715 q->msi_vec = nic_skip + cpu_skip + msi_vec++; in gaudi_init_pci_dma_qmans()
2717 q->bus_address); in gaudi_init_pci_dma_qmans()
2725 gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA; in gaudi_init_pci_dma_qmans()
2732 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_hbm_dma_qman()
2776 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_hbm_dma_qman()
2778 le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl); in gaudi_init_hbm_dma_qman()
2789 if (hdev->stop_on_err) in gaudi_init_hbm_dma_qman()
2835 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_hbm_dma_qmans()
2840 if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA) in gaudi_init_hbm_dma_qmans()
2853 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_hbm_dma_qmans()
2854 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_hbm_dma_qmans()
2867 gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA; in gaudi_init_hbm_dma_qmans()
2874 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_mme_qman()
2909 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_mme_qman()
2911 le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl); in gaudi_init_mme_qman()
2922 (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2; in gaudi_init_mme_qman()
2925 if (hdev->stop_on_err) in gaudi_init_mme_qman()
2959 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_mme_qmans()
2965 if (gaudi->hw_cap_initialized & HW_CAP_MME) in gaudi_init_mme_qmans()
2973 mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; in gaudi_init_mme_qmans()
2977 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_mme_qmans()
2978 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_mme_qmans()
2986 mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; in gaudi_init_mme_qmans()
2993 gaudi->hw_cap_initialized |= HW_CAP_MME; in gaudi_init_mme_qmans()
3000 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_tpc_qman()
3026 (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); in gaudi_init_tpc_qman()
3045 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_tpc_qman()
3047 le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl); in gaudi_init_tpc_qman()
3058 if (hdev->stop_on_err) in gaudi_init_tpc_qman()
3104 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_tpc_qmans()
3108 u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH - in gaudi_init_tpc_qmans()
3112 if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK) in gaudi_init_tpc_qmans()
3122 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_tpc_qmans()
3123 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_tpc_qmans()
3140 tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; in gaudi_init_tpc_qmans()
3142 gaudi->hw_cap_initialized |= in gaudi_init_tpc_qmans()
3151 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_nic_qman()
3202 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_nic_qman()
3204 le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl); in gaudi_init_nic_qman()
3208 if (hdev->stop_on_err) in gaudi_init_nic_qman()
3237 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_nic_qmans()
3242 mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_init_nic_qmans()
3244 mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_init_nic_qmans()
3247 if (!hdev->nic_ports_mask) in gaudi_init_nic_qmans()
3250 if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK) in gaudi_init_nic_qmans()
3253 dev_dbg(hdev->dev, "Initializing NIC QMANs\n"); in gaudi_init_nic_qmans()
3256 if (!(hdev->nic_ports_mask & (1 << nic_id))) { in gaudi_init_nic_qmans()
3259 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_init_nic_qmans()
3268 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_nic_qmans()
3269 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_nic_qmans()
3279 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_init_nic_qmans()
3283 gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id); in gaudi_init_nic_qmans()
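
The running nic_offset bookkeeping above walks two QMANs per NIC macro: each iteration steps by the QMAN delta, and after every odd engine the offset is rebased onto the next NIC macro. Its closed form, with placeholder register deltas:

#include <stdio.h>

#define NIC_ENGINES 10
#define DELTA_QMANS 0x1000	/* placeholder: mmNIC0_QM1 - mmNIC0_QM0 */
#define DELTA_NICS  0x80000	/* placeholder: mmNIC1_QM0 - mmNIC0_QM0 */

/* Engines 2n and 2n+1 share NIC macro n, whose two QMANs sit
 * DELTA_QMANS apart. */
static unsigned long nic_qman_base(int engine)
{
	return (engine / 2) * (unsigned long)DELTA_NICS +
	       (engine % 2) * (unsigned long)DELTA_QMANS;
}

int main(void)
{
	for (int e = 0; e < NIC_ENGINES; e++)
		printf("engine %d: base offset 0x%lx\n", e, nic_qman_base(e));
	return 0;
}
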
3289 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_pci_dma_qmans()
3291 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_disable_pci_dma_qmans()
3301 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_hbm_dma_qmans()
3303 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_disable_hbm_dma_qmans()
3315 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_mme_qmans()
3317 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_disable_mme_qmans()
3326 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_tpc_qmans()
3330 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_disable_tpc_qmans()
3335 tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; in gaudi_disable_tpc_qmans()
3341 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_nic_qmans()
3344 mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_disable_nic_qmans()
3346 mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_disable_nic_qmans()
3352 if (gaudi->hw_cap_initialized & nic_mask) in gaudi_disable_nic_qmans()
3357 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_disable_nic_qmans()
3365 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_pci_dma_qmans()
3367 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_stop_pci_dma_qmans()
3378 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_hbm_dma_qmans()
3380 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_stop_hbm_dma_qmans()
3394 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_mme_qmans()
3396 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_stop_mme_qmans()
3406 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_tpc_qmans()
3408 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_stop_tpc_qmans()
3423 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_nic_qmans()
3427 if (gaudi->hw_cap_initialized & HW_CAP_NIC0) in gaudi_stop_nic_qmans()
3433 if (gaudi->hw_cap_initialized & HW_CAP_NIC1) in gaudi_stop_nic_qmans()
3439 if (gaudi->hw_cap_initialized & HW_CAP_NIC2) in gaudi_stop_nic_qmans()
3445 if (gaudi->hw_cap_initialized & HW_CAP_NIC3) in gaudi_stop_nic_qmans()
3451 if (gaudi->hw_cap_initialized & HW_CAP_NIC4) in gaudi_stop_nic_qmans()
3457 if (gaudi->hw_cap_initialized & HW_CAP_NIC5) in gaudi_stop_nic_qmans()
3463 if (gaudi->hw_cap_initialized & HW_CAP_NIC6) in gaudi_stop_nic_qmans()
3469 if (gaudi->hw_cap_initialized & HW_CAP_NIC7) in gaudi_stop_nic_qmans()
3475 if (gaudi->hw_cap_initialized & HW_CAP_NIC8) in gaudi_stop_nic_qmans()
3481 if (gaudi->hw_cap_initialized & HW_CAP_NIC9) in gaudi_stop_nic_qmans()
3490 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_pci_dma_stall()
3492 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_pci_dma_stall()
3502 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hbm_dma_stall()
3504 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_hbm_dma_stall()
3516 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mme_stall()
3518 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_mme_stall()
3521 /* WA for H3-1800 bug: do ACC and SBAB writes twice */ in gaudi_mme_stall()
3542 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_tpc_stall()
3544 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_tpc_stall()
3562 if (hdev->asic_prop.fw_security_enabled) in gaudi_disable_clock_gating()
3569 qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG); in gaudi_disable_clock_gating()
3581 qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG); in gaudi_disable_clock_gating()
3588 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); in gaudi_enable_timestamp()
3590 /* Zero the lower/upper parts of the 64-bit counter */ in gaudi_enable_timestamp()
3591 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); in gaudi_enable_timestamp()
3592 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); in gaudi_enable_timestamp()
3595 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); in gaudi_enable_timestamp()
3601 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); in gaudi_disable_timestamp()
3608 if (hdev->pldm) in gaudi_halt_engines()
3645 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_mmu_init()
3646 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_init()
3650 if (gaudi->hw_cap_initialized & HW_CAP_MMU) in gaudi_mmu_init()
3653 for (i = 0 ; i < prop->max_asid ; i++) { in gaudi_mmu_init()
3654 hop0_addr = prop->mmu_pgt_addr + in gaudi_mmu_init()
3655 (i * prop->mmu_hop_table_size); in gaudi_mmu_init()
3659 dev_err(hdev->dev, in gaudi_mmu_init()
3666 WREG32(mmSTLB_CACHE_INV_BASE_39_8, prop->mmu_cache_mng_addr >> 8); in gaudi_mmu_init()
3667 WREG32(mmSTLB_CACHE_INV_BASE_49_40, prop->mmu_cache_mng_addr >> 40); in gaudi_mmu_init()
3685 gaudi->mmu_cache_inv_pi = 1; in gaudi_mmu_init()
3687 gaudi->hw_cap_initialized |= HW_CAP_MMU; in gaudi_mmu_init()
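
The hop0 address computed per ASID above is plain array indexing into the page-table area: hop-0 tables are laid out back to back, one per address-space ID. A standalone sketch with a placeholder table size:

#include <stdint.h>
#include <stdio.h>

#define HOP_TABLE_SIZE 4096	/* placeholder: 512 PTEs x 8 bytes */

static uint64_t hop0_addr_for_asid(uint64_t pgt_base, unsigned int asid)
{
	return pgt_base + (uint64_t)asid * HOP_TABLE_SIZE;
}

int main(void)
{
	printf("asid 3 hop0: 0x%llx\n",
	       (unsigned long long)hop0_addr_for_asid(0x80000000ULL, 3));
	return 0;
}
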
3696 dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET; in gaudi_load_firmware_to_device()
3705 dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET; in gaudi_load_boot_fit_to_device()
3715 dynamic_loader = &hdev->fw_loader.dynamic_loader; in gaudi_init_dynamic_firmware_loader()
3720 * hard-coded) in later stages of the protocol those values will be in gaudi_init_dynamic_firmware_loader()
3722 * will always be up-to-date in gaudi_init_dynamic_firmware_loader()
3724 dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs; in gaudi_init_dynamic_firmware_loader()
3725 dyn_regs->kmd_msg_to_cpu = in gaudi_init_dynamic_firmware_loader()
3727 dyn_regs->cpu_cmd_status_to_host = in gaudi_init_dynamic_firmware_loader()
3730 dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC; in gaudi_init_dynamic_firmware_loader()
3737 static_loader = &hdev->fw_loader.static_loader; in gaudi_init_static_firmware_loader()
3739 static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN; in gaudi_init_static_firmware_loader()
3740 static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN; in gaudi_init_static_firmware_loader()
3741 static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU; in gaudi_init_static_firmware_loader()
3742 static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST; in gaudi_init_static_firmware_loader()
3743 static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS; in gaudi_init_static_firmware_loader()
3744 static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0; in gaudi_init_static_firmware_loader()
3745 static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1; in gaudi_init_static_firmware_loader()
3746 static_loader->boot_err0_reg = mmCPU_BOOT_ERR0; in gaudi_init_static_firmware_loader()
3747 static_loader->boot_err1_reg = mmCPU_BOOT_ERR1; in gaudi_init_static_firmware_loader()
3748 static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET; in gaudi_init_static_firmware_loader()
3749 static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET; in gaudi_init_static_firmware_loader()
3750 static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR)); in gaudi_init_static_firmware_loader()
3751 static_loader->cpu_reset_wait_msec = hdev->pldm ? in gaudi_init_static_firmware_loader()
3758 struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load; in gaudi_init_firmware_preload_params()
3760 pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS; in gaudi_init_firmware_preload_params()
3761 pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0; in gaudi_init_firmware_preload_params()
3762 pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1; in gaudi_init_firmware_preload_params()
3763 pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0; in gaudi_init_firmware_preload_params()
3764 pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1; in gaudi_init_firmware_preload_params()
3765 pre_fw_load->wait_for_preboot_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC; in gaudi_init_firmware_preload_params()
3770 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_init_firmware_loader()
3771 struct fw_load_mgr *fw_loader = &hdev->fw_loader; in gaudi_init_firmware_loader()
3774 fw_loader->fw_comp_loaded = FW_TYPE_NONE; in gaudi_init_firmware_loader()
3775 fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE; in gaudi_init_firmware_loader()
3776 fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE; in gaudi_init_firmware_loader()
3777 fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC; in gaudi_init_firmware_loader()
3778 fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC; in gaudi_init_firmware_loader()
3779 fw_loader->skip_bmc = !hdev->bmc_enable; in gaudi_init_firmware_loader()
3780 fw_loader->sram_bar_id = SRAM_BAR_ID; in gaudi_init_firmware_loader()
3781 fw_loader->dram_bar_id = HBM_BAR_ID; in gaudi_init_firmware_loader()
3783 if (prop->dynamic_fw_load) in gaudi_init_firmware_loader()
3791 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_cpu()
3794 if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU)) in gaudi_init_cpu()
3797 if (gaudi->hw_cap_initialized & HW_CAP_CPU) in gaudi_init_cpu()
3804 if (!hdev->asic_prop.fw_security_enabled) in gaudi_init_cpu()
3805 WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); in gaudi_init_cpu()
3812 gaudi->hw_cap_initialized |= HW_CAP_CPU; in gaudi_init_cpu()
3820 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_cpu_queues()
3821 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_init_cpu_queues()
3822 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_cpu_queues()
3826 &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; in gaudi_init_cpu_queues()
3829 if (!hdev->cpu_queues_enable) in gaudi_init_cpu_queues()
3832 if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) in gaudi_init_cpu_queues()
3835 eq = &hdev->event_queue; in gaudi_init_cpu_queues()
3837 WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address)); in gaudi_init_cpu_queues()
3838 WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address)); in gaudi_init_cpu_queues()
3840 WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address)); in gaudi_init_cpu_queues()
3841 WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address)); in gaudi_init_cpu_queues()
3844 lower_32_bits(hdev->cpu_accessible_dma_address)); in gaudi_init_cpu_queues()
3846 upper_32_bits(hdev->cpu_accessible_dma_address)); in gaudi_init_cpu_queues()
3859 irq_handler_offset = prop->gic_interrupts_enable ? in gaudi_init_cpu_queues()
3861 le32_to_cpu(dyn_regs->gic_host_pi_upd_irq); in gaudi_init_cpu_queues()
3875 dev_err(hdev->dev, in gaudi_init_cpu_queues()
3876 "Failed to communicate with Device CPU (CPU-CP timeout)\n"); in gaudi_init_cpu_queues()
3877 return -EIO; in gaudi_init_cpu_queues()
3881 if (prop->fw_cpu_boot_dev_sts0_valid) in gaudi_init_cpu_queues()
3882 prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0); in gaudi_init_cpu_queues()
3883 if (prop->fw_cpu_boot_dev_sts1_valid) in gaudi_init_cpu_queues()
3884 prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1); in gaudi_init_cpu_queues()
3886 gaudi->hw_cap_initialized |= HW_CAP_CPU_Q; in gaudi_init_cpu_queues()
3895 if (!hdev->asic_prop.fw_security_enabled) { in gaudi_pre_hw_init()
3920 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_init()
3929 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_hw_init()
3930 gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE; in gaudi_hw_init()
3933 * Before pushing u-boot/linux to device, need to set the hbm bar to in gaudi_hw_init()
3937 dev_err(hdev->dev, in gaudi_hw_init()
3939 return -EIO; in gaudi_hw_init()
3944 dev_err(hdev->dev, "failed to initialize CPU\n"); in gaudi_hw_init()
3987 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", in gaudi_hw_init()
4009 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_hw_fini()
4011 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_fini()
4015 dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n"); in gaudi_hw_fini()
4019 if (hdev->pldm) { in gaudi_hw_fini()
4028 dev_dbg(hdev->dev, in gaudi_hw_fini()
4035 driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled && in gaudi_hw_fini()
4036 !hdev->asic_prop.hard_reset_done_by_fw); in gaudi_hw_fini()
4049 if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) { in gaudi_hw_fini()
4050 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_hw_fini()
4052 le32_to_cpu(dyn_regs->gic_host_halt_irq); in gaudi_hw_fini()
4057 /* This is a hail-mary attempt to revive the card in the small chance that the in gaudi_hw_fini()
4068 if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) { in gaudi_hw_fini()
4069 if (hdev->asic_prop.hard_reset_done_by_fw) in gaudi_hw_fini()
4075 if (hdev->asic_prop.hard_reset_done_by_fw) in gaudi_hw_fini()
4111 /* Tell ASIC not to re-initialize PCIe */ in gaudi_hw_fini()
4114 /* Restart BTL/BLR upon hard-reset */ in gaudi_hw_fini()
4120 dev_dbg(hdev->dev, in gaudi_hw_fini()
4124 dev_dbg(hdev->dev, in gaudi_hw_fini()
4138 dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status); in gaudi_hw_fini()
4139 return -ETIMEDOUT; in gaudi_hw_fini()
4143 gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM | in gaudi_hw_fini()
4149 memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); in gaudi_hw_fini()
4151 hdev->device_cpu_is_halted = false; in gaudi_hw_fini()
4162 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); in gaudi_suspend()
4180 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, in gaudi_mmap()
4181 (dma_addr - HOST_PHYS_BASE), size); in gaudi_mmap()
4183 dev_err(hdev->dev, "dma_mmap_coherent error %d\n", rc); in gaudi_mmap()
4191 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_ring_doorbell()
4193 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_ring_doorbell()
4215 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4222 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4229 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4236 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4243 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4250 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4255 if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) in gaudi_ring_doorbell()
4422 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0)) in gaudi_ring_doorbell()
4425 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4430 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1)) in gaudi_ring_doorbell()
4433 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4438 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2)) in gaudi_ring_doorbell()
4441 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4446 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3)) in gaudi_ring_doorbell()
4449 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4454 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4)) in gaudi_ring_doorbell()
4457 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4462 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5)) in gaudi_ring_doorbell()
4465 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4470 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6)) in gaudi_ring_doorbell()
4473 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4478 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7)) in gaudi_ring_doorbell()
4481 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4486 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8)) in gaudi_ring_doorbell()
4489 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4494 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9)) in gaudi_ring_doorbell()
4497 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4507 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", in gaudi_ring_doorbell()
4521 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_ring_doorbell()
4523 le32_to_cpu(dyn_regs->gic_host_pi_upd_irq); in gaudi_ring_doorbell()
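/*
 * A minimal userspace sketch of the stream arithmetic used in the
 * doorbell path above: each QMAN exposes four streams whose PQ_PI
 * registers sit 4 bytes apart, so the low two bits of the biased
 * queue id select the register. The offset constant and first-queue
 * id below are illustrative assumptions, not driver definitions.
 */
#include <stdio.h>

#define QM_PQ_PI_0 0x100u /* hypothetical stream-0 PQ_PI offset */

static unsigned int pq_pi_offset(unsigned int hw_queue_id,
				 unsigned int first_queue_id)
{
	/* same shape as q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4 */
	unsigned int stream = (hw_queue_id - first_queue_id) & 0x3;

	return QM_PQ_PI_0 + stream * 4;
}

int main(void)
{
	unsigned int q;

	for (q = 4; q < 8; q++) /* the four streams of one made-up engine */
		printf("queue %u -> PQ_PI offset %#x\n", q, pq_pi_offset(q, 4));
	return 0;
}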
4543 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, in gaudi_dma_alloc_coherent()
4557 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE; in gaudi_dma_free_coherent()
4559 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); in gaudi_dma_free_coherent()
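/*
 * Sketch of the HOST_PHYS_BASE fixup visible in the alloc/free pair
 * above: the device reaches host memory through a fixed aperture, so
 * the driver hands out "device addresses" that are the real DMA
 * address plus the aperture base, and strips the base again before
 * returning the buffer to the DMA API. The base value here is an
 * assumption for illustration only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define APERTURE_BASE 0x8000000000ull /* stand-in for HOST_PHYS_BASE */

static uint64_t to_device_addr(uint64_t dma_addr)
{
	return dma_addr + APERTURE_BASE;
}

static uint64_t to_dma_addr(uint64_t device_addr)
{
	return device_addr - APERTURE_BASE;
}

int main(void)
{
	uint64_t dma = 0x12345000ull;
	uint64_t dev = to_device_addr(dma);

	assert(to_dma_addr(dev) == dma); /* the round trip must be lossless */
	printf("dma %#llx <-> device %#llx\n",
	       (unsigned long long)dma, (unsigned long long)dev);
	return 0;
}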
4564 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_dram()
4565 u64 cur_addr = prop->dram_user_base_address; in gaudi_scrub_device_dram()
4569 while (cur_addr < prop->dram_end_address) { in gaudi_scrub_device_dram()
4574 min((u64)SZ_2G, prop->dram_end_address - cur_addr); in gaudi_scrub_device_dram()
4576 dev_dbg(hdev->dev, in gaudi_scrub_device_dram()
4577 "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", in gaudi_scrub_device_dram()
4596 if (cur_addr == prop->dram_end_address) in gaudi_scrub_device_dram()
4612 dev_err(hdev->dev, in gaudi_scrub_device_dram()
4615 return -EIO; in gaudi_scrub_device_dram()
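/*
 * The DRAM scrub above walks the HBM range in chunks of at most 2GB so
 * no single LIN_DMA transfer overflows its size field. A host-side
 * model of that chunking, with a print standing in for the real DMA
 * submission; the address range is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_2G (2ull << 30)

static void submit_memset(uint64_t addr, uint64_t size)
{
	printf("scrub 0x%09llx - 0x%09llx\n", (unsigned long long)addr,
	       (unsigned long long)(addr + size));
}

int main(void)
{
	uint64_t cur = 0x020000000ull, end = 0x140000000ull; /* assumed */

	while (cur < end) {
		uint64_t chunk = end - cur;

		if (chunk > CHUNK_2G)
			chunk = CHUNK_2G; /* min(SZ_2G, remaining) */
		submit_memset(cur, chunk);
		cur += chunk;
	}
	return 0;
}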
4625 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_mem()
4627 u64 addr, size, val = hdev->memory_scrub_val; in gaudi_scrub_device_mem()
4631 if (!hdev->memory_scrub) in gaudi_scrub_device_mem()
4635 while (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { in gaudi_scrub_device_mem()
4637 dev_err(hdev->dev, "waiting for idle timeout\n"); in gaudi_scrub_device_mem()
4638 return -ETIMEDOUT; in gaudi_scrub_device_mem()
4644 addr = prop->sram_user_base_address; in gaudi_scrub_device_mem()
4645 size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET; in gaudi_scrub_device_mem()
4647 dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n", in gaudi_scrub_device_mem()
4651 dev_err(hdev->dev, "Failed to clear SRAM (%d)\n", rc); in gaudi_scrub_device_mem()
4655 /* Scrub HBM using all DMA channels in parallel */ in gaudi_scrub_device_mem()
4658 dev_err(hdev->dev, "Failed to clear HBM (%d)\n", rc); in gaudi_scrub_device_mem()
4669 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_int_queue_base()
4674 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); in gaudi_get_int_queue_base()
4678 q = &gaudi->internal_qmans[queue_id]; in gaudi_get_int_queue_base()
4679 *dma_handle = q->pq_dma_addr; in gaudi_get_int_queue_base()
4680 *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE; in gaudi_get_int_queue_base()
4682 return q->pq_kernel_addr; in gaudi_get_int_queue_base()
4688 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_cpu_message()
4690 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) { in gaudi_send_cpu_message()
4712 if (hdev->pldm) in gaudi_test_queue()
4721 dev_err(hdev->dev, in gaudi_test_queue()
4724 return -ENOMEM; in gaudi_test_queue()
4732 dev_err(hdev->dev, in gaudi_test_queue()
4735 rc = -ENOMEM; in gaudi_test_queue()
4743 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_test_queue()
4744 fence_pkt->value = cpu_to_le32(fence_val); in gaudi_test_queue()
4745 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_test_queue()
4751 dev_err(hdev->dev, in gaudi_test_queue()
4762 if (rc == -ETIMEDOUT) { in gaudi_test_queue()
4763 dev_err(hdev->dev, in gaudi_test_queue()
4766 rc = -EIO; in gaudi_test_queue()
4778 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_test_cpu_queue()
4784 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_test_cpu_queue()
4794 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) { in gaudi_test_queues()
4795 if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) { in gaudi_test_queues()
4798 ret_val = -EINVAL; in gaudi_test_queues()
4804 ret_val = -EINVAL; in gaudi_test_queues()
4817 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); in gaudi_dma_pool_zalloc()
4830 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE; in gaudi_dma_pool_free()
4832 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); in gaudi_dma_pool_free()
4863 while ((count + 1) < sgt->nents) { in gaudi_get_dma_desc_list_size()
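/*
 * gaudi_get_dma_desc_list_size() counts how many LIN_DMA packets a
 * scatter-gather table needs by merging entries whose DMA ranges are
 * back-to-back. A simplified model of that merge count over a plain
 * array (the real loop iterates struct scatterlist entries):
 */
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint64_t len; };

static unsigned int count_merged(const struct seg *s, unsigned int nents)
{
	unsigned int i = 0, count = 0;

	while (i < nents) {
		uint64_t end = s[i].addr + s[i].len;

		/* swallow every following segment that starts where we end */
		while (++i < nents && s[i].addr == end)
			end += s[i].len;
		count++;
	}
	return count;
}

int main(void)
{
	struct seg s[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 }, /* contiguous pair */
		{ 0x8000, 0x1000 },			/* separate segment */
	};

	printf("descriptors needed: %u\n", count_merged(s, 3)); /* 2 */
	return 0;
}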
4895 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4896 parser->job_userptr_list, &userptr)) in gaudi_pin_memory_before_cs()
4901 return -ENOMEM; in gaudi_pin_memory_before_cs()
4903 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4908 list_add_tail(&userptr->job_node, parser->job_userptr_list); in gaudi_pin_memory_before_cs()
4910 rc = hl_dma_map_sgtable(hdev, userptr->sgt, dir); in gaudi_pin_memory_before_cs()
4912 dev_err(hdev->dev, "failed to map sgt with DMA region\n"); in gaudi_pin_memory_before_cs()
4916 userptr->dma_mapped = true; in gaudi_pin_memory_before_cs()
4917 userptr->dir = dir; in gaudi_pin_memory_before_cs()
4920 parser->patched_cb_size += in gaudi_pin_memory_before_cs()
4921 gaudi_get_dma_desc_list_size(hdev, userptr->sgt); in gaudi_pin_memory_before_cs()
4926 list_del(&userptr->job_node); in gaudi_pin_memory_before_cs()
4943 user_memset = (le32_to_cpu(user_dma_pkt->ctl) & in gaudi_validate_dma_pkt_host()
4951 dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n"); in gaudi_validate_dma_pkt_host()
4953 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_validate_dma_pkt_host()
4955 dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n"); in gaudi_validate_dma_pkt_host()
4957 addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_host()
4963 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_host()
4976 u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_no_mmu()
4980 dev_dbg(hdev->dev, "DMA packet details:\n"); in gaudi_validate_dma_pkt_no_mmu()
4981 dev_dbg(hdev->dev, "source == 0x%llx\n", in gaudi_validate_dma_pkt_no_mmu()
4982 le64_to_cpu(user_dma_pkt->src_addr)); in gaudi_validate_dma_pkt_no_mmu()
4983 dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr); in gaudi_validate_dma_pkt_no_mmu()
4984 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); in gaudi_validate_dma_pkt_no_mmu()
4991 if (!le32_to_cpu(user_dma_pkt->tsize)) { in gaudi_validate_dma_pkt_no_mmu()
4992 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_no_mmu()
4996 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_validate_dma_pkt_no_mmu()
5009 cfg = le32_to_cpu(user_pkt->cfg); in gaudi_validate_load_and_exe_pkt()
5012 dev_err(hdev->dev, in gaudi_validate_load_and_exe_pkt()
5014 return -EPERM; in gaudi_validate_load_and_exe_pkt()
5017 parser->patched_cb_size += sizeof(struct packet_load_and_exe); in gaudi_validate_load_and_exe_pkt()
5028 parser->patched_cb_size = 0; in gaudi_validate_cb()
5031 while (cb_parsed_length < parser->user_cb_size) { in gaudi_validate_cb()
5036 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_validate_cb()
5039 (le64_to_cpu(user_pkt->header) & in gaudi_validate_cb()
5044 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_validate_cb()
5045 rc = -EINVAL; in gaudi_validate_cb()
5051 if (cb_parsed_length > parser->user_cb_size) { in gaudi_validate_cb()
5052 dev_err(hdev->dev, in gaudi_validate_cb()
5054 rc = -EINVAL; in gaudi_validate_cb()
5060 dev_err(hdev->dev, in gaudi_validate_cb()
5062 rc = -EPERM; in gaudi_validate_cb()
5066 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_validate_cb()
5067 rc = -EPERM; in gaudi_validate_cb()
5071 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_validate_cb()
5072 rc = -EPERM; in gaudi_validate_cb()
5076 dev_err(hdev->dev, in gaudi_validate_cb()
5078 rc = -EPERM; in gaudi_validate_cb()
5087 parser->contains_dma_pkt = true; in gaudi_validate_cb()
5089 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5102 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5106 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_validate_cb()
5108 rc = -EINVAL; in gaudi_validate_cb()
5122 if (parser->completion) in gaudi_validate_cb()
5123 parser->patched_cb_size += gaudi_get_patched_cb_extra_size( in gaudi_validate_cb()
5124 parser->patched_cb_size); in gaudi_validate_cb()
5147 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5149 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_patch_dma_packet()
5156 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5157 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5162 addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5163 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5169 le32_to_cpu(user_dma_pkt->tsize), in gaudi_patch_dma_packet()
5170 parser->job_userptr_list, &userptr))) { in gaudi_patch_dma_packet()
5171 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", in gaudi_patch_dma_packet()
5172 addr, user_dma_pkt->tsize); in gaudi_patch_dma_packet()
5173 return -EFAULT; in gaudi_patch_dma_packet()
5184 sgt = userptr->sgt; in gaudi_patch_dma_packet()
5194 while ((count + 1) < sgt->nents) { in gaudi_patch_dma_packet()
5212 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5216 new_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_patch_dma_packet()
5217 new_dma_pkt->tsize = cpu_to_le32(len); in gaudi_patch_dma_packet()
5220 new_dma_pkt->src_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5221 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5223 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5224 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5234 dev_err(hdev->dev, in gaudi_patch_dma_packet()
5236 return -EFAULT; in gaudi_patch_dma_packet()
5239 /* Fix the last dma packet - wrcomp must be as user set it */ in gaudi_patch_dma_packet()
5240 new_dma_pkt--; in gaudi_patch_dma_packet()
5241 new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask); in gaudi_patch_dma_packet()
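/*
 * The patching above fans one user LIN_DMA packet out into a packet
 * per merged sg chunk, clears write-completion on every copy, then
 * restores the user's original bit on the last packet only, so the
 * completion fires once for the whole transfer. Minimal model with a
 * hypothetical ctl layout (bit 0 = wrcomp), not the real packet bits:
 */
#include <stdint.h>
#include <stdio.h>

#define CTL_WRCOMP 0x1u /* assumed flag position, illustration only */

int main(void)
{
	uint32_t user_ctl = CTL_WRCOMP | 0x10u; /* user asked for completion */
	uint32_t pkts[3];
	unsigned int i;

	for (i = 0; i < 3; i++)
		pkts[i] = user_ctl & ~CTL_WRCOMP; /* strip on every copy */
	pkts[2] |= user_ctl & CTL_WRCOMP;	  /* re-apply on the last */

	for (i = 0; i < 3; i++)
		printf("pkt %u ctl %#x\n", i, pkts[i]);
	return 0;
}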
5256 while (cb_parsed_length < parser->user_cb_size) { in gaudi_patch_cb()
5262 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_patch_cb()
5263 kernel_pkt = parser->patched_cb->kernel_address + in gaudi_patch_cb()
5267 (le64_to_cpu(user_pkt->header) & in gaudi_patch_cb()
5272 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_patch_cb()
5273 rc = -EINVAL; in gaudi_patch_cb()
5279 if (cb_parsed_length > parser->user_cb_size) { in gaudi_patch_cb()
5280 dev_err(hdev->dev, in gaudi_patch_cb()
5282 rc = -EINVAL; in gaudi_patch_cb()
5296 dev_err(hdev->dev, in gaudi_patch_cb()
5298 rc = -EPERM; in gaudi_patch_cb()
5302 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_patch_cb()
5303 rc = -EPERM; in gaudi_patch_cb()
5307 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_patch_cb()
5308 rc = -EPERM; in gaudi_patch_cb()
5325 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_patch_cb()
5327 rc = -EINVAL; in gaudi_patch_cb()
5352 if (parser->completion) in gaudi_parse_cb_mmu()
5353 parser->patched_cb_size = parser->user_cb_size + in gaudi_parse_cb_mmu()
5354 gaudi_get_patched_cb_extra_size(parser->user_cb_size); in gaudi_parse_cb_mmu()
5356 parser->patched_cb_size = parser->user_cb_size; in gaudi_parse_cb_mmu()
5358 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_mmu()
5359 parser->patched_cb_size, false, false, in gaudi_parse_cb_mmu()
5363 dev_err(hdev->dev, in gaudi_parse_cb_mmu()
5369 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5371 if (!parser->patched_cb) { in gaudi_parse_cb_mmu()
5372 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_mmu()
5373 rc = -EFAULT; in gaudi_parse_cb_mmu()
5379 * "parser->user_cb_size <= parser->user_cb->size" was done in get_cb_from_cs_chunk() in gaudi_parse_cb_mmu()
5387 memcpy(parser->patched_cb->kernel_address, in gaudi_parse_cb_mmu()
5388 parser->user_cb->kernel_address, in gaudi_parse_cb_mmu()
5389 parser->user_cb_size); in gaudi_parse_cb_mmu()
5391 patched_cb_size = parser->patched_cb_size; in gaudi_parse_cb_mmu()
5394 user_cb = parser->user_cb; in gaudi_parse_cb_mmu()
5395 parser->user_cb = parser->patched_cb; in gaudi_parse_cb_mmu()
5397 parser->user_cb = user_cb; in gaudi_parse_cb_mmu()
5400 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5404 if (patched_cb_size != parser->patched_cb_size) { in gaudi_parse_cb_mmu()
5405 dev_err(hdev->dev, "user CB size mismatch\n"); in gaudi_parse_cb_mmu()
5406 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5407 rc = -EINVAL; in gaudi_parse_cb_mmu()
5418 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5434 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_no_mmu()
5435 parser->patched_cb_size, false, false, in gaudi_parse_cb_no_mmu()
5438 dev_err(hdev->dev, in gaudi_parse_cb_no_mmu()
5443 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5445 if (!parser->patched_cb) { in gaudi_parse_cb_no_mmu()
5446 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_no_mmu()
5447 rc = -EFAULT; in gaudi_parse_cb_no_mmu()
5454 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_no_mmu()
5463 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5467 hl_userptr_delete_list(hdev, parser->job_userptr_list); in gaudi_parse_cb_no_mmu()
5474 struct asic_fixed_properties *asic_prop = &hdev->asic_prop; in gaudi_parse_cb_no_ext_queue()
5475 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_parse_cb_no_ext_queue()
5478 if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) && in gaudi_parse_cb_no_ext_queue()
5479 (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) { in gaudi_parse_cb_no_ext_queue()
5480 nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0; in gaudi_parse_cb_no_ext_queue()
5483 if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) { in gaudi_parse_cb_no_ext_queue()
5484 dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id); in gaudi_parse_cb_no_ext_queue()
5485 return -EINVAL; in gaudi_parse_cb_no_ext_queue()
5490 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5491 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5492 asic_prop->sram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5493 asic_prop->sram_end_address)) in gaudi_parse_cb_no_ext_queue()
5496 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5497 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5498 asic_prop->dram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5499 asic_prop->dram_end_address)) in gaudi_parse_cb_no_ext_queue()
5503 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5504 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5505 asic_prop->pmmu.start_addr, in gaudi_parse_cb_no_ext_queue()
5506 asic_prop->pmmu.end_addr)) in gaudi_parse_cb_no_ext_queue()
5509 dev_err(hdev->dev, in gaudi_parse_cb_no_ext_queue()
5511 parser->user_cb, parser->user_cb_size); in gaudi_parse_cb_no_ext_queue()
5513 return -EFAULT; in gaudi_parse_cb_no_ext_queue()
5518 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cs_parser()
5520 if (parser->queue_type == QUEUE_TYPE_INT) in gaudi_cs_parser()
5523 if (gaudi->hw_cap_initialized & HW_CAP_MMU) in gaudi_cs_parser()
5539 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); in gaudi_add_end_of_cb_packets()
5542 cq_padding->ctl = cpu_to_le32(FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_NOP)); in gaudi_add_end_of_cb_packets()
5552 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5553 cq_pkt->value = cpu_to_le32(cq_val); in gaudi_add_end_of_cb_packets()
5554 cq_pkt->addr = cpu_to_le64(cq_addr); in gaudi_add_end_of_cb_packets()
5560 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5561 cq_pkt->value = cpu_to_le32(1); in gaudi_add_end_of_cb_packets()
5562 msi_addr = hdev->pdev ? mmPCIE_CORE_MSI_REQ : mmPCIE_MSI_INTR_0 + msi_vec * 4; in gaudi_add_end_of_cb_packets()
5563 cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr); in gaudi_add_end_of_cb_packets()
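/*
 * Sketch of where the two trailing MSG_PROT packets land: the CB
 * length already reserves room for them, so their slot is the buffer
 * end minus twice the packet size; the first signals the completion
 * queue, the second triggers the MSI write. The struct below is a
 * stand-in, not the driver's real packet layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_prot { uint32_t ctl; uint32_t value; uint64_t addr; };

int main(void)
{
	uint8_t cb[256];
	size_t len = sizeof(cb);
	struct msg_prot *cq_pkt;

	memset(cb, 0, sizeof(cb));
	cq_pkt = (struct msg_prot *)(cb + len - 2 * sizeof(*cq_pkt));
	cq_pkt[0].value = 42;	/* completion value written to the CQ */
	cq_pkt[1].value = 1;	/* write that raises the interrupt */
	printf("tail packets start at offset %zu of %zu\n",
	       (size_t)((uint8_t *)cq_pkt - cb), len);
	return 0;
}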
5582 return -EFAULT; in gaudi_memset_device_memory()
5584 lin_dma_pkt = cb->kernel_address; in gaudi_memset_device_memory()
5594 lin_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_device_memory()
5595 lin_dma_pkt->src_addr = cpu_to_le64(val); in gaudi_memset_device_memory()
5596 lin_dma_pkt->dst_addr |= cpu_to_le64(addr); in gaudi_memset_device_memory()
5597 lin_dma_pkt->tsize = cpu_to_le32(size); in gaudi_memset_device_memory()
5601 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_device_memory()
5602 rc = -ENOMEM; in gaudi_memset_device_memory()
5608 if (err_cause && !hdev->init_done) { in gaudi_memset_device_memory()
5609 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5615 job->id = 0; in gaudi_memset_device_memory()
5616 job->user_cb = cb; in gaudi_memset_device_memory()
5617 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_device_memory()
5618 job->user_cb_size = cb_size; in gaudi_memset_device_memory()
5619 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_device_memory()
5620 job->patched_cb = job->user_cb; in gaudi_memset_device_memory()
5621 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); in gaudi_memset_device_memory()
5628 atomic_dec(&cb->cs_cnt); in gaudi_memset_device_memory()
5633 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_memset_device_memory()
5634 rc = -EIO; in gaudi_memset_device_memory()
5635 if (!hdev->init_done) { in gaudi_memset_device_memory()
5636 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5645 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_device_memory()
5662 dev_err(hdev->dev, "CB size must be smaller than %uMB\n", SZ_2M >> 20); in gaudi_memset_registers()
5663 return -ENOMEM; in gaudi_memset_registers()
5668 return -EFAULT; in gaudi_memset_registers()
5670 pkt = cb->kernel_address; in gaudi_memset_registers()
5679 pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_registers()
5680 pkt->value = cpu_to_le32(val); in gaudi_memset_registers()
5681 pkt->addr = cpu_to_le64(reg_base + (i * 4)); in gaudi_memset_registers()
5686 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_registers()
5687 rc = -ENOMEM; in gaudi_memset_registers()
5691 job->id = 0; in gaudi_memset_registers()
5692 job->user_cb = cb; in gaudi_memset_registers()
5693 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_registers()
5694 job->user_cb_size = cb_size; in gaudi_memset_registers()
5695 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_registers()
5696 job->patched_cb = job->user_cb; in gaudi_memset_registers()
5697 job->job_cb_size = cb_size; in gaudi_memset_registers()
5704 atomic_dec(&cb->cs_cnt); in gaudi_memset_registers()
5708 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_registers()
5723 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5724 return -ENOMEM; in gaudi_restore_sm_registers()
5731 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5732 return -ENOMEM; in gaudi_restore_sm_registers()
5739 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5740 return -ENOMEM; in gaudi_restore_sm_registers()
5747 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5748 return -ENOMEM; in gaudi_restore_sm_registers()
5755 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5756 return -ENOMEM; in gaudi_restore_sm_registers()
5763 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5764 return -ENOMEM; in gaudi_restore_sm_registers()
5769 num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT; in gaudi_restore_sm_registers()
5772 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5773 return -ENOMEM; in gaudi_restore_sm_registers()
5778 num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR; in gaudi_restore_sm_registers()
5781 dev_err(hdev->dev, "failed resetting SM registers\n"); in gaudi_restore_sm_registers()
5782 return -ENOMEM; in gaudi_restore_sm_registers()
5790 u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 - in gaudi_restore_dma_registers()
5806 /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be changed by the user in gaudi_restore_dma_registers()
5826 qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE); in gaudi_restore_qm_registers()
5863 u32 size = hdev->asic_prop.mmu_pgt_size + in gaudi_mmu_clear_pgt_range()
5864 hdev->asic_prop.mmu_cache_mng_size; in gaudi_mmu_clear_pgt_range()
5865 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_clear_pgt_range()
5866 u64 addr = hdev->asic_prop.mmu_pgt_addr; in gaudi_mmu_clear_pgt_range()
5868 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_clear_pgt_range()
5905 dev_err(hdev->dev, in gaudi_dma_core_transfer()
5906 "DMA %d timed-out during reading of 0x%llx\n", in gaudi_dma_core_transfer()
5908 return -EIO; in gaudi_dma_core_transfer()
5914 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_dma_core_transfer()
5915 dev_dbg(hdev->dev, in gaudi_dma_core_transfer()
5920 return -EIO; in gaudi_dma_core_transfer()
5940 return -ENOMEM; in gaudi_debugfs_read_dma()
5942 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_debugfs_read_dma()
5964 dev_err_ratelimited(hdev->dev, in gaudi_debugfs_read_dma()
5966 rc = -EAGAIN; in gaudi_debugfs_read_dma()
5984 dev_dbg(hdev->dev, in gaudi_debugfs_read_dma()
6011 size_left -= SZ_2M; in gaudi_debugfs_read_dma()
6024 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_debugfs_read_dma()
6033 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_read_pte()
6035 if (hdev->reset_info.hard_reset_pending) in gaudi_read_pte()
6038 return readq(hdev->pcie_bar[HBM_BAR_ID] + in gaudi_read_pte()
6039 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_read_pte()
6044 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_write_pte()
6046 if (hdev->reset_info.hard_reset_pending) in gaudi_write_pte()
6049 writeq(val, hdev->pcie_bar[HBM_BAR_ID] + in gaudi_write_pte()
6050 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_write_pte()
6062 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_prepare()
6064 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_prepare()
6068 dev_crit(hdev->dev, "asid %u is too big\n", asid); in gaudi_mmu_prepare()
6217 if (gaudi->hw_cap_initialized & HW_CAP_NIC0) { in gaudi_mmu_prepare()
6230 if (gaudi->hw_cap_initialized & HW_CAP_NIC1) { in gaudi_mmu_prepare()
6243 if (gaudi->hw_cap_initialized & HW_CAP_NIC2) { in gaudi_mmu_prepare()
6256 if (gaudi->hw_cap_initialized & HW_CAP_NIC3) { in gaudi_mmu_prepare()
6269 if (gaudi->hw_cap_initialized & HW_CAP_NIC4) { in gaudi_mmu_prepare()
6282 if (gaudi->hw_cap_initialized & HW_CAP_NIC5) { in gaudi_mmu_prepare()
6295 if (gaudi->hw_cap_initialized & HW_CAP_NIC6) { in gaudi_mmu_prepare()
6308 if (gaudi->hw_cap_initialized & HW_CAP_NIC7) { in gaudi_mmu_prepare()
6321 if (gaudi->hw_cap_initialized & HW_CAP_NIC8) { in gaudi_mmu_prepare()
6334 if (gaudi->hw_cap_initialized & HW_CAP_NIC9) { in gaudi_mmu_prepare()
6361 if (hdev->pldm) in gaudi_send_job_on_qman0()
6368 dev_err(hdev->dev, in gaudi_send_job_on_qman0()
6370 return -ENOMEM; in gaudi_send_job_on_qman0()
6373 cb = job->patched_cb; in gaudi_send_job_on_qman0()
6375 fence_pkt = cb->kernel_address + in gaudi_send_job_on_qman0()
6376 job->job_cb_size - sizeof(struct packet_msg_prot); in gaudi_send_job_on_qman0()
6382 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_send_job_on_qman0()
6383 fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL); in gaudi_send_job_on_qman0()
6384 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_send_job_on_qman0()
6392 job->job_cb_size, cb->bus_address); in gaudi_send_job_on_qman0()
6394 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); in gaudi_send_job_on_qman0()
6404 if (rc == -ETIMEDOUT) { in gaudi_send_job_on_qman0()
6405 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp); in gaudi_send_job_on_qman0()
6636 dev_err(hdev->dev, in gaudi_get_razwi_initiator_name()
6652 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6660 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6670 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_print_and_get_mmu_error_info()
6673 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_print_and_get_mmu_error_info()
6682 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6694 dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6701 * +-------------------+------------------------------------------------------+
6704 * +-------------------+------------------------------------------------------+
6705 * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
6710 * +-------------------+------------------------------------------------------+
6711 * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
6716 * +-------------------+------------------------------------------------------+
6725 num_mem_regs = params->num_memories / 32 + in gaudi_extract_ecc_info()
6726 ((params->num_memories % 32) ? 1 : 0); in gaudi_extract_ecc_info()
6728 if (params->block_address >= CFG_BASE) in gaudi_extract_ecc_info()
6729 params->block_address -= CFG_BASE; in gaudi_extract_ecc_info()
6731 if (params->derr) in gaudi_extract_ecc_info()
6732 err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET; in gaudi_extract_ecc_info()
6734 err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET; in gaudi_extract_ecc_info()
6751 dev_err(hdev->dev, "ECC error information cannot be found\n"); in gaudi_extract_ecc_info()
6752 return -EINVAL; in gaudi_extract_ecc_info()
6755 WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET, in gaudi_extract_ecc_info()
6759 RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET); in gaudi_extract_ecc_info()
6761 RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET); in gaudi_extract_ecc_info()
6764 reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET); in gaudi_extract_ecc_info()
6765 if (params->derr) in gaudi_extract_ecc_info()
6770 WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg); in gaudi_extract_ecc_info()
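/*
 * The extraction loop above scans ceil(num_memories / 32) indication
 * registers, one bit per memory wrapper, to locate the wrapper that
 * latched the ECC error. A standalone model of that search:
 */
#include <stdint.h>
#include <stdio.h>

static int find_wrapper(const uint32_t *regs, unsigned int num_memories)
{
	unsigned int num_mem_regs = num_memories / 32 +
				    ((num_memories % 32) ? 1 : 0);
	unsigned int i, bit;

	for (i = 0; i < num_mem_regs; i++)
		for (bit = 0; bit < 32; bit++)
			if (regs[i] & (1u << bit))
				return i * 32 + bit; /* wrapper index */
	return -1; /* nothing latched */
}

int main(void)
{
	uint32_t regs[3] = { 0, 0x40, 0 }; /* bit 6 of the second register */

	printf("failing wrapper: %d\n", find_wrapper(regs, 70)); /* 38 */
	return 0;
}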
6776 * gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
6785 u32 mask = q_len - 1; in gaudi_queue_idx_dec()
6788 * modular decrement is equivalent to adding (q_len - 1) in gaudi_queue_idx_dec()
6790 * range [0, q_len - 1] in gaudi_queue_idx_dec()
6792 return (idx + q_len - 1) & mask; in gaudi_queue_idx_dec()
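/*
 * Worked example of the branch-free modular decrement above: for a
 * power-of-two q_len, adding (q_len - 1) and masking equals
 * subtracting 1 modulo q_len, and index 0 wraps to q_len - 1.
 */
#include <stdio.h>

static unsigned int queue_idx_dec(unsigned int idx, unsigned int q_len)
{
	unsigned int mask = q_len - 1; /* valid only for power-of-two q_len */

	return (idx + q_len - 1) & mask;
}

int main(void)
{
	printf("%u\n", queue_idx_dec(5, 8)); /* 4 */
	printf("%u\n", queue_idx_dec(0, 8)); /* 7: wraps to the top */
	return 0;
}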
6796 * gaudi_handle_sw_config_stream_data - print SW config stream data
6809 cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0; in gaudi_handle_sw_config_stream_data()
6811 cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) + in gaudi_handle_sw_config_stream_data()
6814 (mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6816 (mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6820 dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n", in gaudi_handle_sw_config_stream_data()
6824 hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr; in gaudi_handle_sw_config_stream_data()
6825 hdev->captured_err_info.undef_opcode.cq_size = size; in gaudi_handle_sw_config_stream_data()
6826 hdev->captured_err_info.undef_opcode.stream_id = stream; in gaudi_handle_sw_config_stream_data()
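/*
 * The address math above derives a per-stream register from two
 * neighbours: the stride is the distance between the stream-1 and
 * stream-0 copies, and TPC0's compile-time offset is rebased onto
 * whichever QMAN is being inspected. The register values below are
 * hypothetical stand-ins for the mmTPC0_QM_* constants:
 */
#include <stdint.h>
#include <stdio.h>

#define TPC0_QM_BASE	 0x1000u /* base the constants are defined against */
#define TPC0_CQ_PTR_LO_0 0x1080u /* stream-0 copy of the register */
#define TPC0_CQ_PTR_LO_1 0x10a0u /* stream-1 copy; difference = stride */

static uint32_t cq_ptr_lo(uint32_t qman_base, uint32_t stream)
{
	uint32_t stride = TPC0_CQ_PTR_LO_1 - TPC0_CQ_PTR_LO_0;

	return qman_base + (TPC0_CQ_PTR_LO_0 - TPC0_QM_BASE) + stream * stride;
}

int main(void)
{
	uint32_t stream;

	for (stream = 0; stream < 4; stream++) /* four streams per QMAN */
		printf("stream %u -> %#x\n", stream, cq_ptr_lo(0x5000u, stream));
	return 0;
}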
6831 * gaudi_handle_last_pqes_on_err - print last PQEs on error
6850 q = &hdev->kernel_queues[qid_base + stream]; in gaudi_handle_last_pqes_on_err()
6852 qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0; in gaudi_handle_last_pqes_on_err()
6853 pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) + in gaudi_handle_last_pqes_on_err()
6856 queue_len = (q->queue_type == QUEUE_TYPE_INT) ? in gaudi_handle_last_pqes_on_err()
6857 q->int_queue_len : HL_QUEUE_LENGTH; in gaudi_handle_last_pqes_on_err()
6859 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_handle_last_pqes_on_err()
6866 /* we should start printing from ci - 1 */ in gaudi_handle_last_pqes_on_err()
6874 bd = q->kernel_address; in gaudi_handle_last_pqes_on_err()
6877 len = le32_to_cpu(bd->len); in gaudi_handle_last_pqes_on_err()
6878 /* len 0 means uninitialized entry - break */ in gaudi_handle_last_pqes_on_err()
6882 addr[i] = le64_to_cpu(bd->ptr); in gaudi_handle_last_pqes_on_err()
6884 dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n", in gaudi_handle_last_pqes_on_err()
6892 struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode; in gaudi_handle_last_pqes_on_err()
6893 u32 arr_idx = undef_opcode->cb_addr_streams_len; in gaudi_handle_last_pqes_on_err()
6896 undef_opcode->timestamp = ktime_get(); in gaudi_handle_last_pqes_on_err()
6897 undef_opcode->engine_id = gaudi_queue_id_to_engine_id[qid_base]; in gaudi_handle_last_pqes_on_err()
6900 memcpy(undef_opcode->cb_addr_streams[arr_idx], addr, sizeof(addr)); in gaudi_handle_last_pqes_on_err()
6901 undef_opcode->cb_addr_streams_len++; in gaudi_handle_last_pqes_on_err()
6904 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_handle_last_pqes_on_err()
6908 * handle_qman_data_on_err - extract QMAN data on error
6931 /* handle Lower-CP */ in handle_qman_data_on_err()
6949 glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
6950 arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
6967 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
6976 hdev->captured_err_info.undef_opcode.write_enable) { in gaudi_handle_qman_err_generic()
6977 memset(&hdev->captured_err_info.undef_opcode, 0, in gaudi_handle_qman_err_generic()
6978 sizeof(hdev->captured_err_info.undef_opcode)); in gaudi_handle_qman_err_generic()
6980 hdev->captured_err_info.undef_opcode.write_enable = false; in gaudi_handle_qman_err_generic()
6985 if (!hdev->stop_on_err) in gaudi_handle_qman_err_generic()
6998 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
7009 u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0; in gaudi_print_sm_sei_info()
7014 switch (sei_data->sei_cause) { in gaudi_print_sm_sei_info()
7016 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7019 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7022 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7023 "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x", in gaudi_print_sm_sei_info()
7025 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7028 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7031 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7034 dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u", in gaudi_print_sm_sei_info()
7035 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7049 if (hdev->asic_prop.fw_security_enabled) { in gaudi_handle_ecc_event()
7060 index = event_type - GAUDI_EVENT_TPC0_SERR; in gaudi_handle_ecc_event()
7067 index = event_type - GAUDI_EVENT_TPC0_DERR; in gaudi_handle_ecc_event()
7078 index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4; in gaudi_handle_ecc_event()
7088 index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4; in gaudi_handle_ecc_event()
7098 index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4; in gaudi_handle_ecc_event()
7109 index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4; in gaudi_handle_ecc_event()
7122 ecc_address = le64_to_cpu(ecc_data->ecc_address); in gaudi_handle_ecc_event()
7123 ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom); in gaudi_handle_ecc_event()
7124 memory_wrapper_idx = ecc_data->memory_wrapper_idx; in gaudi_handle_ecc_event()
7132 dev_err(hdev->dev, in gaudi_handle_ecc_event()
7146 index = event_type - GAUDI_EVENT_TPC0_QM; in gaudi_handle_qman_err()
7163 index = event_type - GAUDI_EVENT_DMA0_QM; in gaudi_handle_qman_err()
7245 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_irq_info()
7274 struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; in gaudi_print_out_of_sync_info()
7276 dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", in gaudi_print_out_of_sync_info()
7277 le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); in gaudi_print_out_of_sync_info()
7283 dev_err(hdev->dev, in gaudi_print_fw_alive_info()
7285 (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical", in gaudi_print_fw_alive_info()
7286 le32_to_cpu(fw_alive->process_id), in gaudi_print_fw_alive_info()
7287 le32_to_cpu(fw_alive->thread_id), in gaudi_print_fw_alive_info()
7288 le64_to_cpu(fw_alive->uptime_seconds)); in gaudi_print_fw_alive_info()
7296 u16 nic_id = event_type - GAUDI_EVENT_NIC_SEI_0; in gaudi_print_nic_axi_irq_info()
7298 switch (eq_nic_sei->axi_error_cause) { in gaudi_print_nic_axi_irq_info()
7321 dev_err(hdev->dev, "unknown NIC AXI cause %d\n", in gaudi_print_nic_axi_irq_info()
7322 eq_nic_sei->axi_error_cause); in gaudi_print_nic_axi_irq_info()
7328 eq_nic_sei->id); in gaudi_print_nic_axi_irq_info()
7329 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_nic_axi_irq_info()
7335 /* GAUDI doesn't support any reset except hard-reset */ in gaudi_compute_reset_late_init()
7336 return -EPERM; in gaudi_compute_reset_late_init()
7345 if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & in gaudi_hbm_read_interrupts()
7348 dev_err(hdev->dev, "No FW ECC data"); in gaudi_hbm_read_interrupts()
7353 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7355 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7357 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7359 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7361 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7363 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7365 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7367 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7370 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7372 device, ch, hbm_ecc_data->first_addr, type, in gaudi_hbm_read_interrupts()
7373 hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt, in gaudi_hbm_read_interrupts()
7374 hbm_ecc_data->dec_cnt); in gaudi_hbm_read_interrupts()
7378 if (hdev->asic_prop.fw_security_enabled) { in gaudi_hbm_read_interrupts()
7379 dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n"); in gaudi_hbm_read_interrupts()
7388 rc = -EIO; in gaudi_hbm_read_interrupts()
7389 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7396 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7408 rc = -EIO; in gaudi_hbm_read_interrupts()
7409 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7416 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7437 rc = -EIO; in gaudi_hbm_read_interrupts()
7438 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7445 rc = -EIO; in gaudi_hbm_read_interrupts()
7446 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7488 dev_err_ratelimited(hdev->dev, in gaudi_tpc_read_interrupts()
7492 /* If this is QM error, we need to soft-reset */ in gaudi_tpc_read_interrupts()
7505 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1; in tpc_dec_event_to_tpc_id()
7510 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6; in tpc_krn_event_to_tpc_id()
7517 mutex_lock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
7521 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7522 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7523 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); in gaudi_print_clk_change_info()
7524 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; in gaudi_print_clk_change_info()
7525 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7530 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7531 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); in gaudi_print_clk_change_info()
7532 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7537 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7538 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7539 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); in gaudi_print_clk_change_info()
7540 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; in gaudi_print_clk_change_info()
7542 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7547 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7548 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); in gaudi_print_clk_change_info()
7550 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7555 dev_err(hdev->dev, "Received invalid clock change event %d\n", in gaudi_print_clk_change_info()
7560 mutex_unlock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
7565 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_handle_eqe()
7567 u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0; in gaudi_handle_eqe()
7568 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); in gaudi_handle_eqe()
7577 dev_err(hdev->dev, "Event type %u exceeds maximum of %u", in gaudi_handle_eqe()
7578 event_type, GAUDI_EVENT_SIZE - 1); in gaudi_handle_eqe()
7582 gaudi->events_stat[event_type]++; in gaudi_handle_eqe()
7583 gaudi->events_stat_aggregate[event_type]++; in gaudi_handle_eqe()
7610 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7631 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7643 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7668 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7693 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7727 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7806 &eq_entry->sm_sei_data); in gaudi_handle_eqe()
7810 dev_err(hdev->dev, in gaudi_handle_eqe()
7824 cause = le64_to_cpu(eq_entry->data[0]) & 0xFF; in gaudi_handle_eqe()
7825 dev_err(hdev->dev, in gaudi_handle_eqe()
7838 gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); in gaudi_handle_eqe()
7844 gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive); in gaudi_handle_eqe()
7852 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n", in gaudi_handle_eqe()
7865 if (hdev->asic_prop.fw_security_enabled && !reset_direct) { in gaudi_handle_eqe()
7871 } else if (hdev->hard_reset_on_fw_events) { in gaudi_handle_eqe()
7894 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_events_stat()
7897 *size = (u32) sizeof(gaudi->events_stat_aggregate); in gaudi_get_events_stat()
7898 return gaudi->events_stat_aggregate; in gaudi_get_events_stat()
7901 *size = (u32) sizeof(gaudi->events_stat); in gaudi_get_events_stat()
7902 return gaudi->events_stat; in gaudi_get_events_stat()
7907 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_invalidate_cache()
7911 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) || in gaudi_mmu_invalidate_cache()
7912 hdev->reset_info.hard_reset_pending) in gaudi_mmu_invalidate_cache()
7915 if (hdev->pldm) in gaudi_mmu_invalidate_cache()
7922 WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); in gaudi_mmu_invalidate_cache()
7945 return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags); in gaudi_mmu_invalidate_cache_range()
7953 if (hdev->pldm) in gaudi_mmu_update_asid_hop0_addr()
7972 dev_err(hdev->dev, in gaudi_mmu_update_asid_hop0_addr()
7982 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_heartbeat()
7984 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_send_heartbeat()
7992 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cpucp_info_get()
7993 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_cpucp_info_get()
7996 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_cpucp_info_get()
8005 if (!strlen(prop->cpucp_info.card_name)) in gaudi_cpucp_info_get()
8006 strscpy_pad(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, in gaudi_cpucp_info_get()
8009 hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type); in gaudi_cpucp_info_get()
8019 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_is_device_idle()
8020 const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; in gaudi_is_device_idle()
8021 const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; in gaudi_is_device_idle()
8022 const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; in gaudi_is_device_idle()
8032 "--- ------- ------------ ---------- -------------\n"); in gaudi_is_device_idle()
8056 "--- ------- ------------ ---------- ----------\n"); in gaudi_is_device_idle()
8078 "--- ------- ------------ ---------- -----------\n"); in gaudi_is_device_idle()
8104 is_eng_idle ? "Y" : "N", "-", in gaudi_is_device_idle()
8105 "-", mme_arch_sts); in gaudi_is_device_idle()
8112 "--- ------- ------------ ----------\n"); in gaudi_is_device_idle()
8117 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8132 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8154 __acquires(&gaudi->hw_queues_lock) in gaudi_hw_queues_lock()
8156 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_lock()
8158 spin_lock(&gaudi->hw_queues_lock); in gaudi_hw_queues_lock()
8162 __releases(&gaudi->hw_queues_lock) in gaudi_hw_queues_unlock()
8164 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_unlock()
8166 spin_unlock(&gaudi->hw_queues_lock); in gaudi_hw_queues_unlock()
8171 return hdev->pdev->device; in gaudi_get_pci_id()
8177 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_eeprom_data()
8179 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_eeprom_data()
8187 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_monitor_dump()
8189 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_monitor_dump()
8205 offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS); in gaudi_run_tpc_kernel()
8207 if (hdev->pldm) in gaudi_run_tpc_kernel()
8248 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8251 return -EIO; in gaudi_run_tpc_kernel()
8271 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8274 return -EIO; in gaudi_run_tpc_kernel()
8286 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8289 return -EIO; in gaudi_run_tpc_kernel()
8298 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_init()
8301 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_init()
8304 hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev, in gaudi_internal_cb_pool_init()
8306 &hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8309 if (!hdev->internal_cb_pool_virt_addr) in gaudi_internal_cb_pool_init()
8310 return -ENOMEM; in gaudi_internal_cb_pool_init()
8316 hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); in gaudi_internal_cb_pool_init()
8317 if (!hdev->internal_cb_pool) { in gaudi_internal_cb_pool_init()
8318 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8320 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8324 rc = gen_pool_add(hdev->internal_cb_pool, in gaudi_internal_cb_pool_init()
8325 (uintptr_t) hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8326 HOST_SPACE_INTERNAL_CB_SZ, -1); in gaudi_internal_cb_pool_init()
8328 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8330 rc = -EFAULT; in gaudi_internal_cb_pool_init()
8334 hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, in gaudi_internal_cb_pool_init()
8338 if (!hdev->internal_cb_va_base) { in gaudi_internal_cb_pool_init()
8339 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8343 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8345 rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8346 hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8355 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8360 hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8363 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8364 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8367 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_init()
8369 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8370 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_init()
8378 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_fini()
8380 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_fini()
8383 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8384 hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8386 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8389 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8391 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_fini()
8393 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_fini()
8394 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_fini()
8401 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_init()
8404 rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx); in gaudi_ctx_init()
8408 rc = gaudi_restore_user_registers(ctx->hdev); in gaudi_ctx_init()
8410 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_init()
8417 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_fini()
8420 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_fini()
8458 pkt = cb->kernel_address + size; in gaudi_gen_signal_cb()
8473 pkt->value = cpu_to_le32(value); in gaudi_gen_signal_cb()
8474 pkt->ctl = cpu_to_le32(ctl); in gaudi_gen_signal_cb()
8493 pkt->value = cpu_to_le32(value); in gaudi_add_mon_msg_short()
8494 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_mon_msg_short()
8509 dev_err(hdev->dev, in gaudi_add_arm_monitor_pkt()
8522 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - in gaudi_add_arm_monitor_pkt()
8542 pkt->value = cpu_to_le32(value); in gaudi_add_arm_monitor_pkt()
8543 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_arm_monitor_pkt()
8563 pkt->cfg = cpu_to_le32(cfg); in gaudi_add_fence_pkt()
8564 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_fence_pkt()
8632 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2; in gaudi_get_fence_addr()
8647 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2; in gaudi_get_fence_addr()
8662 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2; in gaudi_get_fence_addr()
8677 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2; in gaudi_get_fence_addr()
8683 return -EINVAL; in gaudi_get_fence_addr()
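/*
 * The fence-address lookup above turns a global queue id into a NIC
 * engine index: NIC queues arrive in groups of four streams, so
 * shifting out the two stream bits yields the engine and masking them
 * yields the stream. The first-queue id below is a made-up stand-in
 * for GAUDI_QUEUE_ID_NIC_0_0.
 */
#include <stdio.h>

#define FIRST_NIC_QUEUE 100u /* hypothetical value */

int main(void)
{
	unsigned int q;

	for (q = FIRST_NIC_QUEUE; q < FIRST_NIC_QUEUE + 8; q++)
		printf("queue %u -> nic %u, stream %u\n", q,
		       (q - FIRST_NIC_QUEUE) >> 2,
		       (q - FIRST_NIC_QUEUE) & 0x3);
	return 0;
}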
8705 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8713 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8724 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8735 struct hl_cb *cb = (struct hl_cb *) prop->data; in gaudi_gen_wait_cb()
8736 void *buf = cb->kernel_address; in gaudi_gen_wait_cb()
8738 u32 size = prop->size; in gaudi_gen_wait_cb()
8740 if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) { in gaudi_gen_wait_cb()
8741 dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", in gaudi_gen_wait_cb()
8742 prop->q_idx); in gaudi_gen_wait_cb()
8746 size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr); in gaudi_gen_wait_cb()
8747 size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, in gaudi_gen_wait_cb()
8748 prop->sob_mask, prop->sob_val, prop->mon_id); in gaudi_gen_wait_cb()
8758 dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, in gaudi_reset_sob()
8759 hw_sob->sob_id); in gaudi_reset_sob()
8762 hw_sob->sob_id * 4, 0); in gaudi_reset_sob()
8764 kref_init(&hw_sob->kref); in gaudi_reset_sob()
8777 return -EPERM; in gaudi_get_hw_block_id()
8784 return -EPERM; in gaudi_block_mmap()
8790 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_enable_events_from_fw()
8791 u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_enable_events_from_fw()
8793 le32_to_cpu(dyn_regs->gic_host_ints_irq); in gaudi_enable_events_from_fw()
8801 return -EINVAL; in gaudi_ack_mmu_page_fault_or_access_error()
8817 default: return -EINVAL; in gaudi_map_pll_idx_to_fw_idx()
8833 reg_value -= lower_32_bits(CFG_BASE); in gaudi_add_sync_to_engine_map_entry()
8838 return -ENOMEM; in gaudi_add_sync_to_engine_map_entry()
8839 entry->engine_type = engine_type; in gaudi_add_sync_to_engine_map_entry()
8840 entry->engine_id = engine_id; in gaudi_add_sync_to_engine_map_entry()
8841 entry->sync_id = reg_value; in gaudi_add_sync_to_engine_map_entry()
8842 hash_add(map->tb, &entry->node, reg_value); in gaudi_add_sync_to_engine_map_entry()
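/*
 * Sketch of the sync-object-to-engine map being filled above: each
 * engine's "first sync object" register value (CFG_BASE stripped) is
 * used as a key that maps back to the owning engine. A trivial
 * linear-probing table stands in for the kernel hashtable API:
 */
#include <stdint.h>
#include <stdio.h>

#define MAP_SLOTS 64u

static struct { uint32_t sync_id; int engine_id; int used; } tb[MAP_SLOTS];

static void map_add(uint32_t sync_id, int engine_id)
{
	uint32_t slot = sync_id % MAP_SLOTS;

	while (tb[slot].used) /* linear probing on collision */
		slot = (slot + 1) % MAP_SLOTS;
	tb[slot].sync_id = sync_id;
	tb[slot].engine_id = engine_id;
	tb[slot].used = 1;
}

static int map_find(uint32_t sync_id)
{
	uint32_t slot = sync_id % MAP_SLOTS;

	while (tb[slot].used) {
		if (tb[slot].sync_id == sync_id)
			return tb[slot].engine_id;
		slot = (slot + 1) % MAP_SLOTS;
	}
	return -1;
}

int main(void)
{
	map_add(0x1f4, 3); /* hypothetical sync id owned by engine 3 */
	printf("sync %#x -> engine %d\n", 0x1f4u, map_find(0x1f4));
	return 0;
}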
8850 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_gen_sync_to_engine_map()
8855 for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8857 reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8858 sds->props[SP_NEXT_TPC] * i); in gaudi_gen_sync_to_engine_map()
8867 for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8868 for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) { in gaudi_gen_sync_to_engine_map()
8870 reg_value = RREG32(sds->props[SP_MME_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8871 sds->props[SP_NEXT_MME] * i + in gaudi_gen_sync_to_engine_map()
8876 i * sds->props[SP_SUB_MME_ENG_NUM] + j); in gaudi_gen_sync_to_engine_map()
8883 for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8884 reg_value = RREG32(sds->props[SP_DMA_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8885 sds->props[SP_DMA_QUEUES_OFFSET] * i); in gaudi_gen_sync_to_engine_map()
8904 mon->status); in gaudi_monitor_valid()
8917 mon->arm_data); in gaudi_fill_sobs_from_mon()
8919 mon->arm_data); in gaudi_fill_sobs_from_mon()
8921 for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE - in gaudi_fill_sobs_from_mon()
8953 mon->id, name, in gaudi_print_single_monitor()
8955 mon->arm_data), in gaudi_print_single_monitor()
8960 mon->arm_data)), in gaudi_print_single_monitor()
8962 mon->arm_data), in gaudi_print_single_monitor()
8963 mon->wr_data, in gaudi_print_single_monitor()
8964 (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low, in gaudi_print_single_monitor()
8969 mon->status)), in gaudi_print_single_monitor()
8979 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_print_fences_single_engine()
8980 int rc = -ENOMEM, i; in gaudi_print_fences_single_engine()
8983 statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
8988 fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
8989 sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
8994 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i) in gaudi_print_fences_single_engine()
8997 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
8998 sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) in gaudi_print_fences_single_engine()
9002 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) { in gaudi_print_fences_single_engine()
9015 (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]); in gaudi_print_fences_single_engine()
9016 fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] + in gaudi_print_fences_single_engine()
9017 sds->props[SP_FENCE0_RDATA_OFFSET]; in gaudi_print_fences_single_engine()
9053 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_state_dump_init()
9057 hash_add(sds->so_id_to_str_tb, in gaudi_state_dump_init()
9062 hash_add(sds->monitor_id_to_str_tb, in gaudi_state_dump_init()
9066 sds->props = gaudi_state_dump_specs_props; in gaudi_state_dump_init()
9068 sds->sync_namager_names = gaudi_sync_manager_names; in gaudi_state_dump_init()
9070 sds->funcs = gaudi_state_dump_funcs; in gaudi_state_dump_init()
9097 cpucp_info = &hdev->asic_prop.cpucp_info; in infineon_ver_show()
9099 return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version)); in infineon_ver_show()
9113 dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs; in gaudi_add_device_attr()
9219 * gaudi_set_asic_funcs - set GAUDI function pointers
9226 hdev->asic_funcs = &gaudi_funcs; in gaudi_set_asic_funcs()