Lines Matching full:devid

104 u16 devid; member
122 u16 devid; member
140 u16 devid; member
280 static inline void update_last_devid(u16 devid) in update_last_devid() argument
282 if (devid > amd_iommu_last_bdf) in update_last_devid()
283 amd_iommu_last_bdf = devid; in update_last_devid()
549 update_last_devid(dev->devid); in find_last_devid_from_ivhd()
891 static void set_dev_entry_bit(u16 devid, u8 bit) in set_dev_entry_bit() argument
896 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); in set_dev_entry_bit()
899 static int get_dev_entry_bit(u16 devid, u8 bit) in get_dev_entry_bit() argument
904 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; in get_dev_entry_bit()
912 u32 lo, hi, devid, old_devtb_size; in copy_device_table() local
970 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in copy_device_table()
971 old_dev_tbl_cpy[devid] = old_devtb[devid]; in copy_device_table()
972 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; in copy_device_table()
973 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; in copy_device_table()
976 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in copy_device_table()
977 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in copy_device_table()
980 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { in copy_device_table()
983 old_dev_tbl_cpy[devid].data[1] &= ~tmp; in copy_device_table()
986 old_dev_tbl_cpy[devid].data[0] &= ~tmp; in copy_device_table()
990 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; in copy_device_table()
991 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; in copy_device_table()
992 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK; in copy_device_table()
996 pr_err("Wrong old irq remapping flag: %#x\n", devid); in copy_device_table()
1000 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in copy_device_table()
1008 void amd_iommu_apply_erratum_63(u16 devid) in amd_iommu_apply_erratum_63() argument
1012 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | in amd_iommu_apply_erratum_63()
1013 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); in amd_iommu_apply_erratum_63()
1016 set_dev_entry_bit(devid, DEV_ENTRY_IW); in amd_iommu_apply_erratum_63()
1020 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) in set_iommu_for_device() argument
1022 amd_iommu_rlookup_table[devid] = iommu; in set_iommu_for_device()
1030 u16 devid, u32 flags, u32 ext_flags) in set_dev_entry_from_acpi() argument
1033 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); in set_dev_entry_from_acpi()
1035 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); in set_dev_entry_from_acpi()
1037 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); in set_dev_entry_from_acpi()
1039 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); in set_dev_entry_from_acpi()
1041 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); in set_dev_entry_from_acpi()
1043 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); in set_dev_entry_from_acpi()
1045 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); in set_dev_entry_from_acpi()
1047 amd_iommu_apply_erratum_63(devid); in set_dev_entry_from_acpi()
1049 set_iommu_for_device(iommu, devid); in set_dev_entry_from_acpi()
1052 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) in add_special_device() argument
1071 *devid = entry->devid; in add_special_device()
1081 entry->devid = *devid; in add_special_device()
1089 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, in add_acpi_hid_device() argument
1103 *devid = entry->devid; in add_acpi_hid_device()
1113 entry->devid = *devid; in add_acpi_hid_device()
1115 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
1132 &early_ioapic_map[i].devid, in add_early_maps()
1141 &early_hpet_map[i].devid, in add_early_maps()
1150 &early_acpihid_map[i].devid, in add_early_maps()
1168 u16 devid = 0, devid_start = 0, devid_to = 0; in init_iommu_from_acpi() local
1213 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1215 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1216 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1217 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1220 devid = e->devid; in init_iommu_from_acpi()
1221 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1226 "devid: %02x:%02x.%x flags: %02x\n", in init_iommu_from_acpi()
1227 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1228 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1229 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1232 devid_start = e->devid; in init_iommu_from_acpi()
1239 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1241 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1242 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1243 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1249 devid = e->devid; in init_iommu_from_acpi()
1251 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); in init_iommu_from_acpi()
1253 amd_iommu_alias_table[devid] = devid_to; in init_iommu_from_acpi()
1258 "devid: %02x:%02x.%x flags: %02x " in init_iommu_from_acpi()
1260 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1261 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1262 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1268 devid_start = e->devid; in init_iommu_from_acpi()
1276 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1278 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1279 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1280 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1283 devid = e->devid; in init_iommu_from_acpi()
1284 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1289 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " in init_iommu_from_acpi()
1291 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1292 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1293 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1296 devid_start = e->devid; in init_iommu_from_acpi()
1303 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", in init_iommu_from_acpi()
1304 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1305 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1306 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1308 devid = e->devid; in init_iommu_from_acpi()
1309 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { in init_iommu_from_acpi()
1322 u16 devid; in init_iommu_from_acpi() local
1326 devid = (e->ext >> 8) & 0xffff; in init_iommu_from_acpi()
1338 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1339 PCI_SLOT(devid), in init_iommu_from_acpi()
1340 PCI_FUNC(devid)); in init_iommu_from_acpi()
1342 ret = add_special_device(type, handle, &devid, false); in init_iommu_from_acpi()
1347 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1351 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1356 u16 devid; in init_iommu_from_acpi() local
1398 devid = e->devid; in init_iommu_from_acpi()
1401 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1402 PCI_SLOT(devid), in init_iommu_from_acpi()
1403 PCI_FUNC(devid)); in init_iommu_from_acpi()
1407 ret = add_acpi_hid_device(hid, uid, &devid, false); in init_iommu_from_acpi()
1412 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1416 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1536 iommu->devid = h->devid; in init_iommu_one()
1630 amd_iommu_rlookup_table[iommu->devid] = NULL; in init_iommu_one()
1647 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type() local
1653 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1681 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), in init_iommu_all()
1682 PCI_FUNC(h->devid), h->cap_ptr, in init_iommu_all()
1783 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
1784 iommu->devid & 0xff); in iommu_init_pci()
2060 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n", in iommu_init_intcapxt()
2061 iommu->devid, iommu->dev->irq); in iommu_init_intcapxt()
2134 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2143 e->devid_start = m->devid; in init_unity_map_range()
2198 u32 devid; in init_device_table_dma() local
2200 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in init_device_table_dma()
2201 set_dev_entry_bit(devid, DEV_ENTRY_VALID); in init_device_table_dma()
2202 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); in init_device_table_dma()
2208 u32 devid; in uninit_device_table_dma() local
2210 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in uninit_device_table_dma()
2211 amd_iommu_dev_table[devid].data[0] = 0ULL; in uninit_device_table_dma()
2212 amd_iommu_dev_table[devid].data[1] = 0ULL; in uninit_device_table_dma()
2218 u32 devid; in init_device_table() local
2223 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) in init_device_table()
2224 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); in init_device_table()
2488 int devid, id = mpc_ioapic_id(idx); in check_ioapic_information() local
2490 devid = get_ioapic_devid(id); in check_ioapic_information()
2491 if (devid < 0) { in check_ioapic_information()
2495 } else if (devid == IOAPIC_SB_DEVID) { in check_ioapic_information()
3002 u16 devid; in parse_ivrs_ioapic() local
3017 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_ioapic()
3022 early_ioapic_map[i].devid = devid; in parse_ivrs_ioapic()
3032 u16 devid; in parse_ivrs_hpet() local
3047 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_hpet()
3052 early_hpet_map[i].devid = devid; in parse_ivrs_hpet()
3083 early_acpihid_map[i].devid = in parse_ivrs_acpihid()
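
None of the matched lines above spell out the devid layout, but the shift-and-mask expressions in parse_ivrs_ioapic()/parse_ivrs_hpet() and the PCI_BUS_NUM/PCI_SLOT/PCI_FUNC decoding in init_iommu_from_acpi() imply the usual PCI bus/device/function packing of the 16-bit device ID. As a standalone illustration only (this is not code from the indexed file, and pack_devid() is a hypothetical helper), a minimal user-space sketch:

/*
 * Hypothetical sketch: pack and unpack a 16-bit devid the same way the
 * matched lines do (bus in bits 15:8, device in bits 7:3, function in
 * bits 2:0). Not part of the AMD IOMMU driver itself.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t pack_devid(uint8_t bus, uint8_t dev, uint8_t fn)
{
	/* mirrors: ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7) */
	return ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
	uint16_t devid = pack_devid(0x00, 0x14, 0x0);	/* e.g. 00:14.0 */

	/* same fields PCI_BUS_NUM()/PCI_SLOT()/PCI_FUNC() would extract */
	printf("%02x:%02x.%x\n",
	       (devid >> 8) & 0xff,	/* bus      */
	       (devid >> 3) & 0x1f,	/* slot     */
	       devid & 0x7);		/* function */
	return 0;
}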