Lines Matching full:its

82 * value of BASER register configuration and ITS page size.
94 * The ITS structure - contains most of the infrastructure, with the
128 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) argument
129 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) argument
130 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) argument
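The three macros above fold capability checks into bitfield reads of GITS_TYPER. A minimal standalone sketch of the device_ids() computation, assuming the architectural GITS_TYPER.Devbits layout (bits [17:13]) and re-implementing FIELD_GET() with plain shifts since the kernel helpers aren't available in userspace:

    #include <stdint.h>
    #include <stdio.h>

    #define GITS_TYPER_DEVBITS_SHIFT 13
    #define GITS_TYPER_DEVBITS_MASK  (0x1fULL << GITS_TYPER_DEVBITS_SHIFT)

    /* GITS_TYPER.Devbits holds (number of DeviceID bits - 1). */
    static unsigned int device_ids(uint64_t typer)
    {
        return ((typer & GITS_TYPER_DEVBITS_MASK) >> GITS_TYPER_DEVBITS_SHIFT) + 1;
    }

    int main(void)
    {
        uint64_t typer = 19ULL << GITS_TYPER_DEVBITS_SHIFT; /* 20 DeviceID bits */
        printf("DeviceID bits: %u\n", device_ids(typer));   /* prints 20 */
        return 0;
    }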
162 * The ITS view of a device - belongs to an ITS, owns an interrupt
163 * translation table, and a list of interrupts. If some of its
169 struct its_node *its; member
295 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) in require_its_list_vmovp() argument
297 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); in require_its_list_vmovp()
307 struct its_node *its; in get_its_list() local
310 list_for_each_entry(its, &its_nodes, entry) { in get_its_list()
311 if (!is_v4(its)) in get_its_list()
314 if (require_its_list_vmovp(vm, its)) in get_its_list()
315 __set_bit(its->list_nr, &its_list); in get_its_list()
330 struct its_node *its = its_dev->its; in dev_event_to_col() local
332 return its->collections + its_dev->event_map.col_map[event]; in dev_event_to_col()
419 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
421 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
428 * ITS command descriptors - parameters to be encoded in a command
527 * The ITS command block, which is what the ITS actually parses.
705 static struct its_collection *its_build_mapd_cmd(struct its_node *its, in its_build_mapd_cmd() argument
725 static struct its_collection *its_build_mapc_cmd(struct its_node *its, in its_build_mapc_cmd() argument
739 static struct its_collection *its_build_mapti_cmd(struct its_node *its, in its_build_mapti_cmd() argument
759 static struct its_collection *its_build_movi_cmd(struct its_node *its, in its_build_movi_cmd() argument
778 static struct its_collection *its_build_discard_cmd(struct its_node *its, in its_build_discard_cmd() argument
796 static struct its_collection *its_build_inv_cmd(struct its_node *its, in its_build_inv_cmd() argument
814 static struct its_collection *its_build_int_cmd(struct its_node *its, in its_build_int_cmd() argument
832 static struct its_collection *its_build_clear_cmd(struct its_node *its, in its_build_clear_cmd() argument
850 static struct its_collection *its_build_invall_cmd(struct its_node *its, in its_build_invall_cmd() argument
862 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, in its_build_vinvall_cmd() argument
871 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
874 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, in its_build_vmapp_cmd() argument
878 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe); in its_build_vmapp_cmd()
889 if (is_v4_1(its)) { in its_build_vmapp_cmd()
902 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; in its_build_vmapp_cmd()
910 if (!is_v4_1(its)) in its_build_vmapp_cmd()
933 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, in its_build_vmapti_cmd() argument
939 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) in its_build_vmapti_cmd()
953 return valid_vpe(its, desc->its_vmapti_cmd.vpe); in its_build_vmapti_cmd()
956 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, in its_build_vmovi_cmd() argument
962 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) in its_build_vmovi_cmd()
976 return valid_vpe(its, desc->its_vmovi_cmd.vpe); in its_build_vmovi_cmd()
979 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, in its_build_vmovp_cmd() argument
985 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; in its_build_vmovp_cmd()
992 if (is_v4_1(its)) { in its_build_vmovp_cmd()
999 return valid_vpe(its, desc->its_vmovp_cmd.vpe); in its_build_vmovp_cmd()
1002 static struct its_vpe *its_build_vinv_cmd(struct its_node *its, in its_build_vinv_cmd() argument
1017 return valid_vpe(its, map->vpe); in its_build_vinv_cmd()
1020 static struct its_vpe *its_build_vint_cmd(struct its_node *its, in its_build_vint_cmd() argument
1035 return valid_vpe(its, map->vpe); in its_build_vint_cmd()
1038 static struct its_vpe *its_build_vclear_cmd(struct its_node *its, in its_build_vclear_cmd() argument
1053 return valid_vpe(its, map->vpe); in its_build_vclear_cmd()
1056 static struct its_vpe *its_build_invdb_cmd(struct its_node *its, in its_build_invdb_cmd() argument
1060 if (WARN_ON(!is_v4_1(its))) in its_build_invdb_cmd()
1068 return valid_vpe(its, desc->its_invdb_cmd.vpe); in its_build_invdb_cmd()
1071 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, in its_build_vsgi_cmd() argument
1075 if (WARN_ON(!is_v4_1(its))) in its_build_vsgi_cmd()
1088 return valid_vpe(its, desc->its_vsgi_cmd.vpe); in its_build_vsgi_cmd()
1091 static u64 its_cmd_ptr_to_offset(struct its_node *its, in its_cmd_ptr_to_offset() argument
1094 return (ptr - its->cmd_base) * sizeof(*ptr); in its_cmd_ptr_to_offset()
1097 static int its_queue_full(struct its_node *its) in its_queue_full() argument
1102 widx = its->cmd_write - its->cmd_base; in its_queue_full()
1103 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); in its_queue_full()
1105 /* This is incredibly unlikely to happen, unless the ITS locks up. */ in its_queue_full()
1112 static struct its_cmd_block *its_allocate_entry(struct its_node *its) in its_allocate_entry() argument
1117 while (its_queue_full(its)) { in its_allocate_entry()
1120 pr_err_ratelimited("ITS queue not draining\n"); in its_allocate_entry()
1127 cmd = its->cmd_write++; in its_allocate_entry()
1130 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) in its_allocate_entry()
1131 its->cmd_write = its->cmd_base; in its_allocate_entry()
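its_queue_full() and its_allocate_entry() implement the driver's command ring: the software write pointer chases the hardware read pointer (GITS_CREADR), and the queue counts as full when advancing the writer would catch the reader. A userspace sketch of that bookkeeping, with NR_ENTRIES and the simulated read index as stand-ins for the real queue size and register:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_ENTRIES 32

    struct cmd_block { unsigned long long raw[4]; }; /* 32-byte ITS command */

    static struct cmd_block queue[NR_ENTRIES];
    static struct cmd_block *cmd_base = queue;
    static struct cmd_block *cmd_write = queue;
    static int hw_read_idx; /* stands in for GITS_CREADR / sizeof(cmd) */

    static bool queue_full(void)
    {
        int widx = cmd_write - cmd_base;
        /* Full when advancing the write pointer would catch the reader. */
        return ((widx + 1) % NR_ENTRIES) == hw_read_idx;
    }

    static struct cmd_block *allocate_entry(void)
    {
        struct cmd_block *cmd;

        if (queue_full())
            return NULL; /* the driver instead spins, then gives up */

        cmd = cmd_write++;
        if (cmd_write == cmd_base + NR_ENTRIES) /* wrap at the end */
            cmd_write = cmd_base;
        return cmd;
    }

    int main(void)
    {
        while (allocate_entry())
            ; /* fills all but one slot, as in the real ring */
        printf("slots used: %ld\n", (long)(cmd_write - cmd_base));
        return 0;
    }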
1142 static struct its_cmd_block *its_post_commands(struct its_node *its) in its_post_commands() argument
1144 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); in its_post_commands()
1146 writel_relaxed(wr, its->base + GITS_CWRITER); in its_post_commands()
1148 return its->cmd_write; in its_post_commands()
1151 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) in its_flush_cmd() argument
1155 * the ITS. in its_flush_cmd()
1157 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) in its_flush_cmd()
1163 static int its_wait_for_range_completion(struct its_node *its, in its_wait_for_range_completion() argument
1171 to_idx = its_cmd_ptr_to_offset(its, to); in its_wait_for_range_completion()
1180 rd_idx = readl_relaxed(its->base + GITS_CREADR); in its_wait_for_range_completion()
1196 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", in its_wait_for_range_completion()
1210 void name(struct its_node *its, \
1219 raw_spin_lock_irqsave(&its->lock, flags); \
1221 cmd = its_allocate_entry(its); \
1223 raw_spin_unlock_irqrestore(&its->lock, flags); \
1226 sync_obj = builder(its, cmd, desc); \
1227 its_flush_cmd(its, cmd); \
1230 sync_cmd = its_allocate_entry(its); \
1234 buildfn(its, sync_cmd, sync_obj); \
1235 its_flush_cmd(its, sync_cmd); \
1239 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1240 next_cmd = its_post_commands(its); \
1241 raw_spin_unlock_irqrestore(&its->lock, flags); \
1243 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1244 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1247 static void its_build_sync_cmd(struct its_node *its, in its_build_sync_cmd() argument
1260 static void its_build_vsync_cmd(struct its_node *its, in its_build_vsync_cmd() argument
1280 its_send_single_command(dev->its, its_build_int_cmd, &desc); in its_send_int()
1290 its_send_single_command(dev->its, its_build_clear_cmd, &desc); in its_send_clear()
1300 its_send_single_command(dev->its, its_build_inv_cmd, &desc); in its_send_inv()
1310 its_send_single_command(dev->its, its_build_mapd_cmd, &desc); in its_send_mapd()
1313 static void its_send_mapc(struct its_node *its, struct its_collection *col, in its_send_mapc() argument
1321 its_send_single_command(its, its_build_mapc_cmd, &desc); in its_send_mapc()
1332 its_send_single_command(dev->its, its_build_mapti_cmd, &desc); in its_send_mapti()
1344 its_send_single_command(dev->its, its_build_movi_cmd, &desc); in its_send_movi()
1354 its_send_single_command(dev->its, its_build_discard_cmd, &desc); in its_send_discard()
1357 static void its_send_invall(struct its_node *its, struct its_collection *col) in its_send_invall() argument
1363 its_send_single_command(its, its_build_invall_cmd, &desc); in its_send_invall()
1377 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); in its_send_vmapti()
1390 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); in its_send_vmovi()
1393 static void its_send_vmapp(struct its_node *its, in its_send_vmapp() argument
1400 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; in its_send_vmapp()
1402 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); in its_send_vmapp()
1408 struct its_node *its; in its_send_vmovp() local
1414 its = list_first_entry(&its_nodes, struct its_node, entry); in its_send_vmovp()
1415 desc.its_vmovp_cmd.col = &its->collections[col_id]; in its_send_vmovp()
1416 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); in its_send_vmovp()
1433 list_for_each_entry(its, &its_nodes, entry) { in its_send_vmovp()
1434 if (!is_v4(its)) in its_send_vmovp()
1437 if (!require_its_list_vmovp(vpe->its_vm, its)) in its_send_vmovp()
1440 desc.its_vmovp_cmd.col = &its->collections[col_id]; in its_send_vmovp()
1441 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); in its_send_vmovp()
1445 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) in its_send_vinvall() argument
1450 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); in its_send_vinvall()
1464 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); in its_send_vinv()
1478 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); in its_send_vint()
1492 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); in its_send_vclear()
1495 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) in its_send_invdb() argument
1500 its_send_single_vcommand(its, its_build_invdb_cmd, &desc); in its_send_invdb()
1572 WARN_ON(!is_v4_1(its_dev->its)); in direct_lpi_inv()
1590 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) in lpi_update_config()
1608 if (is_v4_1(its_dev->its)) in its_vlpi_set_doorbell()
1621 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI in its_vlpi_set_doorbell()
1701 node = its_dev->its->numa_node; in its_select_cpu()
1723 * ITS placed next to two NUMA nodes. in its_select_cpu()
1733 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) in its_select_cpu()
1751 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && in its_select_cpu()
1789 target_col = &its_dev->its->collections[cpu]; in its_set_affinity()
1806 struct its_node *its = its_dev->its; in its_irq_get_msi_base() local
1808 return its->phys_base + GITS_TRANSLATER; in its_irq_get_msi_base()
1817 its_dev->its->get_msi_base(its_dev)); in its_irq_compose_msi_msg()
1870 static void its_map_vm(struct its_node *its, struct its_vm *vm) in its_map_vm() argument
1881 vm->vlpi_count[its->list_nr]++; in its_map_vm()
1883 if (vm->vlpi_count[its->list_nr] == 1) { in its_map_vm()
1890 its_send_vmapp(its, vpe, true); in its_map_vm()
1892 its_send_vinvall(its, vpe); in its_map_vm()
1897 static void its_unmap_vm(struct its_node *its, struct its_vm *vm) in its_unmap_vm() argument
1899 /* Not using the ITS list? Everything is always mapped. */ in its_unmap_vm()
1905 if (!--vm->vlpi_count[its->list_nr]) { in its_unmap_vm()
1910 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
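its_map_vm()/its_unmap_vm() refcount a VM per ITS through vlpi_count[]: the first reference issues VMAPP for every VPE on that ITS, the last one tears them down. A sketch of the pattern with stand-in types and map/unmap hooks:

    #include <stdio.h>

    #define MAX_ITS 4

    struct vm { int vlpi_count[MAX_ITS]; int nr_vpes; };

    static void map_vpes(struct vm *vm, int its)   { printf("VMAPP x%d on its%d\n", vm->nr_vpes, its); }
    static void unmap_vpes(struct vm *vm, int its) { printf("VMAPP(unmap) x%d on its%d\n", vm->nr_vpes, its); }

    static void map_vm(struct vm *vm, int its)
    {
        if (++vm->vlpi_count[its] == 1) /* first reference: really map */
            map_vpes(vm, its);
    }

    static void unmap_vm(struct vm *vm, int its)
    {
        if (!--vm->vlpi_count[its])     /* last reference: really unmap */
            unmap_vpes(vm, its);
    }

    int main(void)
    {
        struct vm vm = { .nr_vpes = 2 };
        map_vm(&vm, 0);   /* maps */
        map_vm(&vm, 0);   /* refcount only */
        unmap_vm(&vm, 0); /* refcount only */
        unmap_vm(&vm, 0); /* unmaps */
        return 0;
    }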
1944 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
1945 its_map_vm(its_dev->its, info->map->vm); in its_vlpi_map()
2003 /* Potentially unmap the VM from this ITS */ in its_vlpi_unmap()
2004 its_unmap_vm(its_dev->its, its_dev->event_map.vm); in its_vlpi_unmap()
2039 /* Need a v4 ITS */ in its_irq_set_vcpu_affinity()
2040 if (!is_v4(its_dev->its)) in its_irq_set_vcpu_affinity()
2066 .name = "ITS",
2142 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); in alloc_lpi_range()
2201 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", in its_lpi_init()
2210 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); in its_lpi_init()
2362 static u64 its_read_baser(struct its_node *its, struct its_baser *baser) in its_read_baser() argument
2364 u32 idx = baser - its->tables; in its_read_baser()
2366 return gits_read_baser(its->base + GITS_BASER + (idx << 3)); in its_read_baser()
2369 static void its_write_baser(struct its_node *its, struct its_baser *baser, in its_write_baser() argument
2372 u32 idx = baser - its->tables; in its_write_baser()
2374 gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); in its_write_baser()
2375 baser->val = its_read_baser(its, baser); in its_write_baser()
2378 static int its_setup_baser(struct its_node *its, struct its_baser *baser, in its_setup_baser() argument
2381 u64 val = its_read_baser(its, baser); in its_setup_baser()
2392 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", in its_setup_baser()
2393 &its->phys_base, its_base_type_string[type], in its_setup_baser()
2399 page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); in its_setup_baser()
2411 pr_err("ITS: no 52bit PA support when psz=%d\n", psz); in its_setup_baser()
2446 its_write_baser(its, baser, val); in its_setup_baser()
2465 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", in its_setup_baser()
2466 &its->phys_base, its_base_type_string[type], in its_setup_baser()
2477 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", in its_setup_baser()
2478 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), in its_setup_baser()
2487 static bool its_parse_indirect_baser(struct its_node *its, in its_parse_indirect_baser() argument
2491 u64 tmp = its_read_baser(its, baser); in its_parse_indirect_baser()
2505 its_write_baser(its, baser, val | GITS_BASER_INDIRECT); in its_parse_indirect_baser()
2510 * The size of the lvl2 table is equal to ITS page size in its_parse_indirect_baser()
2513 * which is reported by ITS hardware times lvl1 table in its_parse_indirect_baser()
2523 * range of device IDs that the ITS can grok... The ID in its_parse_indirect_baser()
2532 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", in its_parse_indirect_baser()
2533 &its->phys_base, its_base_type_string[type], in its_parse_indirect_baser()
2534 device_ids(its), ids); in its_parse_indirect_baser()
2552 static u32 compute_its_aff(struct its_node *its) in compute_its_aff() argument
2558 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute in compute_its_aff()
2562 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); in compute_its_aff()
2564 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); in compute_its_aff()
2570 struct its_node *its; in find_sibling_its() local
2578 list_for_each_entry(its, &its_nodes, entry) { in find_sibling_its()
2581 if (!is_v4_1(its) || its == cur_its) in find_sibling_its()
2584 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) in find_sibling_its()
2587 if (aff != compute_its_aff(its)) in find_sibling_its()
2591 baser = its->tables[2].val; in find_sibling_its()
2595 return its; in find_sibling_its()
2601 static void its_free_tables(struct its_node *its) in its_free_tables() argument
2606 if (its->tables[i].base) { in its_free_tables()
2607 its_free_pages(its->tables[i].base, its->tables[i].order); in its_free_tables()
2608 its->tables[i].base = NULL; in its_free_tables()
2613 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) in its_probe_baser_psz() argument
2620 val = its_read_baser(its, baser); in its_probe_baser_psz()
2639 its_write_baser(its, baser, val); in its_probe_baser_psz()
2661 static int its_alloc_tables(struct its_node *its) in its_alloc_tables() argument
2667 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) in its_alloc_tables()
2671 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) { in its_alloc_tables()
2677 struct its_baser *baser = its->tables + i; in its_alloc_tables()
2678 u64 val = its_read_baser(its, baser); in its_alloc_tables()
2686 if (its_probe_baser_psz(its, baser)) { in its_alloc_tables()
2687 its_free_tables(its); in its_alloc_tables()
2695 indirect = its_parse_indirect_baser(its, baser, &order, in its_alloc_tables()
2696 device_ids(its)); in its_alloc_tables()
2700 if (is_v4_1(its)) { in its_alloc_tables()
2704 if ((sibling = find_sibling_its(its))) { in its_alloc_tables()
2706 its_write_baser(its, baser, baser->val); in its_alloc_tables()
2711 indirect = its_parse_indirect_baser(its, baser, &order, in its_alloc_tables()
2716 err = its_setup_baser(its, baser, cache, shr, order, indirect); in its_alloc_tables()
2718 its_free_tables(its); in its_alloc_tables()
2732 struct its_node *its; in inherit_vpe_l1_table_from_its() local
2739 list_for_each_entry(its, &its_nodes, entry) { in inherit_vpe_l1_table_from_its()
2742 if (!is_v4_1(its)) in inherit_vpe_l1_table_from_its()
2745 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) in inherit_vpe_l1_table_from_its()
2748 if (aff != compute_its_aff(its)) in inherit_vpe_l1_table_from_its()
2752 baser = its->tables[2].val; in inherit_vpe_l1_table_from_its()
2757 gic_data_rdist()->vpe_l1_base = its->tables[2].base; in inherit_vpe_l1_table_from_its()
2809 * ours wrt CommonLPIAff. Let's use its own VPROPBASER. in inherit_vpe_l1_table_from_rd()
3019 static int its_alloc_collections(struct its_node *its) in its_alloc_collections() argument
3023 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), in its_alloc_collections()
3025 if (!its->collections) in its_alloc_collections()
3029 its->collections[i].target_address = ~0ULL; in its_alloc_collections()
3126 pr_err_ratelimited("ITS virtual pending table not cleaning\n"); in read_vpend_dirty_clear()
3282 static void its_cpu_init_collection(struct its_node *its) in its_cpu_init_collection() argument
3287 /* avoid cross node collections and its mapping */ in its_cpu_init_collection()
3288 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { in its_cpu_init_collection()
3292 if (its->numa_node != NUMA_NO_NODE && in its_cpu_init_collection()
3293 its->numa_node != of_node_to_nid(cpu_node)) in its_cpu_init_collection()
3298 * We now have to bind each collection to its target in its_cpu_init_collection()
3301 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { in its_cpu_init_collection()
3303 * This ITS wants the physical address of the in its_cpu_init_collection()
3308 /* This ITS wants a linear CPU number. */ in its_cpu_init_collection()
3314 its->collections[cpu].target_address = target; in its_cpu_init_collection()
3315 its->collections[cpu].col_id = cpu; in its_cpu_init_collection()
3317 its_send_mapc(its, &its->collections[cpu], 1); in its_cpu_init_collection()
3318 its_send_invall(its, &its->collections[cpu]); in its_cpu_init_collection()
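its_cpu_init_collection() encodes the MAPC target two ways: an ITS with GITS_TYPER.PTA set wants the redistributor's physical address, otherwise it wants the linear processor number from GICR_TYPER (bits [23:8]) shifted into the RDbase field. A sketch with invented register values:

    #include <stdint.h>
    #include <stdio.h>

    #define GITS_TYPER_PTA (1ULL << 19)

    static uint64_t mapc_target(uint64_t gits_typer, uint64_t rdist_phys,
                                uint64_t gicr_typer)
    {
        if (gits_typer & GITS_TYPER_PTA)
            return rdist_phys;                     /* physical address form */
        return ((gicr_typer >> 8) & 0xffff) << 16; /* CPU number form */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)
               mapc_target(0, 0x80000000, 3ULL << 8)); /* CPU 3 -> 0x30000 */
        return 0;
    }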
3323 struct its_node *its; in its_cpu_init_collections() local
3327 list_for_each_entry(its, &its_nodes, entry) in its_cpu_init_collections()
3328 its_cpu_init_collection(its); in its_cpu_init_collections()
3333 static struct its_device *its_find_device(struct its_node *its, u32 dev_id) in its_find_device() argument
3338 raw_spin_lock_irqsave(&its->lock, flags); in its_find_device()
3340 list_for_each_entry(tmp, &its->its_device_list, entry) { in its_find_device()
3347 raw_spin_unlock_irqrestore(&its->lock, flags); in its_find_device()
3352 static struct its_baser *its_get_baser(struct its_node *its, u32 type) in its_get_baser() argument
3357 if (GITS_BASER_TYPE(its->tables[i].val) == type) in its_get_baser()
3358 return &its->tables[i]; in its_get_baser()
3364 static bool its_alloc_table_entry(struct its_node *its, in its_alloc_table_entry() argument
3385 page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, in its_alloc_table_entry()
3400 /* Ensure updated table contents are visible to ITS hardware */ in its_alloc_table_entry()
3407 static bool its_alloc_device_table(struct its_node *its, u32 dev_id) in its_alloc_device_table() argument
3411 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); in its_alloc_device_table()
3413 /* Don't allow device id that exceeds ITS hardware limit */ in its_alloc_device_table()
3415 return (ilog2(dev_id) < device_ids(its)); in its_alloc_device_table()
3417 return its_alloc_table_entry(its, baser, dev_id); in its_alloc_device_table()
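The flat-table bound check in its_alloc_device_table() is just a bit-width test: with no indirection, a DeviceID is valid iff it fits in the number of ID bits the ITS reports, i.e. ilog2(dev_id) < device_ids(its). A sketch re-implementing ilog2() for userspace:

    #include <stdbool.h>
    #include <stdio.h>

    static int ilog2(unsigned int v) /* floor(log2(v)), v > 0 */
    {
        return 31 - __builtin_clz(v);
    }

    static bool dev_id_in_range(unsigned int dev_id, unsigned int id_bits)
    {
        if (dev_id == 0)
            return true;
        return ilog2(dev_id) < id_bits;
    }

    int main(void)
    {
        printf("%d %d\n", dev_id_in_range(0xfffff, 20),   /* 1: fits in 20 bits */
                          dev_id_in_range(0x100000, 20)); /* 0: needs 21 bits */
        return 0;
    }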
3422 struct its_node *its; in its_alloc_vpe_table() local
3432 list_for_each_entry(its, &its_nodes, entry) { in its_alloc_vpe_table()
3435 if (!is_v4(its)) in its_alloc_vpe_table()
3438 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); in its_alloc_vpe_table()
3442 if (!its_alloc_table_entry(its, baser, vpe_id)) in its_alloc_vpe_table()
3462 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, in its_create_device() argument
3475 if (!its_alloc_device_table(its, dev_id)) in its_create_device()
3486 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); in its_create_device()
3489 itt = itt_alloc_pool(its->numa_node, sz); in its_create_device()
3514 dev->its = its; in its_create_device()
3526 raw_spin_lock_irqsave(&its->lock, flags); in its_create_device()
3527 list_add(&dev->entry, &its->its_device_list); in its_create_device()
3528 raw_spin_unlock_irqrestore(&its->lock, flags); in its_create_device()
3530 /* Map device to its ITT */ in its_create_device()
3540 raw_spin_lock_irqsave(&its_dev->its->lock, flags); in its_free_device()
3542 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); in its_free_device()
3567 struct its_node *its; in its_msi_prepare() local
3577 * are built on top of the ITS. in its_msi_prepare()
3582 its = msi_info->data; in its_msi_prepare()
3586 vpe_proxy.dev->its == its && in its_msi_prepare()
3594 mutex_lock(&its->dev_alloc_lock); in its_msi_prepare()
3595 its_dev = its_find_device(its, dev_id); in its_msi_prepare()
3607 its_dev = its_create_device(its, dev_id, nvec, true); in its_msi_prepare()
3618 mutex_unlock(&its->dev_alloc_lock); in its_msi_prepare()
3656 struct its_node *its = its_dev->its; in its_irq_domain_alloc() local
3666 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); in its_irq_domain_alloc()
3725 struct its_node *its = its_dev->its; in its_irq_domain_free() local
3739 mutex_lock(&its->dev_alloc_lock); in its_irq_domain_free()
3757 mutex_unlock(&its->dev_alloc_lock); in its_irq_domain_free()
3876 target_col = &vpe_proxy.dev->its->collections[to]; in its_vpe_db_proxy_move()
3904 struct its_node *its; in its_vpe_set_affinity() local
3928 * interrupt to its new location. in its_vpe_set_affinity()
3938 * the mapping state on this VM should the ITS list be in use (see in its_vpe_set_affinity()
3948 * If we are offered another CPU in the same GICv4.1 ITS in its_vpe_set_affinity()
3968 its = find_4_1_its(); in its_vpe_set_affinity()
3969 if (its && its->flags & ITS_FLAGS_WORKAROUND_HISILICON_162100801) in its_vpe_set_affinity()
4025 * would be able to read its coarse map pretty quickly anyway, in its_vpe_schedule()
4047 struct its_node *its; in its_vpe_invall() local
4051 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_invall()
4052 if (!is_v4(its)) in its_vpe_invall()
4055 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
4059 * Sending a VINVALL to a single ITS is enough, as all in its_vpe_invall()
4062 its_send_vinvall(its, vpe); in its_vpe_invall()
4183 static struct its_node *its = NULL; in find_4_1_its() local
4185 if (!its) { in find_4_1_its()
4186 list_for_each_entry(its, &its_nodes, entry) { in find_4_1_its()
4187 if (is_v4_1(its)) in find_4_1_its()
4188 return its; in find_4_1_its()
4192 its = NULL; in find_4_1_its()
4195 return its; in find_4_1_its()
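find_4_1_its() memoizes its scan in a function-local static: the first successful lookup is cached, and while nothing is found the list is rescanned on each call. A sketch with the node list simplified to an array:

    #include <stddef.h>
    #include <stdbool.h>

    struct its_node { bool v4_1; };

    static struct its_node nodes[] = { { false }, { true } };

    static struct its_node *find_4_1_its(void)
    {
        static struct its_node *cached;

        if (!cached) {
            for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
                if (nodes[i].v4_1) {
                    cached = &nodes[i];
                    break;
                }
            }
        }
        return cached; /* NULL (and a fresh scan next call) if none found */
    }

    int main(void) { return find_4_1_its() ? 0 : 1; }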
4201 struct its_node *its; in its_vpe_4_1_send_inv() local
4206 * it to the first valid ITS, and let the HW do its magic. in its_vpe_4_1_send_inv()
4208 its = find_4_1_its(); in its_vpe_4_1_send_inv()
4209 if (its) in its_vpe_4_1_send_inv()
4210 its_send_invdb(its, vpe); in its_vpe_4_1_send_inv()
4337 * GICv4.1 allows us to send VSGI commands to any ITS as long as the in its_configure_sgi()
4339 * activation time, we're pretty sure the first GICv4.1 ITS will do. in its_configure_sgi()
4382 struct its_node *its = find_4_1_its(); in its_sgi_set_irqchip_state() local
4387 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); in its_sgi_set_irqchip_state()
4675 struct its_node *its; in its_vpe_irq_domain_activate() local
4689 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_activate()
4690 if (!is_v4(its)) in its_vpe_irq_domain_activate()
4693 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
4694 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
4704 struct its_node *its; in its_vpe_irq_domain_deactivate() local
4713 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_deactivate()
4714 if (!is_v4(its)) in its_vpe_irq_domain_deactivate()
4717 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
4744 * GIC architecture specification requires the ITS to be both in its_force_quiescent()
4751 /* Disable the generation of all interrupts to this ITS */ in its_force_quiescent()
4755 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ in its_force_quiescent()
4772 struct its_node *its = data; in its_enable_quirk_cavium_22375() local
4775 its->typer &= ~GITS_TYPER_DEVBITS; in its_enable_quirk_cavium_22375()
4776 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); in its_enable_quirk_cavium_22375()
4777 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; in its_enable_quirk_cavium_22375()
4784 struct its_node *its = data; in its_enable_quirk_cavium_23144() local
4786 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; in its_enable_quirk_cavium_23144()
4793 struct its_node *its = data; in its_enable_quirk_qdf2400_e0065() local
4796 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; in its_enable_quirk_qdf2400_e0065()
4797 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); in its_enable_quirk_qdf2400_e0065()
4804 struct its_node *its = its_dev->its; in its_irq_get_msi_base_pre_its() local
4807 * The Socionext Synquacer SoC has a so-called 'pre-ITS', in its_irq_get_msi_base_pre_its()
4813 return its->pre_its_base + (its_dev->device_id << 2); in its_irq_get_msi_base_pre_its()
4818 struct its_node *its = data; in its_enable_quirk_socionext_synquacer() local
4822 if (!fwnode_property_read_u32_array(its->fwnode_handle, in its_enable_quirk_socionext_synquacer()
4823 "socionext,synquacer-pre-its", in its_enable_quirk_socionext_synquacer()
4827 its->pre_its_base = pre_its_window[0]; in its_enable_quirk_socionext_synquacer()
4828 its->get_msi_base = its_irq_get_msi_base_pre_its; in its_enable_quirk_socionext_synquacer()
4831 if (device_ids(its) > ids) { in its_enable_quirk_socionext_synquacer()
4832 its->typer &= ~GITS_TYPER_DEVBITS; in its_enable_quirk_socionext_synquacer()
4833 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); in its_enable_quirk_socionext_synquacer()
4836 /* the pre-ITS breaks isolation, so disable MSI remapping */ in its_enable_quirk_socionext_synquacer()
4837 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; in its_enable_quirk_socionext_synquacer()
4845 struct its_node *its = data; in its_enable_quirk_hip07_161600802() local
4851 its->vlpi_redist_offset = SZ_128K; in its_enable_quirk_hip07_161600802()
4857 struct its_node *its = data; in its_enable_rk3588001() local
4863 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; in its_enable_rk3588001()
4871 struct its_node *its = data; in its_set_non_coherent() local
4873 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; in its_set_non_coherent()
4879 struct its_node *its = data; in its_enable_quirk_hip09_162100801() local
4881 its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801; in its_enable_quirk_hip09_162100801()
4899 .desc = "ITS: Cavium errata 22375, 24313",
4907 .desc = "ITS: Cavium erratum 23144",
4915 .desc = "ITS: QDF2400 erratum 0065",
4916 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4925 * implementation, but with a 'pre-ITS' added that requires
4928 .desc = "ITS: Socionext Synquacer pre-ITS",
4936 .desc = "ITS: Hip07 erratum 161600802",
4944 .desc = "ITS: Hip09 erratum 162100801",
4952 .desc = "ITS: Rockchip erratum RK3588001",
4959 .desc = "ITS: non-coherent attribute",
4965 .desc = "ITS: Rockchip erratum RK3568002",
4975 static void its_enable_quirks(struct its_node *its) in its_enable_quirks() argument
4977 u32 iidr = readl_relaxed(its->base + GITS_IIDR); in its_enable_quirks()
4979 gic_enable_quirks(iidr, its_quirks, its); in its_enable_quirks()
4981 if (is_of_node(its->fwnode_handle)) in its_enable_quirks()
4982 gic_enable_of_quirks(to_of_node(its->fwnode_handle), in its_enable_quirks()
4983 its_quirks, its); in its_enable_quirks()
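its_enable_quirks() drives the erratum table above by matching GITS_IIDR against { iidr, mask, init } entries and running every hook that matches. A sketch of that machinery with an invented IIDR value and flag bit:

    #include <stdint.h>
    #include <stdio.h>

    struct quirk {
        const char *desc;
        uint32_t iidr;
        uint32_t mask;
        void (*init)(void *data);
    };

    static void quirk_force_nonshare(void *data)
    {
        *(unsigned int *)data |= 0x1; /* e.g. a FORCE_NON_SHAREABLE flag */
    }

    static const struct quirk quirks[] = {
        { "example erratum", 0x00001070, 0xffffffff, quirk_force_nonshare },
    };

    static void enable_quirks(uint32_t iidr, unsigned int *flags)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
            if ((iidr & quirks[i].mask) == quirks[i].iidr) {
                printf("enabling workaround for %s\n", quirks[i].desc);
                quirks[i].init(flags);
            }
        }
    }

    int main(void)
    {
        unsigned int flags = 0;
        enable_quirks(0x00001070, &flags);
        return flags ? 0 : 1;
    }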
4988 struct its_node *its; in its_save_disable() local
4992 list_for_each_entry(its, &its_nodes, entry) { in its_save_disable()
4995 base = its->base; in its_save_disable()
4996 its->ctlr_save = readl_relaxed(base + GITS_CTLR); in its_save_disable()
4999 pr_err("ITS@%pa: failed to quiesce: %d\n", in its_save_disable()
5000 &its->phys_base, err); in its_save_disable()
5001 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
5005 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); in its_save_disable()
5010 list_for_each_entry_continue_reverse(its, &its_nodes, entry) { in its_save_disable()
5013 base = its->base; in its_save_disable()
5014 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
5024 struct its_node *its; in its_restore_enable() local
5028 list_for_each_entry(its, &its_nodes, entry) { in its_restore_enable()
5032 base = its->base; in its_restore_enable()
5035 * Make sure that the ITS is disabled. If it fails to quiesce, in its_restore_enable()
5037 * registers is undefined according to the GIC v3 ITS in its_restore_enable()
5040 * Firmware resuming with the ITS enabled is terminally broken. in its_restore_enable()
5045 pr_err("ITS@%pa: failed to quiesce on resume: %d\n", in its_restore_enable()
5046 &its->phys_base, ret); in its_restore_enable()
5050 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); in its_restore_enable()
5056 its->cmd_write = its->cmd_base; in its_restore_enable()
5061 struct its_baser *baser = &its->tables[i]; in its_restore_enable()
5066 its_write_baser(its, baser, baser->val); in its_restore_enable()
5068 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_restore_enable()
5071 * Reinit the collection if it's stored in the ITS. This is in its_restore_enable()
5075 if (its->collections[smp_processor_id()].col_id < in its_restore_enable()
5077 its_cpu_init_collection(its); in its_restore_enable()
5094 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); in its_map_one()
5101 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); in its_map_one()
5108 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); in its_map_one()
5119 static int its_init_domain(struct its_node *its) in its_init_domain() argument
5129 info->data = its; in its_init_domain()
5132 its->msi_domain_flags, 0, in its_init_domain()
5133 its->fwnode_handle, &its_domain_ops, in its_init_domain()
5150 struct its_node *its; in its_init_vpe_domain() local
5155 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); in its_init_vpe_domain()
5159 /* Any ITS will do, even if not v4 */ in its_init_vpe_domain()
5160 its = list_first_entry(&its_nodes, struct its_node, entry); in its_init_vpe_domain()
5169 devid = GENMASK(device_ids(its) - 1, 0); in its_init_vpe_domain()
5170 vpe_proxy.dev = its_create_device(its, devid, entries, false); in its_init_vpe_domain()
5173 pr_err("ITS: Can't allocate GICv4 proxy device\n"); in its_init_vpe_domain()
5181 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", in its_init_vpe_domain()
5187 static int __init its_compute_its_list_map(struct its_node *its) in its_compute_its_list_map() argument
5200 pr_err("ITS@%pa: No ITSList entry available!\n", in its_compute_its_list_map()
5201 &its->phys_base); in its_compute_its_list_map()
5205 ctlr = readl_relaxed(its->base + GITS_CTLR); in its_compute_its_list_map()
5208 writel_relaxed(ctlr, its->base + GITS_CTLR); in its_compute_its_list_map()
5209 ctlr = readl_relaxed(its->base + GITS_CTLR); in its_compute_its_list_map()
5216 pr_err("ITS@%pa: Duplicate ITSList entry %d\n", in its_compute_its_list_map()
5217 &its->phys_base, its_number); in its_compute_its_list_map()
5224 static int __init its_probe_one(struct its_node *its) in its_probe_one() argument
5231 its_enable_quirks(its); in its_probe_one()
5233 if (is_v4(its)) { in its_probe_one()
5234 if (!(its->typer & GITS_TYPER_VMOVP)) { in its_probe_one()
5235 err = its_compute_its_list_map(its); in its_probe_one()
5239 its->list_nr = err; in its_probe_one()
5241 pr_info("ITS@%pa: Using ITS number %d\n", in its_probe_one()
5242 &its->phys_base, err); in its_probe_one()
5244 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); in its_probe_one()
5247 if (is_v4_1(its)) { in its_probe_one()
5248 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); in its_probe_one()
5250 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); in its_probe_one()
5251 if (!its->sgir_base) { in its_probe_one()
5256 its->mpidr = readl_relaxed(its->base + GITS_MPIDR); in its_probe_one()
5258 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", in its_probe_one()
5259 &its->phys_base, its->mpidr, svpet); in its_probe_one()
5263 page = its_alloc_pages_node(its->numa_node, in its_probe_one()
5270 its->cmd_base = (void *)page_address(page); in its_probe_one()
5271 its->cmd_write = its->cmd_base; in its_probe_one()
5273 err = its_alloc_tables(its); in its_probe_one()
5277 err = its_alloc_collections(its); in its_probe_one()
5281 baser = (virt_to_phys(its->cmd_base) | in its_probe_one()
5287 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
5288 tmp = gits_read_cbaser(its->base + GITS_CBASER); in its_probe_one()
5290 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) in its_probe_one()
5303 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
5305 pr_info("ITS: using cache flushing for cmd queue\n"); in its_probe_one()
5306 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; in its_probe_one()
5309 gits_write_cwriter(0, its->base + GITS_CWRITER); in its_probe_one()
5310 ctlr = readl_relaxed(its->base + GITS_CTLR); in its_probe_one()
5312 if (is_v4(its)) in its_probe_one()
5314 writel_relaxed(ctlr, its->base + GITS_CTLR); in its_probe_one()
5316 err = its_init_domain(its); in its_probe_one()
5321 list_add(&its->entry, &its_nodes); in its_probe_one()
5327 its_free_tables(its); in its_probe_one()
5329 its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); in its_probe_one()
5331 if (its->sgir_base) in its_probe_one()
5332 iounmap(its->sgir_base); in its_probe_one()
5334 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); in its_probe_one()
5491 { .compatible = "arm,gic-v3-its", },
5499 struct its_node *its; in its_node_init() local
5506 pr_info("ITS %pR\n", res); in its_node_init()
5508 its = kzalloc(sizeof(*its), GFP_KERNEL); in its_node_init()
5509 if (!its) in its_node_init()
5512 raw_spin_lock_init(&its->lock); in its_node_init()
5513 mutex_init(&its->dev_alloc_lock); in its_node_init()
5514 INIT_LIST_HEAD(&its->entry); in its_node_init()
5515 INIT_LIST_HEAD(&its->its_device_list); in its_node_init()
5517 its->typer = gic_read_typer(its_base + GITS_TYPER); in its_node_init()
5518 its->base = its_base; in its_node_init()
5519 its->phys_base = res->start; in its_node_init()
5520 its->get_msi_base = its_irq_get_msi_base; in its_node_init()
5521 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; in its_node_init()
5523 its->numa_node = numa_node; in its_node_init()
5524 its->fwnode_handle = handle; in its_node_init()
5526 return its; in its_node_init()
5533 static void its_node_destroy(struct its_node *its) in its_node_destroy() argument
5535 iounmap(its->base); in its_node_destroy()
5536 kfree(its); in its_node_destroy()
5546 * Make sure *all* the ITS are reset before we probe any, as in its_of_probe()
5547 * they may be sharing memory. If any of the ITS fails to in its_of_probe()
5565 struct its_node *its; in its_of_probe() local
5570 pr_warn("%pOF: no msi-controller property, ITS ignored\n", in its_of_probe()
5581 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); in its_of_probe()
5582 if (!its) in its_of_probe()
5585 err = its_probe_one(its); in its_of_probe()
5587 its_node_destroy(its); in its_of_probe()
5602 /* GIC ITS ID */
5637 pr_err("SRAT: Invalid header length %d in ITS affinity\n", in gic_acpi_parse_srat_its()
5650 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); in gic_acpi_parse_srat_its()
5657 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", in gic_acpi_parse_srat_its()
5685 /* free the its_srat_maps after ITS probing */
5701 struct its_node *its; in gic_acpi_parse_madt_its() local
5713 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", in gic_acpi_parse_madt_its()
5721 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", in gic_acpi_parse_madt_its()
5726 its = its_node_init(&res, dom_handle, in gic_acpi_parse_madt_its()
5728 if (!its) { in gic_acpi_parse_madt_its()
5735 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; in gic_acpi_parse_madt_its()
5737 err = its_probe_one(its); in gic_acpi_parse_madt_its()
5768 * Make sure *all* the ITS are reset before we probe any, as in its_acpi_probe()
5769 * they may be sharing memory. If any of the ITS fails to in its_acpi_probe()
5810 struct its_node *its; in its_init() local
5830 pr_warn("ITS: No ITS available, not enabling LPIs\n"); in its_init()
5838 list_for_each_entry(its, &its_nodes, entry) { in its_init()
5839 has_v4 |= is_v4(its); in its_init()
5840 has_v4_1 |= is_v4_1(its); in its_init()
5858 pr_err("ITS: Disabling GICv4 support\n"); in its_init()