Lines Matching defs:vpe (Linux, drivers/irqchip/irq-gic-v3-its.c)
359 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
361 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
362 return vpe->col_idx;
365 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
367 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
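
The two helpers above form the per-vPE affinity lock: any path that needs vpe->col_idx to stay stable brackets its work with them (see for instance lines 4292-4294 further down). A minimal usage sketch, with the per-CPU work left as a placeholder:

	unsigned long flags;
	int cpu;

	cpu = vpe_to_cpuid_lock(vpe, &flags);	/* takes vpe->vpe_lock, returns vpe->col_idx */
	/* ... redistributor/ITS work that relies on a stable col_idx ... */
	vpe_to_cpuid_unlock(vpe, flags);	/* drops vpe->vpe_lock */
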
374 struct its_vpe *vpe = NULL;
378 vpe = irq_data_get_irq_chip_data(d);
382 vpe = map->vpe;
385 if (vpe) {
386 cpu = vpe_to_cpuid_lock(vpe, flags);
400 struct its_vpe *vpe = NULL;
403 vpe = irq_data_get_irq_chip_data(d);
407 vpe = map->vpe;
410 if (vpe)
411 vpe_to_cpuid_unlock(vpe, flags);
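
Lines 374-411 are the irq_data-level wrappers around the same lock: they first work out which vPE, if any, is behind the interrupt, and only then take its vpe_lock. A simplified sketch of the lock side, assuming the get_vlpi_map(), its_get_event_id() and its_vpe_irq_chip symbols defined elsewhere in this file:

static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
	struct its_vpe *vpe = NULL;
	int cpu;

	if (d->chip == &its_vpe_irq_chip) {
		/* d is a vPE doorbell interrupt */
		vpe = irq_data_get_irq_chip_data(d);
	} else {
		/* d may be a vLPI; get_vlpi_map() is NULL for plain LPIs */
		struct its_vlpi_map *map = get_vlpi_map(d);

		if (map)
			vpe = map->vpe;
	}

	if (vpe) {
		cpu = vpe_to_cpuid_lock(vpe, flags);
	} else {
		/* Physical LPI: the target comes from the device's col_map */
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);

		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
		*flags = 0;	/* keep the compiler happy */
	}

	return cpu;
}
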
422 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
424 if (valid_col(its->collections + vpe->col_idx))
425 return vpe;
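
valid_vpe() is what the vPE command builders below return: a non-NULL result tells the command-queue code to chase the command with a VSYNC against that vPE once it has been written. The helper in full is just the check above plus the NULL fall-through:

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}
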
483 struct its_vpe *vpe;
487 struct its_vpe *vpe;
493 struct its_vpe *vpe;
501 struct its_vpe *vpe;
508 struct its_vpe *vpe;
515 struct its_vpe *vpe;
519 struct its_vpe *vpe;
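
The seven bare struct its_vpe *vpe; members at 483-519 live in the per-command descriptor union, one arm per vPE-targeted command. An approximate sketch of a few of those arms (field names and siblings reconstructed from the usage below, not copied from the file):

struct its_cmd_desc {
	union {
		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;

		/* ... its_vmapti_cmd, its_vmovi_cmd, its_invdb_cmd, its_vsgi_cmd ... */
	};
};
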
870 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
874 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
881 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
887 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
891 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
898 vpe = NULL;
904 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
911 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
916 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
928 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
933 return vpe;
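
Lines 891 and 911 are the two ends of the per-vPE mapping refcount: a vPE can be VMAPPed on several ITSes, and only the first map and the last unmap should carry Alloc=1, which is what makes a GICv4.1 ITS allocate or release the vPE's private table state (line 898 then returns NULL because a v4.1 unmap is self-synchronising and needs no trailing VSYNC). The counting idea in isolation:

	/* Map side (VMAPP, V=1): Alloc only on the 0 -> 1 transition */
	bool alloc_on_map = !atomic_fetch_inc(&vpe->vmapp_count);

	/* Unmap side (VMAPP, V=0): Alloc only on the 1 -> 0 transition,
	 * i.e. when the last mapping of this vPE goes away */
	bool alloc_on_unmap = !atomic_dec_return(&vpe->vmapp_count);
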
943 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
949 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
956 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
966 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
972 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
979 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
992 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
997 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
1002 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
1020 return valid_vpe(its, map->vpe);
1038 return valid_vpe(its, map->vpe);
1056 return valid_vpe(its, map->vpe);
1067 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
1071 return valid_vpe(its, desc->its_invdb_cmd.vpe);
1082 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
1091 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1374 desc.its_vmapti_cmd.vpe = map->vpe;
1388 desc.its_vmovi_cmd.vpe = map->vpe;
1397 struct its_vpe *vpe, bool valid)
1401 desc.its_vmapp_cmd.vpe = vpe;
1403 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1408 static void its_send_vmovp(struct its_vpe *vpe)
1412 int col_id = vpe->col_idx;
1414 desc.its_vmovp_cmd.vpe = vpe;
1433 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1440 if (!require_its_list_vmovp(vpe->its_vm, its))
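
its_send_vmovp() at 1408-1440 either emits a single VMOVP, or, when ITSList is in use, broadcasts the same sequence-numbered VMOVP to every v4 ITS that currently holds vLPIs for this VM. A condensed sketch of its shape (global and helper names reproduced from memory of the driver, ordering comment reduced to one line):

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		/* Single-ITS case: one VMOVP targeting the new collection */
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/* ITSList case: every ITS must observe VMOVPs in the same order */
	guard(raw_spinlock)(&vmovp_lock);
	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;
		if (!require_its_list_vmovp(vpe->its_vm, its))
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}
}
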
1448 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1452 desc.its_vinvall_cmd.vpe = vpe;
1498 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1502 desc.its_invdb_cmd.vpe = vpe;
1578 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1890 struct its_vpe *vpe = vm->vpes[i];
1892 scoped_guard(raw_spinlock, &vpe->vpe_lock)
1893 its_send_vmapp(its, vpe, true);
1895 its_send_vinvall(its, vpe);
3798 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3805 if (vpe->vpe_proxy_event == -1)
3808 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3809 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3819 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3821 vpe->vpe_proxy_event = -1;
3824 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3834 its_vpe_db_proxy_unmap_locked(vpe);
3839 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3846 if (vpe->vpe_proxy_event != -1)
3854 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3855 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3858 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3859 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3862 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3875 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3883 its_vpe_db_proxy_map_locked(vpe);
3886 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3887 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
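
The vpe_db_proxy block at 3798-3887 exists for GICv4.0 redistributors without direct LPI injection: their doorbells cannot be poked through GICR_SETLPIR, so each doorbell is multiplexed as an event of a single proxy its_device, and slots are recycled round-robin when the proxy runs out. A simplified sketch of the map step, assuming the vpe_proxy globals declared earlier in the file:

static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
	/* Already mapped to a proxy event? Nothing to do. */
	if (vpe->vpe_proxy_event != -1)
		return;

	/* Evict whoever currently owns the next (round-robin) slot */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

	/* Claim the slot for this vPE */
	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
	vpe->vpe_proxy_event = vpe_proxy.next_victim;
	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;

	/* Route the doorbell LPI through the proxy device's event */
	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}
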
3892 static void its_vpe_4_1_invall_locked(int cpu, struct its_vpe *vpe)
3898 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3910 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3920 if (!atomic_read(&vpe->vmapp_count)) {
3942 * protect us, and that we must ensure nobody samples vpe->col_idx
3944 * taken on any vLPI handling path that evaluates vpe->col_idx.
3951 raw_spin_lock(&vpe->its_vm->vmapp_lock);
3953 from = vpe_to_cpuid_lock(vpe, &flags);
3973 vpe->col_idx = cpu;
3975 its_send_vmovp(vpe);
3979 its_vpe_4_1_invall_locked(cpu, vpe);
3981 its_vpe_db_proxy_move(vpe, from, cpu);
3985 vpe_to_cpuid_unlock(vpe, flags);
3988 raw_spin_unlock(&vpe->its_vm->vmapp_lock);
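
its_vpe_set_affinity() at 3910-3988 is where the locking described by the comment fragments at 3942-3944 comes together: the VM-wide vmapp_lock fends off a concurrent map/unmap of the vPE, vpe_lock keeps out everyone who samples vpe->col_idx, and only then is the vPE actually moved. The flow, heavily condensed (target selection, the GICv4.1 shared-table shortcut and the early-exit paths are omitted, and the conditionals are paraphrased):

	raw_spin_lock(&vpe->its_vm->vmapp_lock);	/* vs concurrent VMAPP/unmap */
	from = vpe_to_cpuid_lock(vpe, &flags);		/* vs anyone reading col_idx */

	vpe->col_idx = cpu;				/* publish the new target... */
	its_send_vmovp(vpe);				/* ...and tell every relevant ITS */

	if (gic_rdists->has_rvpeid)			/* GICv4.1: resync the new RD */
		its_vpe_4_1_invall_locked(cpu, vpe);

	its_vpe_db_proxy_move(vpe, from, cpu);		/* GICv4.0 proxy doorbell follows */

	vpe_to_cpuid_unlock(vpe, flags);
	raw_spin_unlock(&vpe->its_vm->vmapp_lock);
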
4007 static void its_vpe_schedule(struct its_vpe *vpe)
4013 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
4022 val = virt_to_phys(page_address(vpe->vpt_page)) &
4031 * easily. So in the end, vpe->pending_last is only an
4038 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
4043 static void its_vpe_deschedule(struct its_vpe *vpe)
4050 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
4051 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
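
Lines 4007-4051 are the GICv4.0 resident/non-resident switch: scheduling a vPE programs the redistributor's GICR_VPROPBASER with the VM's property table and GICR_VPENDBASER with this vPE's pending table (Valid set); descheduling clears Valid and latches IDAI and PendingLast back into the vpe. A trimmed sketch of the schedule side, with the cacheability/shareability attributes dropped:

static void its_vpe_schedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Point VPROPBASER at the VM's LPI property table */
	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
	       GENMASK_ULL(51, 12);
	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

	/* Point VPENDBASER at this vPE's pending table and make it resident */
	val  = virt_to_phys(page_address(vpe->vpt_page)) &
	       GENMASK_ULL(51, 16);
	/*
	 * PendingLast is set unconditionally: we can race against the
	 * doorbell, so it is only a hint that something may be pending
	 * (see the comment fragment at line 4031).
	 */
	val |= GICR_VPENDBASER_PendingLast;
	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
	val |= GICR_VPENDBASER_Valid;
	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
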
4054 static void its_vpe_invall(struct its_vpe *vpe)
4058 guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
4064 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
4071 its_send_vinvall(its, vpe);
4078 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4083 its_vpe_schedule(vpe);
4087 its_vpe_deschedule(vpe);
4095 its_vpe_invall(vpe);
4103 static void its_vpe_send_cmd(struct its_vpe *vpe,
4110 its_vpe_db_proxy_map_locked(vpe);
4111 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
4118 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4123 its_vpe_send_cmd(vpe, its_send_inv);
4149 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4157 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4159 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4161 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4166 its_vpe_send_cmd(vpe, its_send_int);
4168 its_vpe_send_cmd(vpe, its_send_clear);
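
Lines 4149-4168 inject or retract the doorbell "by hand" and show the two paths a GICv4.0 host can take. Paraphrased, with the SYNC waits and irqchip-state type checks dropped:

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		/* Direct LPI: poke the pending state straight at the RD */
		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		if (state)
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
		else
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
	} else {
		/* No direct LPI: go through the proxy device with INT/CLEAR */
		if (state)
			its_vpe_send_cmd(vpe, its_send_int);
		else
			its_vpe_send_cmd(vpe, its_send_clear);
	}
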
4180 .name = "GICv4-vpe",
4209 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4219 its_send_invdb(its, vpe);
4234 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4244 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4249 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4268 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4272 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4273 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4282 vpe->pending_last = true;
4286 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4292 cpu = vpe_to_cpuid_lock(vpe, &flags);
4293 its_vpe_4_1_invall_locked(cpu, vpe);
4294 vpe_to_cpuid_unlock(vpe, flags);
4299 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4304 its_vpe_4_1_schedule(vpe, info);
4308 its_vpe_4_1_deschedule(vpe, info);
4316 its_vpe_4_1_invall(vpe);
4325 .name = "GICv4.1-vpe",
4335 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4338 desc.its_vsgi_cmd.vpe = vpe;
4340 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4341 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4342 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4355 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4357 vpe->sgi_config[d->hwirq].enabled = false;
4363 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4365 vpe->sgi_config[d->hwirq].enabled = true;
4390 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4394 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4407 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4427 cpu = vpe_to_cpuid_lock(vpe, &flags);
4430 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4447 vpe_to_cpuid_unlock(vpe, flags);
4459 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4464 vpe->sgi_config[d->hwirq].priority = info->priority;
4465 vpe->sgi_config[d->hwirq].group = info->group;
4488 struct its_vpe *vpe = args;
4495 vpe->sgi_config[i].priority = 0;
4496 vpe->sgi_config[i].enabled = false;
4497 vpe->sgi_config[i].group = false;
4500 &its_sgi_irq_chip, vpe);
4525 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4537 vpe->sgi_config[d->hwirq].enabled = false;
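
Every sgi_config access above indexes the small per-vSGI array embedded in struct its_vpe (GICv4.1 only, declared in include/linux/irqchip/arm-gic-v4.h). Its approximate shape:

	/* Inside struct its_vpe, GICv4.1 only (approximate layout) */
	struct {
		u8	priority;
		bool	enabled;
		bool	group;
	} sgi_config[16];	/* one slot per vSGI, indexed by d->hwirq (0..15) */
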
4559 static int its_vpe_init(struct its_vpe *vpe)
4582 raw_spin_lock_init(&vpe->vpe_lock);
4583 vpe->vpe_id = vpe_id;
4584 vpe->vpt_page = vpt_page;
4585 atomic_set(&vpe->vmapp_count, 0);
4587 vpe->vpe_proxy_event = -1;
4592 static void its_vpe_teardown(struct its_vpe *vpe)
4594 its_vpe_db_proxy_unmap(vpe);
4595 its_vpe_id_free(vpe->vpe_id);
4596 its_free_pending_table(vpe->vpt_page);
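
its_vpe_init()/its_vpe_teardown() at 4559-4596 bracket the software-side lifetime: a vpe_id from the global allocator, a pending-table page, the per-vPE lock and a zeroed vmapp_count, undone in reverse (plus the v4.0 proxy unmap) on teardown. A condensed sketch of the init side, with the vPE-table allocation, error unwinding and the v4.1-vs-v4.0 conditional abridged:

static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Pick a free vPE ID and a virtual pending table page */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	atomic_set(&vpe->vmapp_count, 0);
	vpe->vpe_proxy_event = -1;	/* GICv4.0 only: not mapped to the proxy yet */

	return 0;
}
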
4611 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4613 BUG_ON(vm != vpe->its_vm);
4616 its_vpe_teardown(vpe);
4683 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4687 vpe->col_idx = cpumask_first(cpu_online_mask);
4688 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4702 its_send_vmapp(its, vpe, true);
4703 its_send_vinvall(its, vpe);
4712 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4726 its_send_vmapp(its, vpe, false);
4734 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4735 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
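
The irqdomain activate/deactivate hooks at 4683-4735 close the loop: activation picks an initial home CPU, then VMAPPs the vPE on every v4 ITS and follows up with VINVALL; deactivation VMAPPs with V=0, and on GICv4.1 flushes the pending table to the point of coherency once the last mapping is gone, so its contents are visible in memory again. Condensed, with the eager/lazy-mapping checks and iteration glue abridged:

	/* Activate: give the vPE a home and map it on every v4 ITS */
	vpe->col_idx = cpumask_first(cpu_online_mask);
	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;
		its_send_vmapp(its, vpe, true);		/* VMAPP, V=1 */
		its_send_vinvall(its, vpe);		/* resync any cached config */
	}

	/* Deactivate: unmap everywhere... */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;
		its_send_vmapp(its, vpe, false);	/* VMAPP, V=0 */
	}

	/* ...and on GICv4.1, once nothing maps this vPE any more, push the
	 * pending table out to memory */
	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
					LPI_PENDBASE_SZ);
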