Lines Matching +full:reserved +full:-cpu +full:-vectors (drivers/irqchip/irq-mips-gic.c)
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10 #define pr_fmt(fmt) "irq-mips-gic: " fmt
26 #include <asm/mips-cps.h>
30 #include <dt-bindings/interrupt-controller/mips-gic.h>
35 /* Add 2 to convert GIC CPU pin to core interrupt */
44 #define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
47 #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
71 unsigned int cpu; in __gic_with_next_online_cpu() local
73 /* Discover the next online CPU */ in __gic_with_next_online_cpu()
74 cpu = cpumask_next(prev, cpu_online_mask); in __gic_with_next_online_cpu()
77 if (cpu >= nr_cpu_ids) in __gic_with_next_online_cpu()
78 return cpu; in __gic_with_next_online_cpu()
81 * Move the access lock to the next CPU's GIC local register block. in __gic_with_next_online_cpu()
86 write_gic_vl_other(mips_cm_vp_id(cpu)); in __gic_with_next_online_cpu()
88 return cpu; in __gic_with_next_online_cpu()
98 * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
99 * @cpu: An integer variable to hold the current CPU number
107 #define for_each_online_cpu_gic(cpu, gic_lock) \ argument
109 for ((cpu) = __gic_with_next_online_cpu(-1); \
110 (cpu) < nr_cpu_ids; \
112 (cpu) = __gic_with_next_online_cpu(cpu))
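As a usage illustration (a minimal sketch; sketch_mask_on_all_vpes() is a hypothetical name, but the body mirrors the gic_mask_local_irq_all_vpes() path further down this listing): the iterator holds gic_lock for the whole walk, and each pass retargets the other-VP register block, so one "vo" (VP-other) access lands in a different CPU's local registers per iteration.

	static void sketch_mask_on_all_vpes(unsigned int intr)
	{
		unsigned int cpu;

		/* gic_lock is held across the walk; GIC_VL_OTHER is
		 * repointed at each online CPU in turn, so this single
		 * VP-other write masks `intr` on every CPU. */
		for_each_online_cpu_gic(cpu, &gic_lock)
			write_gic_vo_rmask(BIT(intr));
	}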
115 * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
136 unsigned int cpu, cl; in gic_irq_lock_cluster() local
138 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); in gic_irq_lock_cluster()
139 BUG_ON(cpu >= NR_CPUS); in gic_irq_lock_cluster()
141 cl = cpu_cluster(&cpu_data[cpu]); in gic_irq_lock_cluster()
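gic_irq_lock_cluster() returns true when the IRQ's effective-affinity CPU lives on a remote cluster, in which case it has locked the CM "other" block and the caller must use the redirect accessors and unlock afterwards. A sketch of the calling pattern, modeled on gic_mask_irq() below (assuming the driver's write_gic_rmask()/write_gic_redir_rmask() accessors):

	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	if (gic_irq_lock_cluster(d)) {
		/* Remote cluster: access goes via the CM redirect block */
		write_gic_redir_rmask(BIT(intr));
		mips_cm_unlock_other();
	} else {
		/* Local cluster: plain shared-register access */
		write_gic_rmask(BIT(intr));
	}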
186 irq -= GIC_PIN_TO_VEC_OFFSET; in gic_bind_eic_interrupt()
192 static void gic_send_ipi(struct irq_data *d, unsigned int cpu) in gic_send_ipi() argument
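Raising an IPI is a single register write: the WEDGE register sets a shared interrupt pending when GIC_WEDGE_RW is set (and clears it when it is not). A sketch of the local-cluster send path; the driver additionally takes the cluster lock when the destination CPU is on a remote cluster:

	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	/* GIC_WEDGE_RW = raise; the same write without it would clear */
	write_gic_wedge(GIC_WEDGE_RW | hwirq);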
217 return -1; in gic_get_c0_perfcount_int()
229 return -1; in gic_get_c0_fdc_int()
243 /* Get per-cpu bitmaps */ in gic_handle_shared_int()
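gic_handle_shared_int() snapshots the shared pending bits, keeps only those routed to the current CPU via its pcpu_masks entry, and dispatches each survivor through the IRQ domain. A condensed sketch of the 32-bit read path (the driver also has a 64-bit variant and a chained-handler mode):

	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	unsigned long *pcpu_mask = this_cpu_ptr(pcpu_masks);
	unsigned int intr;

	/* Snapshot pending, then keep only IRQs routed to this CPU */
	__ioread32_copy(pending, addr_gic_pend(),
			DIV_ROUND_UP(gic_shared_intrs, 32));
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs)
		do_domain_IRQ(gic_irq_domain, GIC_SHARED_TO_HWIRQ(intr));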
267 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_mask_irq()
281 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_unmask_irq()
282 unsigned int cpu; in gic_unmask_irq() local
292 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); in gic_unmask_irq()
293 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); in gic_unmask_irq()
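Unmasking a shared interrupt has a software half as well as a hardware half: besides setting the hardware mask bit, the driver records the routed CPU in pcpu_masks so the dispatch loop above only handles the IRQ there. Condensed, with the cluster-redirect branch elided:

	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(BIT(intr));	/* hardware: enable the IRQ */

	/* software: note which CPU should dispatch it */
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));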
298 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_ack_irq()
313 irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_set_type()
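gic_set_type() decomposes the requested IRQ type into three per-interrupt register fields: polarity, trigger and dual-edge. For IRQ_TYPE_EDGE_FALLING, for example, the programming amounts to (a sketch using the driver's change_gic_*() read-modify-write accessors):

	change_gic_pol(irq, GIC_POL_FALLING_EDGE);
	change_gic_trig(irq, GIC_TRIG_EDGE);
	change_gic_dual(irq, GIC_DUAL_SINGLE);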
371 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); in gic_set_affinity()
372 unsigned int cpu, cl, old_cpu, old_cl; in gic_set_affinity() local
377 * i.e. CPU in Linux parlance, at a time. Therefore we always route to in gic_set_affinity()
378 * the first online CPU in the mask. in gic_set_affinity()
380 cpu = cpumask_first_and(cpumask, cpu_online_mask); in gic_set_affinity()
381 if (cpu >= NR_CPUS) in gic_set_affinity()
382 return -EINVAL; in gic_set_affinity()
386 cl = cpu_cluster(&cpu_data[cpu]); in gic_set_affinity()
404 * Update effective affinity - after this gic_irq_lock_cluster() will in gic_set_affinity()
407 irq_data_update_effective_affinity(d, cpumask_of(cpu)); in gic_set_affinity()
420 write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu))); in gic_set_affinity()
425 set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); in gic_set_affinity()
430 write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); in gic_set_affinity()
435 set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); in gic_set_affinity()
488 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_mask_local_irq()
495 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_unmask_local_irq()
509 int intr, cpu; in gic_mask_local_irq_all_vpes() local
514 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_mask_local_irq_all_vpes()
516 cd->mask = false; in gic_mask_local_irq_all_vpes()
518 for_each_online_cpu_gic(cpu, &gic_lock) in gic_mask_local_irq_all_vpes()
525 int intr, cpu; in gic_unmask_local_irq_all_vpes() local
530 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); in gic_unmask_local_irq_all_vpes()
532 cd->mask = true; in gic_unmask_local_irq_all_vpes()
534 for_each_online_cpu_gic(cpu, &gic_lock) in gic_unmask_local_irq_all_vpes()
557 write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); in gic_all_vpes_irq_cpu_online()
558 if (cd->mask) in gic_all_vpes_irq_cpu_online()
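gic_all_vpes_irq_cpu_online() replays cached state onto a CPU coming online, because a cold VP's local registers do not hold what the "all VPEs" chip ops programmed while it was down. The cache is the cd->map / cd->mask pair seen above; per routable local interrupt the replay is essentially:

	struct gic_all_vpes_chip_data *cd = &gic_all_vpes_chip_data[intr];

	/* Restore routing, then re-enable if it was left unmasked */
	write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));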
584 irq_hw_number_t hw, unsigned int cpu) in gic_shared_irq_domain_map() argument
591 irq_data_update_effective_affinity(data, cpumask_of(cpu)); in gic_shared_irq_domain_map()
599 write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu))); in gic_shared_irq_domain_map()
603 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); in gic_shared_irq_domain_map()
617 return -EINVAL; in gic_irq_domain_xlate()
624 return -EINVAL; in gic_irq_domain_xlate()
635 int err, cpu; in gic_irq_domain_map() local
642 return -EBUSY; in gic_irq_domain_map()
659 * If adding support for more per-cpu interrupts, keep the in gic_irq_domain_map()
672 cd->map = map; in gic_irq_domain_map()
695 return -EPERM; in gic_irq_domain_map()
698 for_each_online_cpu_gic(cpu, &gic_lock) in gic_irq_domain_map()
711 if (fwspec->param[0] == GIC_SHARED) in gic_irq_domain_alloc()
712 hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]); in gic_irq_domain_alloc()
714 hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]); in gic_irq_domain_alloc()
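The cells behind fwspec->param follow the mips-gic binding: <type number flags>, with type being GIC_SHARED or GIC_LOCAL from dt-bindings/interrupt-controller/mips-gic.h. The decode, mirroring gic_irq_domain_xlate() above, is:

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;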
753 int cpu, ret, i; in gic_ipi_domain_alloc() local
757 return -ENOMEM; in gic_ipi_domain_alloc()
762 return -EBUSY; in gic_ipi_domain_alloc()
766 /* map the hwirq for each cpu consecutively */ in gic_ipi_domain_alloc()
768 for_each_cpu(cpu, ipimask) { in gic_ipi_domain_alloc()
777 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq, in gic_ipi_domain_alloc()
783 /* Set affinity to cpu. */ in gic_ipi_domain_alloc()
785 cpumask_of(cpu)); in gic_ipi_domain_alloc()
790 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); in gic_ipi_domain_alloc()
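gic_ipi_domain_alloc() thus hands out one shared hwirq per CPU in the mask, carved consecutively out of the ipi_available bitmap, and routes each resulting virq to its CPU. Condensed, with error handling and the irq_domain_set_hwirq_and_chip()/irq_set_irq_type() steps elided:

	irq_hw_number_t hwirq, base_hwirq;
	int cpu, i = 0;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
		/* bind virq + i to hwirq, then route it to `cpu` */
		gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		i++;
	}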
824 is_ipi = d->bus_token == bus_token; in gic_ipi_domain_match()
825 return (!node || to_of_node(d->fwnode) == node) && is_ipi; in gic_ipi_domain_match()
850 return -ENXIO; in gic_register_ipi_domain()
856 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { in gic_register_ipi_domain()
860 * Reserve 2 interrupts per possible CPU/VP for use as IPIs, in gic_register_ipi_domain()
864 bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); in gic_register_ipi_domain()
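Reservation of IPI vectors therefore has two sources: an explicit <base count> window from the device tree, or a fallback computed from the possible-CPU count. The shape of the logic in gic_register_ipi_domain() is:

	u32 v[2];

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		/* the DT gives <base count> explicitly */
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Reserve 2 vectors per possible CPU/VP, carved out of
		 * the top of the shared interrupt range. */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}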
881 static int gic_cpu_startup(unsigned int cpu) in gic_cpu_startup() argument
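The CPU-hotplug startup callback registered at the end of probe is small: program the local control register for (non-)EIC mode, mask all local interrupts, then replay the cached "all VPEs" state. A sketch of its body, assuming the driver's change_gic_vl_ctl() local-control accessor:

	static int gic_cpu_startup(unsigned int cpu)
	{
		/* Enable or disable EIC, matching the platform's IRQ model */
		change_gic_vl_ctl(GIC_VX_CTL_EIC,
				  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

		/* Start from a clean slate: mask all local interrupts */
		write_gic_vl_rmask(~0);

		/* Re-apply the cached map/mask state (see above) */
		gic_all_vpes_irq_cpu_online();

		return 0;
	}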
900 unsigned long reserved; in gic_of_init() local
906 /* Find the first available CPU vector. */ in gic_of_init()
908 reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); in gic_of_init()
909 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", in gic_of_init()
911 reserved |= BIT(cpu_vec); in gic_of_init()
913 cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); in gic_of_init()
915 pr_err("No CPU vectors available\n"); in gic_of_init()
916 return -ENODEV; in gic_of_init()
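A worked example of the vector selection, under an assumed device tree carrying mti,reserved-cpu-vectors = <2>, <7>:

	/*
	 * reserved  = 0b00000011          SW0/SW1, always reserved
	 * reserved |= BIT(2) | BIT(7)     from the assumed DT property
	 *           = 0b10000111
	 *
	 * find_first_zero_bit() over the hweight_long(ST0_IM) = 8
	 * interrupt-mask bits then picks cpu_vec = 3, i.e. hardware
	 * pin 3 - GIC_CPU_PIN_OFFSET = 1 (cf. gic_cpu_pin below).
	 */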
922 * in the device-tree. in gic_of_init()
932 return -ENODEV; in gic_of_init()
948 return -ENOMEM; in gic_of_init()
961 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; in gic_of_init()
971 return -ENXIO; in gic_of_init()
984 * to gic_cpu_startup for each cpu. in gic_of_init()
1004 pr_warn("No CPU cores on the cluster %d skip it\n", cl); in gic_of_init()