Lines matching "reserved-cpu-vectors" (tokens: reserved, cpu, vectors) in arch/x86/kernel/apic/vector.c
1 // SPDX-License-Identifier: GPL-2.0-only
30 unsigned int cpu; member
67 info->mask = mask; in init_irq_alloc_info()
83 while (irqd->parent_data) in apic_chip_data()
84 irqd = irqd->parent_data; in apic_chip_data()
86 return irqd->chip_data; in apic_chip_data()
93 return apicd ? &apicd->hw_irq_cfg : NULL; in irqd_cfg()
108 INIT_HLIST_NODE(&apicd->clist); in alloc_apic_chip_data()
118 unsigned int cpu) in apic_update_irq_cfg() argument
124 apicd->hw_irq_cfg.vector = vector; in apic_update_irq_cfg()
125 apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); in apic_update_irq_cfg()
126 irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); in apic_update_irq_cfg()
127 trace_vector_config(irqd->irq, vector, cpu, in apic_update_irq_cfg()
128 apicd->hw_irq_cfg.dest_apicid); in apic_update_irq_cfg()
140 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, in apic_update_vector()
141 apicd->cpu); in apic_update_vector()
149 apicd->prev_vector = 0; in apic_update_vector()
150 if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) in apic_update_vector()
153  * If the target CPU of the previous vector is online, then mark in apic_update_vector()
154  * the vector as move in progress and store it for cleanup when the in apic_update_vector()
155  * first interrupt on the new vector arrives. If the target CPU is in apic_update_vector()
156  * offline then the regular release mechanism via the cleanup in apic_update_vector()
157  * vector is not possible and the vector can be immediately freed in apic_update_vector()
158  * in the underlying matrix allocator. in apic_update_vector()
160 if (cpu_online(apicd->cpu)) { in apic_update_vector()
161 apicd->move_in_progress = true; in apic_update_vector()
162 apicd->prev_vector = apicd->vector; in apic_update_vector()
163 apicd->prev_cpu = apicd->cpu; in apic_update_vector()
164 WARN_ON_ONCE(apicd->cpu == newcpu); in apic_update_vector()
166 irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, in apic_update_vector()
171 apicd->vector = newvec; in apic_update_vector()
172 apicd->cpu = newcpu; in apic_update_vector()
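The comment above captures this file's central life-cycle rule: a vector moved away from a still-online CPU must be parked for cleanup until the first interrupt arrives on the new vector, while a vector whose CPU is already offline can be freed in the matrix right away. Below is a self-contained userspace sketch of that decision; toy_apicd, vector_used and all other names here are invented for illustration, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS    4
#define NR_VECTORS 256

static bool vector_used[NR_CPUS][NR_VECTORS];        /* toy irq matrix  */
static bool cpu_is_online[NR_CPUS] = { true, true }; /* CPUs 0,1 online */

struct toy_apicd {
        unsigned int vector, cpu;
        unsigned int prev_vector, prev_cpu;
        bool move_in_progress;
};

/* Sketch of apic_update_vector(): defer the free while the old CPU can
 * still receive the old vector; free immediately if that CPU is gone. */
static void toy_update_vector(struct toy_apicd *d, unsigned int newvec,
                              unsigned int newcpu)
{
        d->prev_vector = 0;
        if (d->vector) {
                if (cpu_is_online[d->cpu]) {
                        d->move_in_progress = true;
                        d->prev_vector = d->vector;
                        d->prev_cpu = d->cpu;
                } else {
                        vector_used[d->cpu][d->vector] = false;
                }
        }
        d->vector = newvec;
        d->cpu = newcpu;
        vector_used[newcpu][newvec] = true;
}

int main(void)
{
        struct toy_apicd d = { .vector = 0x22, .cpu = 0 };

        vector_used[0][0x22] = true;
        toy_update_vector(&d, 0x23, 1);
        printf("move_in_progress=%d prev=0x%x@cpu%u new=0x%x@cpu%u\n",
               d.move_in_progress, d.prev_vector, d.prev_cpu,
               d.vector, d.cpu);
        return 0;
}

Deferring the free is what keeps a device interrupt that was already latched for the old vector from hitting a slot that has since been handed to another device.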
179 unsigned int cpu = cpumask_first(cpu_online_mask); in vector_assign_managed_shutdown() local
181 apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu); in vector_assign_managed_shutdown()
192 apicd->is_managed = true; in reserve_managed_vector()
195 trace_vector_reserve_managed(irqd->irq, ret); in reserve_managed_vector()
204 apicd->can_reserve = true; in reserve_irq_vector_locked()
205 apicd->has_reserved = true; in reserve_irq_vector_locked()
207 trace_vector_reserve(irqd->irq, 0); in reserve_irq_vector_locked()
225 bool resvd = apicd->has_reserved; in assign_vector_locked()
226 unsigned int cpu = apicd->cpu; in assign_vector_locked() local
227 int vector = apicd->vector; in assign_vector_locked()
232  * If the current target CPU is online and in the new requested in assign_vector_locked()
233  * affinity mask, there is no point in moving the interrupt from in assign_vector_locked()
234  * one CPU to another. in assign_vector_locked()
236 if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest)) in assign_vector_locked()
242 * leave a stale vector on some CPU around or in case of a pending in assign_vector_locked()
245 if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist)) in assign_vector_locked()
246 return -EBUSY; in assign_vector_locked()
248 vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu); in assign_vector_locked()
249 trace_vector_alloc(irqd->irq, vector, resvd, vector); in assign_vector_locked()
252 apic_update_vector(irqd, vector, cpu); in assign_vector_locked()
253 apic_update_irq_cfg(irqd, vector, cpu); in assign_vector_locked()
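assign_vector_locked() above encodes two guards worth calling out: a request that is already satisfied costs nothing, and a new assignment is refused with -EBUSY while a previous move is still pending, because it would leave a stale vector behind. A compact model of just that control flow (toy types, with 'alloc' standing in for irq_matrix_alloc()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpu_is_online[NR_CPUS] = { true, true, true, true };

struct toy_irq {
        int vector;             /* 0 = no vector assigned           */
        unsigned int cpu;       /* current target CPU               */
        bool move_in_progress;  /* previous move not yet cleaned up */
};

/* Sketch of assign_vector_locked(); 'dest' is a CPU mask as bool array. */
static int toy_assign(struct toy_irq *d, const bool dest[NR_CPUS],
                      int (*alloc)(const bool *, unsigned int *))
{
        unsigned int newcpu;
        int vec;

        /* Already on an online CPU inside the requested mask: done. */
        if (d->vector && cpu_is_online[d->cpu] && dest[d->cpu])
                return 0;

        /* A pending move would leave a stale vector behind: refuse. */
        if (d->move_in_progress)
                return -EBUSY;

        vec = alloc(dest, &newcpu);
        if (vec < 0)
                return vec;
        d->vector = vec;
        d->cpu = newcpu;
        return 0;
}

static int first_fit_alloc(const bool *dest, unsigned int *cpu)
{
        for (unsigned int c = 0; c < NR_CPUS; c++) {
                if (dest[c] && cpu_is_online[c]) {
                        *cpu = c;
                        return 0x30; /* pretend vector 0x30 is free */
                }
        }
        return -ENOSPC;
}

int main(void)
{
        struct toy_irq d = { .vector = 0x22, .cpu = 0 };
        bool dest[NR_CPUS] = { false, true, false, false };
        int ret = toy_assign(&d, dest, first_fit_alloc);

        printf("assign -> %d (now 0x%x@cpu%u)\n",
               ret, (unsigned int)d.vector, d.cpu);
        return 0;
}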
272 /* Get the affinity mask - either irq_default_affinity or (user) set */ in assign_irq_vector_any_locked()
303 if (info->mask) in assign_irq_vector_policy()
304 return assign_irq_vector(irqd, info->mask); in assign_irq_vector_policy()
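The comment above summarizes the policy: a mask supplied with the allocation request wins, otherwise the system default affinity is used. As a one-function sketch with a toy cpumask type (all names invented):

#include <stdio.h>

typedef unsigned long toy_cpumask; /* toy: one bit per CPU */

static const toy_cpumask irq_default_affinity_toy = 0x0f;

/* Sketch of the mask selection in assign_irq_vector_policy(). */
static toy_cpumask pick_affinity(const toy_cpumask *info_mask)
{
        return info_mask ? *info_mask : irq_default_affinity_toy;
}

int main(void)
{
        toy_cpumask user = 0x2;

        printf("user-set: %#lx, default: %#lx\n",
               pick_affinity(&user), pick_affinity(NULL));
        return 0;
}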
317 int vector, cpu; in assign_managed_vector() local
322 if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) in assign_managed_vector()
325 &cpu); in assign_managed_vector()
326 trace_vector_alloc_managed(irqd->irq, vector, vector); in assign_managed_vector()
329 apic_update_vector(irqd, vector, cpu); in assign_managed_vector()
330 apic_update_irq_cfg(irqd, vector, cpu); in assign_managed_vector()
338 unsigned int vector = apicd->vector; in clear_irq_vector()
345 trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, in clear_irq_vector()
346 apicd->prev_cpu); in clear_irq_vector()
348 per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
349 irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); in clear_irq_vector()
350 apicd->vector = 0; in clear_irq_vector()
353 vector = apicd->prev_vector; in clear_irq_vector()
357 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
358 irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); in clear_irq_vector()
359 apicd->prev_vector = 0; in clear_irq_vector()
360 apicd->move_in_progress = 0; in clear_irq_vector()
361 hlist_del_init(&apicd->clist); in clear_irq_vector()
369 trace_vector_deactivate(irqd->irq, apicd->is_managed, in x86_vector_deactivate()
370 apicd->can_reserve, false); in x86_vector_deactivate()
373 if (!apicd->is_managed && !apicd->can_reserve) in x86_vector_deactivate()
376 if (apicd->has_reserved) in x86_vector_deactivate()
381 if (apicd->can_reserve) in x86_vector_deactivate()
395 apicd->has_reserved = false; in activate_reserved()
404 apicd->can_reserve = false; in activate_reserved()
414 irqd->irq); in activate_reserved()
428 pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); in activate_managed()
429 return -EINVAL; in activate_managed()
439 irqd->irq); in activate_managed()
451 trace_vector_activate(irqd->irq, apicd->is_managed, in x86_vector_activate()
452 apicd->can_reserve, reserve); in x86_vector_activate()
455 if (!apicd->can_reserve && !apicd->is_managed) in x86_vector_activate()
459 else if (apicd->is_managed) in x86_vector_activate()
461 else if (apicd->has_reserved) in x86_vector_activate()
472 trace_vector_teardown(irqd->irq, apicd->is_managed, in vector_free_reserved_and_managed()
473 apicd->has_reserved); in vector_free_reserved_and_managed()
475 if (apicd->has_reserved) in vector_free_reserved_and_managed()
477 if (apicd->is_managed) in vector_free_reserved_and_managed()
491 if (irqd && irqd->chip_data) { in x86_vector_free_irqs()
495 apicd = irqd->chip_data; in x86_vector_free_irqs()
509 apicd->vector = ISA_IRQ_VECTOR(virq); in vector_configure_legacy()
510 apicd->cpu = 0; in vector_configure_legacy()
519 apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); in vector_configure_legacy()
522 apicd->can_reserve = true; in vector_configure_legacy()
540 return -ENXIO; in x86_vector_alloc_irqs()
543 if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1) in x86_vector_alloc_irqs()
544 return -ENOSYS; in x86_vector_alloc_irqs()
550 WARN_ON_ONCE(irqd->chip_data); in x86_vector_alloc_irqs()
553 err = -ENOMEM; in x86_vector_alloc_irqs()
557 apicd->irq = virq + i; in x86_vector_alloc_irqs()
558 irqd->chip = &lapic_controller; in x86_vector_alloc_irqs()
559 irqd->chip_data = apicd; in x86_vector_alloc_irqs()
560 irqd->hwirq = virq + i; in x86_vector_alloc_irqs()
573  * Legacy vectors are already assigned when the IOAPIC in x86_vector_alloc_irqs()
574  * takes them over. They stay on the same vector. in x86_vector_alloc_irqs()
579 if (info->flags & X86_IRQ_ALLOC_LEGACY) { in x86_vector_alloc_irqs()
587 irqd->chip_data = NULL; in x86_vector_alloc_irqs()
613 irq = irqd->irq; in x86_vector_debug_show()
620 if (!irqd->chip_data) { in x86_vector_debug_show()
626 memcpy(&apicd, irqd->chip_data, sizeof(apicd)); in x86_vector_debug_show()
630 seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu); in x86_vector_debug_show()
677 return legacy_pic->probe(); in arch_probe_nr_irqs()
683  * Use assign system here so it won't get accounted as allocated in lapic_assign_legacy_vector()
684  * and moveable in the cpu hotplug check and it prevents managed in lapic_assign_legacy_vector()
685  * irq reservation from touching it. in lapic_assign_legacy_vector()
700 /* System vectors are reserved, online it */ in lapic_assign_system_vectors()
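Both lines above deal with vectors that device interrupts must never be handed: legacy ISA vectors are assigned as "system" so the hotplug accounting ignores them, and the system vector range is blocked on every CPU before any device vector is allocated. A toy version of that carve-out; the 0xec boundary echoes x86's FIRST_SYSTEM_VECTOR but is illustrative here, as is everything else:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS             4
#define NR_VECTORS          256
#define FIRST_SYSTEM_VECTOR 0xec /* illustrative boundary */

static bool slot_blocked[NR_CPUS][NR_VECTORS];

/* Sketch of lapic_assign_system_vectors(): mark the system vector range
 * unavailable on every CPU so device allocations can never land there. */
static void toy_reserve_system_vectors(void)
{
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                for (unsigned int vec = FIRST_SYSTEM_VECTOR; vec < NR_VECTORS; vec++)
                        slot_blocked[cpu][vec] = true;
}

int main(void)
{
        toy_reserve_system_vectors();
        printf("0xf2 blocked on cpu1: %d, 0x40 blocked: %d\n",
               slot_blocked[1][0xf2], slot_blocked[1][0x40]);
        return 0;
}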
738 int isairq = vector - ISA_IRQ_VECTOR(0); in __setup_vector_irq()
749 /* Online the local APIC infrastructure and initialize the vectors */
756 /* Online the vector matrix array for this CPU */ in lapic_online()
763  * they can be distributed to any online CPU in hardware. The in lapic_online()
764  * kernel has no influence on that. So all active legacy vectors in lapic_online()
765  * must be installed on all CPUs. in lapic_online()
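Because the PIC can deliver legacy interrupts to any online CPU and the kernel cannot steer that, every CPU that comes online must carry all ISA vector-to-irq translations, which is what the __setup_vector_irq() line above computes. A sketch of that install step; 0x30 stands in for ISA_IRQ_VECTOR(0) on x86 and the table is a toy, not the kernel's per-CPU vector_irq:

#include <stdio.h>

#define NR_CPUS        4
#define NR_VECTORS     256
#define NR_LEGACY_IRQS 16
#define ISA_VECTOR_0   0x30 /* stand-in for ISA_IRQ_VECTOR(0) */

/* Toy table stores irq+1 so the default of 0 can mean "unused". */
static int toy_vector_irq[NR_CPUS][NR_VECTORS];

/* Sketch of __setup_vector_irq(): install every legacy mapping into the
 * freshly onlined CPU's vector translation table. */
static void toy_online_cpu(unsigned int cpu)
{
        for (int isairq = 0; isairq < NR_LEGACY_IRQS; isairq++)
                toy_vector_irq[cpu][ISA_VECTOR_0 + isairq] = isairq + 1;
}

int main(void)
{
        toy_online_cpu(2);
        /* vector 0x31 on cpu2 now maps to ISA irq 1. */
        printf("cpu2 vector 0x31 -> irq %d\n",
               toy_vector_irq[2][0x31] - 1);
        return 0;
}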
785 return -EIO; in apic_set_affinity()
807 apic->send_IPI(apicd->cpu, apicd->vector); in apic_retrigger_irq()
837 unsigned int vector = apicd->prev_vector; in free_moved_vector()
838 unsigned int cpu = apicd->prev_cpu; in free_moved_vector() local
839 bool managed = apicd->is_managed; in free_moved_vector()
842  * Managed interrupts are usually not migrated away in free_moved_vector()
843  * from an online CPU, but CPU isolation 'managed_irq' in free_moved_vector()
844  * can make that happen. in free_moved_vector()
845  * 1) Activation does not take the isolation into account in free_moved_vector()
846  *    to keep the code simple in free_moved_vector()
847  * 2) Migration away from an isolated CPU can happen when in free_moved_vector()
848  *    a non-isolated CPU which is in the calculated in free_moved_vector()
849  *    affinity mask comes online. in free_moved_vector()
851 trace_vector_free_moved(apicd->irq, cpu, vector, managed); in free_moved_vector()
852 irq_matrix_free(vector_matrix, cpu, vector, managed); in free_moved_vector()
853 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; in free_moved_vector()
854 hlist_del_init(&apicd->clist); in free_moved_vector()
855 apicd->prev_vector = 0; in free_moved_vector()
856 apicd->move_in_progress = 0; in free_moved_vector()
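free_moved_vector() is the tail end of the move protocol: once nothing can reference the old vector anymore, the (cpu, vector) slot goes back to the matrix, the per-CPU translation entry is invalidated and the tracking state is reset. In toy form, mirroring the five lines above (self-contained invented names):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS    4
#define NR_VECTORS 256
#define TOY_UNUSED (-1)

static bool vector_used[NR_CPUS][NR_VECTORS];
static int  toy_vector_irq[NR_CPUS][NR_VECTORS];

struct toy_apicd {
        unsigned int prev_vector, prev_cpu;
        bool move_in_progress;
};

/* Sketch of free_moved_vector(): release the old slot, invalidate the
 * per-CPU translation entry and clear the move bookkeeping. */
static void toy_free_moved(struct toy_apicd *d)
{
        vector_used[d->prev_cpu][d->prev_vector] = false; /* matrix free */
        toy_vector_irq[d->prev_cpu][d->prev_vector] = TOY_UNUSED;
        d->prev_vector = 0;
        d->move_in_progress = false;
}

int main(void)
{
        struct toy_apicd d = {
                .prev_vector = 0x22, .prev_cpu = 0, .move_in_progress = true,
        };

        vector_used[0][0x22] = true;
        toy_free_moved(&d);
        printf("slot free=%d move_in_progress=%d\n",
               !vector_used[0][0x22], d.move_in_progress);
        return 0;
}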
866 /* Prevent vectors vanishing under us */ in DEFINE_IDTENTRY_SYSVEC()
870 unsigned int irr, vector = apicd->prev_vector; in DEFINE_IDTENTRY_SYSVEC()
877  * IRQ_MOVE_CLEANUP_VECTOR is re-sent to this CPU; it is the lowest in DEFINE_IDTENTRY_SYSVEC()
878  * priority external vector, so the pending device interrupt is serviced first. in DEFINE_IDTENTRY_SYSVEC()
883 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); in DEFINE_IDTENTRY_SYSVEC()
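The handler excerpted above must not free a vector whose interrupt is already latched in the local APIC's IRR; instead it re-sends IRQ_MOVE_CLEANUP_VECTOR to itself and retries after the pending device interrupt has been serviced. A sketch of that retry decision, with a fake IRR array in place of APIC register reads (all names invented):

#include <stdbool.h>
#include <stdio.h>

#define NR_VECTORS 256

static bool fake_irr[NR_VECTORS]; /* stand-in for the APIC IRR */

struct toy_pending {
        unsigned int prev_vector; /* 0 once cleaned up */
};

/* Sketch of the cleanup handler body: free the old vector only when it
 * is no longer latched; otherwise ask for another cleanup pass. */
static bool toy_try_cleanup(struct toy_pending *p)
{
        if (fake_irr[p->prev_vector])
                return false; /* caller re-sends the cleanup IPI to itself */
        printf("vector 0x%x freed\n", p->prev_vector);
        p->prev_vector = 0;
        return true;
}

int main(void)
{
        struct toy_pending p = { .prev_vector = 0x41 };

        fake_irr[0x41] = true;
        printf("first pass done: %d\n", toy_try_cleanup(&p));
        fake_irr[0x41] = false; /* device interrupt serviced meanwhile */
        printf("second pass done: %d\n", toy_try_cleanup(&p));
        return 0;
}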
894 unsigned int cpu; in __send_cleanup_vector() local
897 apicd->move_in_progress = 0; in __send_cleanup_vector()
898 cpu = apicd->prev_cpu; in __send_cleanup_vector()
899 if (cpu_online(cpu)) { in __send_cleanup_vector()
900 hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu)); in __send_cleanup_vector()
901 apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR); in __send_cleanup_vector()
903 apicd->prev_vector = 0; in __send_cleanup_vector()
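__send_cleanup_vector() shows the sender side of the same protocol: cleanup has to run on the CPU that still owns the old vector, so the descriptor is queued on that CPU's cleanup list and the CPU is kicked with IRQ_MOVE_CLEANUP_VECTOR; if that CPU already went offline there is nothing left to clean up remotely. A sketch with a trivial singly-linked list instead of a per-CPU hlist (invented names throughout):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct toy_apicd {
        unsigned int prev_vector, prev_cpu;
        bool move_in_progress;
        struct toy_apicd *next; /* toy cleanup-list linkage */
};

static struct toy_apicd *cleanup_list[NR_CPUS];
static bool cpu_is_online[NR_CPUS] = { true, true };

static void toy_send_ipi(unsigned int cpu)
{
        printf("IPI(cleanup) -> cpu%u\n", cpu);
}

/* Sketch of __send_cleanup_vector(): queue on the old CPU and kick it,
 * or just drop the stale state if that CPU is gone. */
static void toy_send_cleanup(struct toy_apicd *d)
{
        unsigned int cpu = d->prev_cpu;

        d->move_in_progress = false;
        if (cpu_is_online[cpu]) {
                d->next = cleanup_list[cpu];
                cleanup_list[cpu] = d;
                toy_send_ipi(cpu);
        } else {
                d->prev_vector = 0;
        }
}

int main(void)
{
        struct toy_apicd d = { .prev_vector = 0x22, .prev_cpu = 1,
                               .move_in_progress = true };

        toy_send_cleanup(&d);
        printf("queued on cpu1: %d\n", cleanup_list[1] == &d);
        return 0;
}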
913 if (apicd->move_in_progress) in send_cleanup_vector()
922 if (likely(!apicd->move_in_progress)) in irq_complete_move()
926  * If the interrupt arrived on the new target CPU, cleanup the in irq_complete_move()
927  * vector on the old target CPU. A vector check is not required in irq_complete_move()
928  * because an interrupt can never move from one vector to another in irq_complete_move()
929  * on the same CPU. in irq_complete_move()
931 if (apicd->cpu == smp_processor_id()) in irq_complete_move()
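irq_complete_move() is the trigger for all of the above: the first interrupt observed on the new target CPU proves the hardware has switched over, so the old vector can be released. The condition reduces to a few lines (toy types again, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct toy_irq {
        unsigned int cpu;       /* new target CPU */
        bool move_in_progress;
};

/* Sketch of irq_complete_move(): kick cleanup only when this interrupt
 * arrived on the new target CPU. No vector check is needed because an
 * interrupt never moves between vectors on the same CPU. */
static void toy_complete_move(struct toy_irq *d, unsigned int this_cpu,
                              void (*send_cleanup)(struct toy_irq *))
{
        if (!d->move_in_progress)
                return;
        if (d->cpu == this_cpu)
                send_cleanup(d);
}

static void noisy_cleanup(struct toy_irq *d)
{
        d->move_in_progress = false;
        printf("cleanup scheduled\n");
}

int main(void)
{
        struct toy_irq d = { .cpu = 2, .move_in_progress = true };

        toy_complete_move(&d, 1, noisy_cleanup); /* old CPU: nothing  */
        toy_complete_move(&d, 2, noisy_cleanup); /* new CPU: complete */
        return 0;
}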
936 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
966 vector = apicd->prev_vector; in irq_force_complete_move()
971  * This is tricky. If the cleanup of the old vector has not been in irq_force_complete_move()
972  * done yet, then the following setaffinity call would fail with in irq_force_complete_move()
973  * -EBUSY. This can leave the interrupt in a stale state. in irq_force_complete_move()
985 if (apicd->move_in_progress) { in irq_force_complete_move()
989  * set_ioapic(new_vector) <-- Interrupt is raised before the update is in irq_force_complete_move()
990  *                            effective, i.e. it's raised on the old vector. in irq_force_complete_move()
993  * So if the target cpu cannot handle that interrupt before the old vector in irq_force_complete_move()
994  * is cleaned up, we get a spurious interrupt and the irq line may go stale. in irq_force_complete_move()
997 * But in case of cpu hotplug this should be a non issue in irq_force_complete_move()
1000 * interrupt can be blocked on the target cpu because all cpus in irq_force_complete_move()
1006 * beyond the point where the target cpu disables interrupts in irq_force_complete_move()
1019 irqd->irq, vector); in irq_force_complete_move()
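irq_force_complete_move() handles the case where waiting is impossible: during CPU unplug no other CPU can run the cleanup interrupt because, per the comment fragments above, everything sits in stop-machine with interrupts disabled, so the old vector is torn down unconditionally. A sketch of that forced path, modelling only the state transition and none of the kernel's locking (invented names):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS    4
#define NR_VECTORS 256

static bool vector_used[NR_CPUS][NR_VECTORS];

struct toy_apicd {
        unsigned int prev_vector, prev_cpu;
        bool move_in_progress;
};

/* Sketch of irq_force_complete_move(): with all CPUs quiesced, no
 * cleanup IPI can be in flight, so the old vector is freed right here
 * instead of waiting for the next interrupt on the new vector. */
static void toy_force_complete(struct toy_apicd *d)
{
        if (!d->prev_vector)
                return; /* no move pending: nothing to do */
        vector_used[d->prev_cpu][d->prev_vector] = false;
        d->prev_vector = 0;
        d->move_in_progress = false;
}

int main(void)
{
        struct toy_apicd d = { .prev_vector = 0x22, .prev_cpu = 3,
                               .move_in_progress = true };

        vector_used[3][0x22] = true;
        toy_force_complete(&d);
        printf("old slot free: %d\n", !vector_used[3][0x22]);
        return 0;
}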
1028 * Note, this is not accurate accounting, but at least good enough to
1029 * prevent that the actual interrupt move will run out of vectors.
1033 unsigned int rsvd, avl, tomove, cpu = smp_processor_id(); in lapic_can_unplug_cpu() local
1040 pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n", in lapic_can_unplug_cpu()
1041 cpu, tomove, avl); in lapic_can_unplug_cpu()
1042 ret = -ENOSPC; in lapic_can_unplug_cpu()
1047 pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n", in lapic_can_unplug_cpu()
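lapic_can_unplug_cpu() turns the matrix accounting into a go/no-go answer: offlining is refused outright when the remaining CPUs cannot absorb the dying CPU's vectors, and merely warned about when reservations exceed what is left. The arithmetic in isolation; as the comment above notes, the accounting is approximate, and this sketch uses invented names:

#include <errno.h>
#include <stdio.h>

/* Sketch of lapic_can_unplug_cpu()'s two checks:
 * tomove = vectors active on the CPU being unplugged,
 * avl    = free vectors on all remaining CPUs,
 * rsvd   = reserved (not yet activated) vectors system-wide. */
static int toy_can_unplug(unsigned int tomove, unsigned int avl,
                          unsigned int rsvd)
{
        if (avl < tomove) {
                fprintf(stderr,
                        "cannot unplug: %u vectors to move, %u available\n",
                        tomove, avl);
                return -ENOSPC;
        }
        if (avl < rsvd)
                fprintf(stderr,
                        "reserved %u > available %u: requests may fail\n",
                        rsvd, avl);
        return 0;
}

int main(void)
{
        printf("ok case: %d\n", toy_can_unplug(10, 50, 20));
        printf("fail case: %d\n", toy_can_unplug(60, 50, 20));
        return 0;
}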
1074 pr_debug("printing local APIC contents on CPU#%d/%d:\n", in print_local_APIC()
1178 int cpu; in print_local_APICs() local
1184 for_each_online_cpu(cpu) { in print_local_APICs()
1185 if (cpu >= maxcpu) in print_local_APICs()
1187 smp_call_function_single(cpu, print_local_APIC, NULL, 1); in print_local_APICs()
1227 int num = -1; in setup_show_lapic()