Lines Matching full:cpu

23 #include <linux/cpu.h>
137 * port needs to be freed at device/cpu down. So we cache the
146 __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, in __xen_register_percpu_irq() argument
154 snprintf(per_cpu(xen_timer_name, cpu), in __xen_register_percpu_irq()
155 sizeof(per_cpu(xen_timer_name, cpu)), in __xen_register_percpu_irq()
156 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
157 irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, in __xen_register_percpu_irq()
159 per_cpu(xen_timer_name, cpu), action->dev_id); in __xen_register_percpu_irq()
160 per_cpu(xen_timer_irq, cpu) = irq; in __xen_register_percpu_irq()
163 snprintf(per_cpu(xen_resched_name, cpu), in __xen_register_percpu_irq()
164 sizeof(per_cpu(xen_resched_name, cpu)), in __xen_register_percpu_irq()
165 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
166 irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, in __xen_register_percpu_irq()
168 per_cpu(xen_resched_name, cpu), action->dev_id); in __xen_register_percpu_irq()
169 per_cpu(xen_resched_irq, cpu) = irq; in __xen_register_percpu_irq()
172 snprintf(per_cpu(xen_ipi_name, cpu), in __xen_register_percpu_irq()
173 sizeof(per_cpu(xen_ipi_name, cpu)), in __xen_register_percpu_irq()
174 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
175 irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, in __xen_register_percpu_irq()
177 per_cpu(xen_ipi_name, cpu), action->dev_id); in __xen_register_percpu_irq()
178 per_cpu(xen_ipi_irq, cpu) = irq; in __xen_register_percpu_irq()
181 snprintf(per_cpu(xen_cmc_name, cpu), in __xen_register_percpu_irq()
182 sizeof(per_cpu(xen_cmc_name, cpu)), in __xen_register_percpu_irq()
183 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
184 irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, in __xen_register_percpu_irq()
187 per_cpu(xen_cmc_name, cpu), in __xen_register_percpu_irq()
189 per_cpu(xen_cmc_irq, cpu) = irq; in __xen_register_percpu_irq()
192 snprintf(per_cpu(xen_cmcp_name, cpu), in __xen_register_percpu_irq()
193 sizeof(per_cpu(xen_cmcp_name, cpu)), in __xen_register_percpu_irq()
194 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
195 irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, in __xen_register_percpu_irq()
198 per_cpu(xen_cmcp_name, cpu), in __xen_register_percpu_irq()
200 per_cpu(xen_cmcp_irq, cpu) = irq; in __xen_register_percpu_irq()
203 snprintf(per_cpu(xen_cpep_name, cpu), in __xen_register_percpu_irq()
204 sizeof(per_cpu(xen_cpep_name, cpu)), in __xen_register_percpu_irq()
205 "%s%d", action->name, cpu); in __xen_register_percpu_irq()
206 irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, in __xen_register_percpu_irq()
209 per_cpu(xen_cpep_name, cpu), in __xen_register_percpu_irq()
211 per_cpu(xen_cpep_irq, cpu) = irq; in __xen_register_percpu_irq()
231 * on cpu hotplug. in __xen_register_percpu_irq()
240 if (!cpu && save) { in __xen_register_percpu_irq()
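
The __xen_register_percpu_irq() matches above all follow one pattern: format a per-CPU name buffer, bind the virtual IRQ or IPI to a handler on that CPU, and cache the returned irq number in a per-CPU variable so it can be unbound at CPU down (the fragment at line 137 and the "!cpu && save" check suggest registrations are also cached on the boot CPU before the allocator is ready). A minimal sketch of that pattern for the timer case, assuming the standard Xen event-channel API bind_virq_to_irqhandler() and hypothetical per-CPU variables (my_timer_name, my_timer_irq) standing in for the xen_timer_* ones in the listing:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <xen/events.h>

/* Illustrative per-CPU name buffer and cached irq; buffer size assumed. */
static DEFINE_PER_CPU(char[16], my_timer_name);
static DEFINE_PER_CPU(int, my_timer_irq) = -1;

/* Bind the per-CPU ITC timer VIRQ on 'cpu' and cache the irq for teardown at CPU down. */
static int register_timer_virq(unsigned int cpu, struct irqaction *action)
{
        int irq;

        snprintf(per_cpu(my_timer_name, cpu),
                 sizeof(per_cpu(my_timer_name, cpu)),
                 "%s%d", action->name, cpu);
        irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, action->handler,
                                      action->flags,  /* flags argument assumed */
                                      per_cpu(my_timer_name, cpu),
                                      action->dev_id);
        if (irq < 0)
                return irq;
        per_cpu(my_timer_irq, cpu) = irq;  /* cached so CPU-down can unbind it */
        return 0;
}

The same shape repeats for the reschedule, IPI, CMC, CMCP and CPEP vectors, with bind_ipi_to_irqhandler() used for the IPI-style vectors.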
280 unsigned int cpu = (unsigned long)hcpu; in unbind_evtchn_callback() local
284 if (per_cpu(xen_cpep_irq, cpu) >= 0) { in unbind_evtchn_callback()
285 unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), in unbind_evtchn_callback()
287 per_cpu(xen_cpep_irq, cpu) = -1; in unbind_evtchn_callback()
289 if (per_cpu(xen_cmcp_irq, cpu) >= 0) { in unbind_evtchn_callback()
290 unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), in unbind_evtchn_callback()
292 per_cpu(xen_cmcp_irq, cpu) = -1; in unbind_evtchn_callback()
294 if (per_cpu(xen_cmc_irq, cpu) >= 0) { in unbind_evtchn_callback()
295 unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); in unbind_evtchn_callback()
296 per_cpu(xen_cmc_irq, cpu) = -1; in unbind_evtchn_callback()
298 if (per_cpu(xen_ipi_irq, cpu) >= 0) { in unbind_evtchn_callback()
299 unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); in unbind_evtchn_callback()
300 per_cpu(xen_ipi_irq, cpu) = -1; in unbind_evtchn_callback()
302 if (per_cpu(xen_resched_irq, cpu) >= 0) { in unbind_evtchn_callback()
303 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), in unbind_evtchn_callback()
305 per_cpu(xen_resched_irq, cpu) = -1; in unbind_evtchn_callback()
307 if (per_cpu(xen_timer_irq, cpu) >= 0) { in unbind_evtchn_callback()
308 unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), in unbind_evtchn_callback()
310 per_cpu(xen_timer_irq, cpu) = -1; in unbind_evtchn_callback()
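
The unbind_evtchn_callback() matches are the reverse of that registration: each cached per-CPU irq that is still valid (>= 0) is passed to unbind_from_irqhandler() and reset to -1 so a later CPU-up can rebind cleanly. A hedged sketch of the teardown for a single cached irq, reusing the hypothetical variables from the previous sketch:

/* Release one cached per-CPU binding on CPU down. */
static void unregister_timer_virq(unsigned int cpu)
{
        if (per_cpu(my_timer_irq, cpu) >= 0) {
                /* dev_id must match the one passed at bind time; NULL assumed here */
                unbind_from_irqhandler(per_cpu(my_timer_irq, cpu), NULL);
                per_cpu(my_timer_irq, cpu) = -1;
        }
}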
322 void xen_smp_intr_init_early(unsigned int cpu) in xen_smp_intr_init_early() argument
328 __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, in xen_smp_intr_init_early()
336 unsigned int cpu = smp_processor_id(); in xen_smp_intr_init() local
342 if (cpu == 0) { in xen_smp_intr_init()
343 /* Initialization was already done for boot cpu. */ in xen_smp_intr_init()
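
xen_smp_intr_init_early() appears to walk a saved table of per-CPU irqactions and register each one for the given CPU, while xen_smp_intr_init() returns early on CPU 0 because the boot CPU was already initialized. A rough sketch of that loop, with the table layout assumed (only the .irq member is visible in the listing):

/* Prototype from the listing; 'save' caches the action on the boot CPU before slab is ready. */
void __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
                               struct irqaction *action, int save);

/* Illustrative saved-registration table; the real saved_percpu_irqs layout is assumed. */
struct saved_percpu_irq {
        unsigned int irq;
        struct irqaction *action;
};
static struct saved_percpu_irq saved_irqs[6];
static unsigned int nr_saved_irqs;

/* Re-register every saved per-CPU irq on a newly started CPU (save == 0: no re-caching). */
static void smp_intr_init_early(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < nr_saved_irqs; i++)
                __xen_register_percpu_irq(cpu, saved_irqs[i].irq,
                                          saved_irqs[i].action, 0);
}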
370 xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) in xen_platform_send_ipi() argument
376 * This should be in __cpu_up(cpu) in ia64 smpboot.c in xen_platform_send_ipi()
380 xen_smp_intr_init_early(cpu); in xen_platform_send_ipi()
382 xen_send_ipi(cpu, vector); in xen_platform_send_ipi()
383 /* vcpu_prepare_and_up(cpu); */ in xen_platform_send_ipi()
390 xen_send_IPI_one(cpu, XEN_IPI_VECTOR); in xen_platform_send_ipi()
393 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); in xen_platform_send_ipi()
396 xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); in xen_platform_send_ipi()
399 xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); in xen_platform_send_ipi()
406 xen_send_ipi(cpu, IA64_TIMER_VECTOR); in xen_platform_send_ipi()
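
xen_platform_send_ipi() translates ia64 delivery vectors into Xen sends: the IPI, reschedule, CMCP and CPEP vectors go out through xen_send_IPI_one() with the matching XEN_*_VECTOR, the timer vector goes through xen_send_ipi(), and a not-yet-started target CPU first gets its per-CPU handlers registered via xen_smp_intr_init_early() before the wake-up IPI. A sketch of that dispatch; the IA64_* vector names and the helper prototypes are assumptions based on the listing:

#include <linux/kernel.h>

/* Helpers named in the listing; prototypes assumed for this sketch. */
extern void xen_send_IPI_one(unsigned int cpu, int vec);
extern void xen_send_ipi(int cpu, int vec);

/* Map an ia64 delivery vector onto the corresponding Xen IPI/event-channel send. */
static void platform_send_ipi(int cpu, int vector)
{
        switch (vector) {
        case IA64_IPI_VECTOR:
                xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
                break;
        case IA64_IPI_RESCHEDULE:
                xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
                break;
        case IA64_CMCP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
                break;
        case IA64_CPEP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
                break;
        case IA64_TIMER_VECTOR:
                xen_send_ipi(cpu, IA64_TIMER_VECTOR);
                break;
        default:
                printk(KERN_WARNING "unsupported IPI vector 0x%x\n", vector);
                break;
        }
}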