/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

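/*
 * Reverse map: for each vector, the set of CPUs on which that vector
 * is currently allocated.  Protected by vector_lock.
 */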
static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

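/*
 * Returns 1 if the irq has been bound to a vector, -1 otherwise.
 */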
int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

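/*
 * Find a device vector that is free on every CPU in @domain.  Returns
 * the vector, -EINVAL if no CPU in the domain is online, or -ENOSPC
 * if all device vectors are taken.
 */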
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

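/*
 * Bind @irq to @vector on all CPUs in @domain.  Must be called with
 * vector_lock held.  Returns 0 on success, -EINVAL if no CPU in the
 * domain is online, or -EBUSY if the irq already has a different
 * vector assigned.
 */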
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

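/*
 * Undo __bind_irq_vector(): release @irq's vector on every CPU in its
 * domain.  Must be called with vector_lock held.
 */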
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

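/*
 * Allocate a free device vector for @irq, trying the allocation
 * domain of each online CPU in turn.  With AUTO_ASSIGN the irq number
 * becomes the vector number.  Returns the vector or -ENOSPC.
 */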
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

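/*
 * Vectors are allocated either from a single domain spanning all CPUs
 * (the default) or, with the "vector=percpu" boot option, from a
 * per-CPU domain, giving each CPU its own range of device vectors.
 */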
static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

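/*
 * Prepare to migrate @irq to @cpu: allocate a new vector in the
 * target CPU's allocation domain and mark the move as in progress.
 * The old vector is released later by the move-cleanup IPI handler.
 * Must be called with vector_lock held.
 */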
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

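/*
 * Called from the interrupt path once @irq has been serviced through
 * its new vector: kick every online CPU in the old domain with
 * IA64_IRQ_MOVE_VECTOR so each one can tear down the old mapping.
 */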
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

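/*
 * IA64_IRQ_MOVE_VECTOR handler: scan this CPU's vector table and
 * release any vector belonging to an irq whose migration away from
 * this CPU is pending cleanup.
 */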
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Process the interrupt as ia64_handle_irq()
			 * would have from a real interrupt handler,
			 * passing NULL for pt_regs.  This path could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};

#endif

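/*
 * Register a per-CPU irq whose irq number equals its vector, bound on
 * all CPUs and handled by the LSAPIC irq chip.
 */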
void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

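/*
 * Send an interprocessor interrupt by writing the vector and delivery
 * mode to the target CPU's slot in the processor interrupt block.
 */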
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The physical cpu number is encoded as an 8-bit ID and an
	 * 8-bit EID.
	 */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}