#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */


#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
#define TASK_SIZE       	TASK_SIZE_OF(current)
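
/*
 * Illustrative only (not part of this header): TASK_SIZE is the usual
 * upper bound when validating a user-supplied address range, e.g. a
 * sketch of such a check:
 *
 *	if (addr >= TASK_SIZE || len > TASK_SIZE - addr)
 *		return -EFAULT;
 */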

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
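
/*
 * Worked example (illustrative): nsec_per_cyc (see cpuinfo_ia64 below)
 * is precomputed as (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq,
 * so an ITC cycle count converts to nanoseconds with one multiply and
 * one shift:
 *
 *	ns = (cycles * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 */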

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
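
/*
 * Example (illustrative): asm/ptrace.h provides ia64_psr(), which casts
 * &regs->cr_ipsr to this struct so individual bits can be read by name.
 * KSTK_EIP() below uses it to fetch the restart-instruction slot:
 *
 *	unsigned long slot = ia64_psr(regs)->ri;	(yields 0, 1, or 2)
 */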

union ia64_isr {
	__u64  val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64  rv  : 16;
		__u64  eid : 8;
		__u64  id  : 8;
		__u64  ig  : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64  ve	:  1;  /* enable hw walker */
		__u64  reserved0:  1;  /* reserved */
		__u64  ps	:  6;  /* log page size */
		__u64  rid	: 24;  /* region id */
		__u64  reserved1: 32;  /* reserved */
	};
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer match value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;	/* physical processor socket id */
	unsigned short core_id;	/* core id */
	unsigned short thread_id; /* thread id */
	unsigned short num_log;	/* Total number of logical processors on
				 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
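
/*
 * Usage sketch (illustrative): always dereference afresh; saving the
 * address of local_cpu_data is exactly the mistake the comment above
 * warns about:
 *
 *	unsigned long f = local_cpu_data->itc_freq;	(this CPU)
 *	unsigned long g = cpu_data(3)->itc_freq;	(CPU 3)
 */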

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
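
/*
 * These four macros back the generic PR_SET_UNALIGN/PR_GET_UNALIGN and
 * PR_SET_FPEMU/PR_GET_FPEMU prctls.  Illustrative userland use (a
 * sketch, assuming <sys/prctl.h>):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	(SIGBUS on unaligned access)
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		(SIGFPE instead of fpswa emulation)
 */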

struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,     \
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!get_dumpable(current->mm))) {						\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants that haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r) 					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
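
/*
 * Lazy-FPH pattern (an illustrative sketch of what a disabled-fp-high
 * fault handler does, not a drop-in implementation): reload f32-f127
 * only when some other context owned them, then claim ownership:
 *
 *	if (!ia64_is_local_fpu_owner(current)) {
 *		if (current->thread.flags & IA64_THREAD_FPH_VALID)
 *			ia64_load_fpu(current->thread.fph);
 *		ia64_set_local_fpu_owner(current);
 *	}
 */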

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}
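
/*
 * Illustrative call (a sketch; va and pte stand for values the caller
 * has prepared): target_mask bit 0 selects the instruction TRs, bit 1
 * the data TRs, so pinning a page into a data TR slot looks like:
 *
 *	ia64_itr(0x2, IA64_TR_CURRENT_STACK, va, pte, PAGE_SHIFT);
 */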

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}
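
/*
 * Example (illustrative, with a made-up vector number and a placeholder
 * handler name): test whether external interrupt vector 0xef is pending:
 *
 *	if (ia64_get_irr(0xef))
 *		handle_pending_vector();
 */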

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}


/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
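
/*
 * Worked example: a spill to address 0xe0000000000010a8 maps to UNAT
 * bit ((0xe0000000000010a8 >> 3) & 0x3f) == 21, so ia64_set_unat()
 * clears bit 21 of *unat and ORs in (nat << 21).
 */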

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
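
/*
 * Worked example: ia64_rotr(0x3UL, 1) == 0x8000000000000001UL; a left
 * rotation by n is just a right rotation by 64 - n, hence ia64_rotl().
 */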

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
			 IDLE_NOMWAIT, IDLE_POLL};

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */