/*
 * linux/arch/unicore32/include/asm/system.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_SYSTEM_H__
#define __UNICORE_SYSTEM_H__

#ifdef __KERNEL__

/*
 * CR1 bits (CP#0 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_D	(1 << 2)	/* Dcache enable			*/
#define CR_I	(1 << 3)	/* Icache enable			*/
#define CR_B	(1 << 4)	/* Dcache write mechanism: write back	*/
#define CR_T	(1 << 5)	/* Burst enable				*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
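
/*
 * Illustrative sketch only (not part of this header's interface): the
 * bits above are read and written through get_cr()/set_cr() declared
 * below, so enabling the Icache could look roughly like
 *
 *	set_cr(get_cr() | CR_I);
 *
 * Callers will normally want adjust_cr() instead, so that the cached
 * cr_alignment/cr_no_alignment values stay consistent with the register.
 */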

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct thread_info;
struct task_struct;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void uc32_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
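
/*
 * Usage sketch (illustrative, not a definition from this file): xchg()
 * atomically stores the new value and hands back the old one, e.g. a
 * simple hand-rolled claim on an int flag:
 *
 *	static int claimed;
 *	...
 *	if (xchg(&claimed, 1) == 0)
 *		we_got_it();	(we_got_it() is a hypothetical helper)
 *
 * Only 1- and 4-byte operands are supported; see __xchg() below.
 */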

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

#define vectors_high()	(cr_alignment & CR_V)

#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("" : : : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")

#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
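
/*
 * Illustrative pairing sketch (assumed usage, nothing here defines it):
 * the barriers follow the usual producer/consumer discipline,
 *
 *	writer:			reader:
 *		data = val;		if (flag) {
 *		wmb();				rmb();
 *		flag = 1;			use(data);
 *					}
 *
 * On this port every one of them reduces to a compiler barrier() or a
 * no-op, as the definitions above show.
 */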

extern unsigned long cr_no_alignment;	/* defined in entry-unicore.S */
extern unsigned long cr_alignment;	/* defined in entry-unicore.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("movc p0.c1, %0, #0	@set CR"
	  : : "r" (val) : "cc");
	isb();
}

extern void adjust_cr(unsigned long mask, unsigned long set);
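
/*
 * Sketch of intended use (the exact semantics live with the definition
 * of adjust_cr(), not in this header): pass a mask of CR bits to touch
 * and the value to set within that mask, for example
 *
 *	adjust_cr(CR_I, CR_I);		(set the Icache enable bit)
 *	adjust_cr(CR_D, 0);		(clear the Dcache enable bit)
 *
 * which is assumed to keep cr_alignment/cr_no_alignment in step with
 * the value written to the hardware register.
 */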

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
		struct thread_info *, struct thread_info *);
extern void panic(const char *fmt, ...);

#define switch_to(prev, next, last)					\
do {									\
	last = __switch_to(prev,					\
		task_thread_info(prev), task_thread_info(next));	\
} while (0)

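/*
 * Helper behind the xchg() macro above: swapb/swapw are the UniCore
 * atomic byte/word swap instructions, so only 1- and 4-byte operands
 * are handled; any other size is a fatal error via panic().
 */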
static inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		asm volatile("@	__xchg1\n"
		"	swapb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swapw	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	default:
		panic("xchg: bad data size: ptr 0x%p, size %d\n",
			ptr, size);
	}

	return ret;
}

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
		((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),	\
		(unsigned long)(o), (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
		__cmpxchg64_local_generic((ptr), (o), (n))
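
/*
 * Usage sketch (illustrative only): cmpxchg_local() stores `n' at *ptr
 * only when the current value equals `o', and always returns the value
 * that was found there, e.g.
 *
 *	old = cmpxchg_local(&counter, old, old + 1);
 *
 * As the comment above says, this is atomic only with respect to the
 * current CPU; it is not an SMP-safe primitive.
 */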

#include <asm-generic/cmpxchg.h>

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif