xref: /kvm-unit-tests/lib/arm64/asm/processor.h (revision 0917dc65eabbacb592456c0d1bb05e5828c23661)
1 #ifndef _ASMARM64_PROCESSOR_H_
2 #define _ASMARM64_PROCESSOR_H_
3 /*
4  * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 
/* System Control Register (SCTLR_EL1) bits */
#define SCTLR_EL1_EE	(1 << 25)	/* Exception endianness (EL1 data accesses) */
#define SCTLR_EL1_WXN	(1 << 19)	/* Writable regions are execute-never */
#define SCTLR_EL1_I	(1 << 12)	/* Instruction cache enable */
#define SCTLR_EL1_SA0	(1 << 4)	/* Stack alignment check enable for EL0 */
#define SCTLR_EL1_SA	(1 << 3)	/* Stack alignment check enable */
#define SCTLR_EL1_C	(1 << 2)	/* Data/unified cache enable */
#define SCTLR_EL1_A	(1 << 1)	/* Alignment check enable */
#define SCTLR_EL1_M	(1 << 0)	/* MMU enable for EL0/EL1 stage 1 */
18 
/*
 * CTR_EL0.DminLine: log2 of the number of words in the smallest data
 * cache line. Use the named shift constant in the mask as well, rather
 * than repeating the magic number 16.
 */
#define CTR_DMINLINE_SHIFT	16
#define CTR_DMINLINE_MASK	(0xf << CTR_DMINLINE_SHIFT)
#define CTR_DMINLINE(x)	\
	(((x) & CTR_DMINLINE_MASK) >> CTR_DMINLINE_SHIFT)
23 
24 #ifndef __ASSEMBLY__
25 #include <asm/ptrace.h>
26 #include <asm/esr.h>
27 #include <asm/sysreg.h>
28 #include <asm/barrier.h>
29 
/*
 * Exception vector table slots, in the order the arm64 architecture lays
 * them out: four entries (Synchronous, IRQ, FIQ, SError) for each of the
 * four sources — current EL using SP_EL0 (EL1t), current EL using SP_ELx
 * (EL1h), lower EL in AArch64, and lower EL in AArch32. Do not reorder:
 * the values index the vector handler tables.
 */
enum vector {
	EL1T_SYNC,
	EL1T_IRQ,
	EL1T_FIQ,
	EL1T_ERROR,
	EL1H_SYNC,
	EL1H_IRQ,
	EL1H_FIQ,
	EL1H_ERROR,
	EL0_SYNC_64,
	EL0_IRQ_64,
	EL0_FIQ_64,
	EL0_ERROR_64,
	EL0_SYNC_32,
	EL0_IRQ_32,
	EL0_FIQ_32,
	EL0_ERROR_32,
	VECTOR_MAX,	/* number of vectors, not a real slot */
};
49 
/* Number of exception classes: ESR_ELx.EC is a 6-bit field (2^6). */
#define EC_MAX 64

/* Handler invoked for any exception taken through vector @v. */
typedef void (*vector_fn)(enum vector v, struct pt_regs *regs,
			  unsigned int esr);
/* Handler for one specific exception class (ESR EC) of a vector. */
typedef void (*exception_fn)(struct pt_regs *regs, unsigned int esr);
/* Handler for interrupt vectors; interrupts carry no meaningful ESR. */
typedef void (*irq_handler_fn)(struct pt_regs *regs);
/* Install @fn as the handler for all exceptions taken through vector @v. */
extern void install_vector_handler(enum vector v, vector_fn fn);
/* Install @fn for exception class @ec (< EC_MAX) of vector @v. */
extern void install_exception_handler(enum vector v, unsigned int ec,
				      exception_fn fn);
/* Install @fn as the IRQ handler for vector @v. */
extern void install_irq_handler(enum vector v, irq_handler_fn fn);
/* Default dispatchers, usable as vector_fn entries. */
extern void default_vector_sync_handler(enum vector v, struct pt_regs *regs,
					unsigned int esr);
extern void default_vector_irq_handler(enum vector v, struct pt_regs *regs,
				       unsigned int esr);
/* Reset a VECTOR_MAX-sized handler table to the defaults above. */
extern void vector_handlers_default_init(vector_fn *handlers);

/* Dump the saved register state in @regs to the console. */
extern void show_regs(struct pt_regs *regs);
/*
 * Fetch the fault address into *far; the return value presumably reports
 * whether FAR is valid for the exception class in @esr — confirm against
 * the definition in lib/arm64/processor.c.
 */
extern bool get_far(unsigned int esr, unsigned long *far);
68 
69 static inline unsigned long current_level(void)
70 {
71 	unsigned long el;
72 	asm volatile("mrs %0, CurrentEL" : "=r" (el));
73 	return el & 0xc;
74 }
75 
/* Unmask IRQs: the #2 immediate targets the I bit of PSTATE.DAIF. */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #2" : : : "memory");
}
80 
/* Mask IRQs: the #2 immediate targets the I bit of PSTATE.DAIF. */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #2" : : : "memory");
}
85 
/* Read MPIDR_EL1, this CPU's multiprocessor affinity register. */
static inline uint64_t get_mpidr(void)
{
	return read_sysreg(mpidr_el1);
}
90 
/*
 * Bits of MPIDR_EL1 that identify a physical CPU: Aff3 (bits [39:32])
 * plus Aff2/Aff1/Aff0 (bits [23:0]).
 */
#define MPIDR_HWID_BITMASK 0xff00ffffff
extern int mpidr_to_cpu(uint64_t mpidr);

/*
 * Bit offset of affinity level 0/1/2/3 within MPIDR: 0, 8, 16 and 32
 * (Aff3 is not contiguous with Aff0-Aff2). Macro arguments are fully
 * parenthesized so expression arguments expand correctly.
 */
#define MPIDR_LEVEL_SHIFT(level) \
	(((1 << (level)) >> 1) << 3)
/* Extract the 8-bit affinity field of @mpidr for the given level. */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & 0xff)
98 
/*
 * Switch to user mode and call func(arg) on the stack sp_usr —
 * presumably does not return to the caller; confirm in the .S/.c
 * implementation.
 */
extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr);
/* True when currently executing in user mode (EL0). */
extern bool is_user(void);
/* True when stage-1 MMU translation is enabled (raw check). */
extern bool __mmu_enabled(void);
102 
/*
 * Read the virtual counter, CNTVCT_EL0. The ISB keeps the counter read
 * from being reordered before preceding instructions, so the value
 * reflects the program position at the time of the call.
 */
static inline u64 get_cntvct(void)
{
	isb();
	return read_sysreg(cntvct_el0);
}
108 
/* Read CNTFRQ_EL0, the system counter frequency in Hz. */
static inline u32 get_cntfrq(void)
{
	return read_sysreg(cntfrq_el0);
}
113 
/* Read CTR_EL0, the cache type register. */
static inline u64 get_ctr(void)
{
	return read_sysreg(ctr_el0);
}

/*
 * Data cache line size in bytes; defined elsewhere, presumably computed
 * from CTR_DMINLINE(get_ctr()) during setup — confirm at the definition.
 */
extern unsigned long dcache_line_size;
120 
/* Read ID_AA64MMFR0_EL1, memory model feature register 0. */
static inline unsigned long get_id_aa64mmfr0_el1(void)
{
	return read_sysreg(id_aa64mmfr0_el1);
}
125 
/* Bit offsets of the TGran4/TGran64/TGran16 fields in ID_AA64MMFR0_EL1. */
#define ID_AA64MMFR0_TGRAN4_SHIFT	28
#define ID_AA64MMFR0_TGRAN64_SHIFT	24
#define ID_AA64MMFR0_TGRAN16_SHIFT	20

/*
 * Base "granule supported" encodings for each field. NOTE(review):
 * newer CPUs can report other supported values (e.g. TGran4 = 0x1 with
 * FEAT_LPA2), so an exact-equality test against these constants may
 * under-report support.
 */
#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1
133 
134 static inline bool system_supports_granule(size_t granule)
135 {
136 	u32 shift;
137 	u32 val;
138 	u64 mmfr0;
139 
140 	if (granule == SZ_4K) {
141 		shift = ID_AA64MMFR0_TGRAN4_SHIFT;
142 		val = ID_AA64MMFR0_TGRAN4_SUPPORTED;
143 	} else if (granule == SZ_16K) {
144 		shift = ID_AA64MMFR0_TGRAN16_SHIFT;
145 		val = ID_AA64MMFR0_TGRAN16_SUPPORTED;
146 	} else {
147 		assert(granule == SZ_64K);
148 		shift = ID_AA64MMFR0_TGRAN64_SHIFT;
149 		val = ID_AA64MMFR0_TGRAN64_SUPPORTED;
150 	}
151 
152 	mmfr0 = get_id_aa64mmfr0_el1();
153 
154 	return ((mmfr0 >> shift) & 0xf) == val;
155 }
156 
157 #endif /* !__ASSEMBLY__ */
158 #endif /* _ASMARM64_PROCESSOR_H_ */
159