xref: /kvm-unit-tests/lib/arm64/asm/processor.h (revision 0cc3a351b925928827baa4b69cf0e46ff5837083)
1 #ifndef _ASMARM64_PROCESSOR_H_
2 #define _ASMARM64_PROCESSOR_H_
3 /*
4  * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 
9 #ifndef __ASSEMBLER__
10 #include <asm/ptrace.h>
11 #include <asm/esr.h>
12 #include <asm/sysreg.h>
13 #include <asm/barrier.h>
14 
/*
 * AArch64 exception vector slots, listed in the order the sixteen
 * entries appear in the vector table: four exception types (sync,
 * IRQ, FIQ, SError) for each of the four source states defined by
 * the architecture.
 */
enum vector {
	/* Exception taken from the current EL while using SP_EL0 */
	EL1T_SYNC,
	EL1T_IRQ,
	EL1T_FIQ,
	EL1T_ERROR,
	/* Exception taken from the current EL while using SP_ELx */
	EL1H_SYNC,
	EL1H_IRQ,
	EL1H_FIQ,
	EL1H_ERROR,
	/* Exception taken from a lower EL running in AArch64 */
	EL0_SYNC_64,
	EL0_IRQ_64,
	EL0_FIQ_64,
	EL0_ERROR_64,
	/* Exception taken from a lower EL running in AArch32 */
	EL0_SYNC_32,
	EL0_IRQ_32,
	EL0_FIQ_32,
	EL0_ERROR_32,
	VECTOR_MAX,	/* number of vector slots, for sizing handler tables */
};
34 
/* ESR_ELx.EC is a 6-bit field, so there are at most 64 exception classes. */
#define EC_MAX 64

/* Handler covering one whole vector-table slot. */
typedef void (*vector_fn)(enum vector v, struct pt_regs *regs,
			  unsigned int esr);
/* Handler for a single exception class (ESR_ELx.EC) of a sync vector. */
typedef void (*exception_fn)(struct pt_regs *regs, unsigned int esr);
/* Handler for an IRQ vector; no ESR is reported for interrupts. */
typedef void (*irq_handler_fn)(struct pt_regs *regs);
extern void install_vector_handler(enum vector v, vector_fn fn);
extern void install_exception_handler(enum vector v, unsigned int ec,
				      exception_fn fn);
extern void install_irq_handler(enum vector v, irq_handler_fn fn);
extern void default_vector_sync_handler(enum vector v, struct pt_regs *regs,
					unsigned int esr);
extern void default_vector_irq_handler(enum vector v, struct pt_regs *regs,
				       unsigned int esr);
/* Reset all VECTOR_MAX entries of @handlers to the defaults above. */
extern void vector_handlers_default_init(vector_fn *handlers);

extern void show_regs(struct pt_regs *regs);
/*
 * Fetch FAR_EL1 into *far; the return value indicates whether FAR is
 * valid for the exception class encoded in @esr.
 */
extern bool get_far(unsigned int esr, unsigned long *far);
53 
/*
 * Return the current exception level, encoded as in CurrentEL
 * (bits [3:2]): 0x4 for EL1, 0x8 for EL2.
 */
static inline unsigned long current_level(void)
{
	unsigned long el;
	asm volatile("mrs %0, CurrentEL" : "=r" (el));
	return el & 0xc;	/* CurrentEL.EL lives in bits [3:2]; rest is RES0 */
}
60 
/* Unmask IRQs on this CPU: clear DAIF.I (bit 1 of the daifclr immediate). */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #2" : : : "memory");
}
65 
/* Mask IRQs on this CPU: set DAIF.I (bit 1 of the daifset immediate). */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #2" : : : "memory");
}
70 
/* Read this CPU's MPIDR_EL1 (affinity/identification register). */
static inline uint64_t get_mpidr(void)
{
	return read_sysreg(mpidr_el1);
}
75 
/* Affinity fields of MPIDR_EL1: Aff3 (bits [39:32]) plus Aff2..Aff0 (bits [23:0]). */
#define MPIDR_HWID_BITMASK 0xff00ffffff
/* Map an MPIDR affinity value to a logical cpu index (negative if unknown). */
extern int mpidr_to_cpu(uint64_t mpidr);
78 
/*
 * Bit offset of affinity level @level within MPIDR_EL1: Aff0..Aff2 sit
 * at bits 0/8/16 and Aff3 at bit 32, so levels 0-3 map to 0, 8, 16, 32.
 * Arguments are fully parenthesized so expression arguments (e.g.
 * "a | b") expand correctly.
 */
#define MPIDR_LEVEL_SHIFT(level) \
	(((1 << (level)) >> 1) << 3)
/* Extract the 8-bit affinity field for @level from @mpidr. */
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & 0xff)
83 
/* Drop to EL0 and run func(arg) on the given user stack pointer. */
extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr);
/* True when currently executing at EL0. */
extern bool is_user(void);
/* True when the MMU has been enabled on this CPU. */
extern bool __mmu_enabled(void);
87 
/*
 * Read the virtual counter. The isb() keeps the read from being
 * reordered/speculated before prior instructions, so the value
 * reflects program order.
 */
static inline u64 get_cntvct(void)
{
	isb();
	return read_sysreg(cntvct_el0);
}
93 
/* Read the generic timer frequency (Hz) from CNTFRQ_EL0. */
static inline u32 get_cntfrq(void)
{
	return read_sysreg(cntfrq_el0);
}
98 
/* Read CTR_EL0 (cache type register: line sizes, policies). */
static inline u64 get_ctr(void)
{
	return read_sysreg(ctr_el0);
}
103 
/* Read the memory model feature register (granule support, PA range, ...). */
static inline unsigned long get_id_aa64mmfr0_el1(void)
{
	return read_sysreg(id_aa64mmfr0_el1);
}
108 
/* Field offsets of the translation-granule fields in ID_AA64MMFR0_EL1. */
#define ID_AA64MMFR0_TGRAN4_SHIFT	28
#define ID_AA64MMFR0_TGRAN64_SHIFT	24
#define ID_AA64MMFR0_TGRAN16_SHIFT	20

/* TGran4: 0 = 4K supported, 1 = 4K supported incl. 52-bit addresses. */
#define ID_AA64MMFR0_TGRAN4_SUPPORTED(r)			\
({								\
	u64 __v = ((r) >> ID_AA64MMFR0_TGRAN4_SHIFT) & 0xf;	\
	(__v) == 0 || (__v) == 1;				\
})

/* TGran64: 0 = 64K supported (any other value means unsupported). */
#define ID_AA64MMFR0_TGRAN64_SUPPORTED(r)			\
({								\
	u64 __v = ((r) >> ID_AA64MMFR0_TGRAN64_SHIFT) & 0xf;	\
	(__v) == 0;						\
})

/* TGran16: 1 = 16K supported, 2 = 16K supported incl. 52-bit addresses. */
#define ID_AA64MMFR0_TGRAN16_SUPPORTED(r)			\
({								\
	u64 __v = ((r) >> ID_AA64MMFR0_TGRAN16_SHIFT) & 0xf;	\
	(__v) == 1 || (__v) == 2;				\
})
130 
system_supports_granule(size_t granule)131 static inline bool system_supports_granule(size_t granule)
132 {
133 	u64 mmfr0 = get_id_aa64mmfr0_el1();
134 
135 	if (granule == SZ_4K)
136 		return ID_AA64MMFR0_TGRAN4_SUPPORTED(mmfr0);
137 
138 	if (granule == SZ_16K)
139 		return ID_AA64MMFR0_TGRAN16_SUPPORTED(mmfr0);
140 
141 	assert(granule == SZ_64K);
142 	return ID_AA64MMFR0_TGRAN64_SUPPORTED(mmfr0);
143 }
144 
/* Read the processor feature register (SVE, FP, RAS, EL support, ...). */
static inline unsigned long get_id_aa64pfr0_el1(void)
{
	return read_sysreg(id_aa64pfr0_el1);
}
149 
150 #define ID_AA64PFR0_EL1_SVE_SHIFT	32
151 
system_supports_sve(void)152 static inline bool system_supports_sve(void)
153 {
154 	return ((get_id_aa64pfr0_el1() >> ID_AA64PFR0_EL1_SVE_SHIFT) & 0xf) != 0;
155 }
156 
/*
 * Read the current SVE vector length. RDVL multiplies the VL in bytes
 * by its immediate, so #8 yields the VL in bits — NOTE(review):
 * callers appear to expect bits, confirm against users of sve_vl().
 * The .arch_extension directive lets this assemble without a global
 * -march=...+sve.
 */
static inline unsigned long sve_vl(void)
{
	unsigned long vl;

	asm volatile(".arch_extension sve\n"
		     "rdvl %x0, #8"
		     : "=r" (vl));

	return vl;
}
167 
168 
system_supports_rndr(void)169 static inline bool system_supports_rndr(void)
170 {
171 	u64 id_aa64isar0_el1 = read_sysreg(ID_AA64ISAR0_EL1);
172 
173 	return ((id_aa64isar0_el1 >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf) != 0;
174 }
175 
176 #endif /* !__ASSEMBLER__ */
177 #endif /* _ASMARM64_PROCESSOR_H_ */
178