/*
 * TCG CPU-specific operations
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TCG_CPU_OPS_H
#define TCG_CPU_OPS_H

#include "exec/breakpoint.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/mmu-access-type.h"
#include "exec/vaddr.h"
#include "accel/tcg/tb-cpu-state.h"
#include "tcg/tcg-mo.h"

struct TCGCPUOps {
    /**
     * mttcg_supported: multi-threaded TCG is supported
     *
     * Target (TCG frontend) supports:
     *   - atomic instructions
     *   - memory ordering primitives (barriers)
     */
    bool mttcg_supported;

    /**
     * @precise_smc: Stores which modify code within the current TB force
     *               the TB to exit; the next executed instruction will see
     *               the result of the store.
     */
    bool precise_smc;

    /**
     * @guest_default_memory_order: default barrier that is required
     * for the guest memory ordering.
     */
    TCGBar guest_default_memory_order;
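    /*
     * For example (a hedged sketch, not tied to any particular target):
     * a strongly-ordered guest running on a weakly-ordered host would
     * need
     *
     *     .guest_default_memory_order = TCG_MO_ALL,
     *
     * while a weakly-ordered guest can use 0 and rely on the barriers
     * its instruction set makes explicit.
     */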

    /**
     * @initialize: Initialize TCG state
     *
     * Called when the first CPU is realized.
     */
    void (*initialize)(void);
    /**
     * @translate_code: Translate guest instructions to TCGOps
     * @cpu: cpu context
     * @tb: translation block
     * @max_insns: max number of instructions to translate
     * @pc: guest virtual program counter address
     * @host_pc: host physical program counter address
     *
     * This function must be provided by the target, which should create
     * the target-specific DisasContext, and then invoke translator_loop.
     */
    void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
                           int *max_insns, vaddr pc, void *host_pc);
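    /*
     * A minimal sketch (the xyz_* names and DisasContext layout are
     * illustrative assumptions, not from a real target):
     *
     *     static void xyz_translate_code(CPUState *cs, TranslationBlock *tb,
     *                                    int *max_insns, vaddr pc,
     *                                    void *host_pc)
     *     {
     *         DisasContext dc = { };
     *
     *         translator_loop(cs, tb, max_insns, pc, host_pc,
     *                         &xyz_tr_ops, &dc.base);
     *     }
     */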
    /**
     * @get_tb_cpu_state: Extract CPU state for a TCG #TranslationBlock
     *
     * Fill in all data required to select or compile a TranslationBlock.
     */
    TCGTBCPUState (*get_tb_cpu_state)(CPUState *cs);
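    /*
     * A minimal sketch, assuming a hypothetical target whose dynamic
     * state is fully described by the PC and a computed flags word:
     *
     *     static TCGTBCPUState xyz_get_tb_cpu_state(CPUState *cs)
     *     {
     *         CPUXYZState *env = cpu_env(cs);
     *
     *         return (TCGTBCPUState){
     *             .pc = env->pc,
     *             .flags = xyz_compute_tb_flags(env),
     *         };
     *     }
     */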
    /**
     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
     *
     * This is called when we abandon execution of a TB before starting it,
     * and must set all parts of the CPU state which the previous TB in the
     * chain may not have updated.
     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
     *
     * If more state needs to be restored, the target must implement a
     * function to restore all the state, and register it here.
     */
    void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
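    /*
     * A hedged sketch, assuming a hypothetical target that also caches
     * derived flags alongside the PC (a real target may additionally
     * need to honour CF_PCREL before reading tb->pc):
     *
     *     static void xyz_synchronize_from_tb(CPUState *cs,
     *                                         const TranslationBlock *tb)
     *     {
     *         CPUXYZState *env = cpu_env(cs);
     *
     *         env->pc = tb->pc;
     *         env->hflags = tb->flags;
     *     }
     */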
    /**
     * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
     *
     * This is called when we unwind state in the middle of a TB,
     * usually before raising an exception. Set all parts of the CPU
     * state which are tracked insn-by-insn in the target-specific
     * arguments to start_insn, passed as @data.
     */
    void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
                                 const uint64_t *data);
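    /*
     * A minimal sketch, assuming a hypothetical target whose only
     * insn-by-insn start_insn argument is the PC:
     *
     *     static void xyz_restore_state_to_opc(CPUState *cs,
     *                                          const TranslationBlock *tb,
     *                                          const uint64_t *data)
     *     {
     *         cpu_env(cs)->pc = data[0];
     *     }
     */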

    /** @cpu_exec_enter: Callback for cpu_exec preparation */
    void (*cpu_exec_enter)(CPUState *cpu);
    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
    void (*cpu_exec_exit)(CPUState *cpu);
    /** @debug_excp_handler: Callback for handling debug exceptions */
    void (*debug_excp_handler)(CPUState *cpu);

    /** @mmu_index: Callback for choosing softmmu mmu index */
    int (*mmu_index)(CPUState *cpu, bool ifetch);

#ifdef CONFIG_USER_ONLY
    /**
     * @fake_user_interrupt: Callback for 'fake exception' handling.
     *
     * Simulate 'fake exception' which will be handled outside the
     * cpu execution loop (hack for x86 user mode).
     */
    void (*fake_user_interrupt)(CPUState *cpu);

    /**
     * record_sigsegv:
     * @cpu: cpu context
     * @addr: faulting guest address
     * @access_type: access was read/write/execute
     * @maperr: true for invalid page, false for permission fault
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGSEGV with si_code set for @maperr,
     * and si_addr set for @addr. Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide the signal handler with
     * anything besides the user context registers, and the siginfo_t,
     * then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
     * so that a "normal" cpu exception can be raised. In this case,
     * the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigsegv)(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type,
                           bool maperr, uintptr_t ra);
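    /*
     * A hedged sketch of the "record and return" style, assuming a
     * hypothetical fault-address register that the emulated kernel
     * exposes in the signal frame:
     *
     *     static void xyz_record_sigsegv(CPUState *cs, vaddr addr,
     *                                    MMUAccessType access_type,
     *                                    bool maperr, uintptr_t ra)
     *     {
     *         cpu_env(cs)->fault_address = addr;
     *     }
     */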
    /**
     * record_sigbus:
     * @cpu: cpu context
     * @addr: misaligned guest address
     * @access_type: access was read/write/execute
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGBUS with si_code BUS_ADRALN,
     * and si_addr set for @addr. Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide the signal handler with
     * anything besides the user context registers, and the siginfo_t,
     * then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu do_unaligned_access code,
     * @ra is provided so that a "normal" cpu exception can be raised.
     * In this case, the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigbus)(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra);

    /**
     * untagged_addr: Remove an ignored tag from an address
     * @cs: cpu context
     * @addr: tagged guest address
     */
    vaddr (*untagged_addr)(CPUState *cs, vaddr addr);
#else
    /** @do_interrupt: Callback for interrupt handling. */
    void (*do_interrupt)(CPUState *cpu);
    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
    /** @cpu_exec_reset: Callback for reset in cpu_exec. */
    void (*cpu_exec_reset)(CPUState *cpu);
    /**
     * @cpu_exec_halt: Callback for handling halt in cpu_exec.
     *
     * The target CPU should do any special processing here that it needs
     * to do when the CPU is in the halted state.
     *
     * Return true to indicate that the CPU should now leave halt, false
     * if it should remain in the halted state. (This should generally
     * be the same value that cpu_has_work() would return.)
     *
     * This method must be provided. If the target does not need to
     * do anything special for halt, the same function used for its
     * SysemuCPUOps::has_work method can be used here, as they have the
     * same function signature.
     */
    bool (*cpu_exec_halt)(CPUState *cpu);
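    /*
     * A minimal sketch: a target with no special halt processing can
     * register the same predicate it uses for has_work, e.g. (names
     * hypothetical):
     *
     *     static bool xyz_cpu_has_work(CPUState *cs)
     *     {
     *         return cs->interrupt_request & CPU_INTERRUPT_HARD;
     *     }
     *
     * and set .cpu_exec_halt = xyz_cpu_has_work in its TCGCPUOps.
     */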
    /**
     * @tlb_fill_align: Handle a softmmu tlb miss
     * @cpu: cpu context
     * @out: output page properties
     * @addr: virtual address
     * @access_type: read, write or execute
     * @mmu_idx: mmu context
     * @memop: memory operation for the access
     * @size: memory access size, or 0 for whole page
     * @probe: test only, no fault
     * @ra: host return address for exception unwind
     *
     * If the access is valid, fill in @out and return true.
     * Otherwise if probe is true, return false.
     * Otherwise raise an exception and do not return.
     *
     * The alignment check for the access is deferred to this hook,
     * so that the target can determine the priority of any alignment
     * fault with respect to other potential faults from paging.
     * Zero may be passed for @memop to skip any alignment check
     * for non-memory-access operations such as probing.
     */
    bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
                           MMUAccessType access_type, int mmu_idx,
                           MemOp memop, int size, bool probe, uintptr_t ra);
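    /*
     * A hedged skeleton (the xyz_* helpers are illustrative assumptions;
     * xyz_raise_alignment_fault and xyz_raise_page_fault do not return):
     *
     *     static bool xyz_tlb_fill_align(CPUState *cpu, CPUTLBEntryFull *out,
     *                                    vaddr addr, MMUAccessType type,
     *                                    int mmu_idx, MemOp memop, int size,
     *                                    bool probe, uintptr_t ra)
     *     {
     *         if (addr & ((1 << memop_alignment_bits(memop)) - 1)) {
     *             xyz_raise_alignment_fault(cpu, addr, type, mmu_idx, ra);
     *         }
     *         if (xyz_walk_page_table(cpu, out, addr, type, mmu_idx)) {
     *             return true;
     *         }
     *         if (probe) {
     *             return false;
     *         }
     *         xyz_raise_page_fault(cpu, addr, type, mmu_idx, ra);
     *     }
     *
     * With @memop == 0 the alignment mask is zero, so probe-style calls
     * skip the alignment check as described above.
     */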
    /**
     * @tlb_fill: Handle a softmmu tlb miss
     *
     * If the access is valid, call tlb_set_page and return true;
     * if the access is invalid and probe is true, return false;
     * otherwise raise an exception and do not return.
     */
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    /**
     * @pointer_wrap:
     *
     * We have incremented @base to @result, resulting in a page change.
     * For the current cpu state, adjust @result for possible overflow.
     */
    vaddr (*pointer_wrap)(CPUState *cpu, int mmu_idx, vaddr result, vaddr base);
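    /*
     * For example, a guest with 32-bit addressing would truncate: the
     * common helper cpu_pointer_wrap_uint32() declared later in this
     * header is essentially
     *
     *     return (uint32_t)result;
     *
     * while cpu_pointer_wrap_notreached() is for configurations where
     * wrap-around cannot occur.
     */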
    /**
     * @do_transaction_failed: Callback for handling failed memory transactions
     * (i.e. bus faults or external aborts; not MMU faults)
     */
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    /**
     * @do_unaligned_access: Callback for unaligned access handling.
     * The callback must exit by raising an exception.
     */
    G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                           MMUAccessType access_type,
                                           int mmu_idx, uintptr_t retaddr);

    /**
     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
     */
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);

    /**
     * @debug_check_watchpoint: return true if the architectural
     * watchpoint whose address has matched should really fire;
     * used by ARM and RISC-V
     */
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);

    /**
     * @debug_check_breakpoint: return true if the architectural
     * breakpoint whose PC has matched should really fire.
     */
    bool (*debug_check_breakpoint)(CPUState *cpu);

    /**
     * @io_recompile_replay_branch: Callback for cpu_io_recompile.
     *
     * The cpu has been stopped, and cpu_restore_state_from_tb has been
     * called. If the faulting instruction is in a delay slot, and the
     * target architecture requires re-execution of the branch, then
     * adjust the cpu state as required and return true.
     */
    bool (*io_recompile_replay_branch)(CPUState *cpu,
                                       const TranslationBlock *tb);
    /**
     * @need_replay_interrupt: Return %true if @interrupt_request
     * needs to be recorded for replay purposes.
     */
    bool (*need_replay_interrupt)(int interrupt_request);
#endif /* !CONFIG_USER_ONLY */
};

#if defined(CONFIG_USER_ONLY)

static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                                        MemTxAttrs atr, int fl, uintptr_t ra)
{
}

static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}

#else

/**
 * cpu_check_watchpoint:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 * @attrs: memory access attributes
 * @flags: watchpoint access type
 * @ra: unwind return address
 *
 * Check for a watchpoint hit in [addr, addr+len) of the type
 * specified by @flags. Exit via exception on a hit.
 */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra);

/**
 * cpu_watchpoint_address_matches:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 *
 * Return the watchpoint flags that apply to [addr, addr+len).
 * If no watchpoint is registered for the range, the result is 0.
 */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
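/*
 * Usage sketch (a hypothetical caller, not a prescribed pattern): a
 * softmmu implementation might use cpu_watchpoint_address_matches()
 * when installing a TLB entry to decide whether accesses to the page
 * must take the slow path, where cpu_check_watchpoint() then reports
 * the hit:
 *
 *     if (cpu_watchpoint_address_matches(cs, page_addr, TARGET_PAGE_SIZE)) {
 *         flags |= TLB_WATCHPOINT;
 *     }
 */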

/*
 * Common pointer_wrap implementations.
 */
vaddr cpu_pointer_wrap_notreached(CPUState *, int, vaddr, vaddr);
vaddr cpu_pointer_wrap_uint32(CPUState *, int, vaddr, vaddr);

#endif

#endif /* TCG_CPU_OPS_H */