xref: /qemu/target/avr/helper.c (revision 03f50d7ee756eecbd4481c3008b5e01e999729c7)
1 /*
2  * QEMU AVR CPU helpers
3  *
4  * Copyright (c) 2016-2020 Michael Rolnik
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see
18  * <http://www.gnu.org/licenses/lgpl-2.1.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qemu/log.h"
23 #include "qemu/error-report.h"
24 #include "cpu.h"
25 #include "accel/tcg/cpu-ops.h"
26 #include "accel/tcg/getpc.h"
27 #include "exec/cputlb.h"
28 #include "exec/page-protection.h"
29 #include "exec/target_page.h"
30 #include "accel/tcg/cpu-ldst.h"
31 #include "exec/helper-proto.h"
32 
33 bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
34 {
35     CPUAVRState *env = cpu_env(cs);
36 
37     /*
38      * We cannot separate a skip from the next instruction,
39      * as the skip would not be preserved across the interrupt.
40      * Separating the two insn normally only happens at page boundaries.
41      */
42     if (env->skip) {
43         return false;
44     }
45 
46     if (interrupt_request & CPU_INTERRUPT_RESET) {
47         if (cpu_interrupts_enabled(env)) {
48             cs->exception_index = EXCP_RESET;
49             avr_cpu_do_interrupt(cs);
50 
51             cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
52             return true;
53         }
54     }
55     if (interrupt_request & CPU_INTERRUPT_HARD) {
56         if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
57             int index = ctz64(env->intsrc);
58             cs->exception_index = EXCP_INT(index);
59             avr_cpu_do_interrupt(cs);
60 
61             env->intsrc &= env->intsrc - 1; /* clear the interrupt */
62             if (!env->intsrc) {
63                 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
64             }
65             return true;
66         }
67     }
68     return false;
69 }
70 
/* Store one byte to data memory through the data MMU index. */
static void do_stb(CPUAVRState *env, uint32_t addr, uint8_t data, uintptr_t ra)
{
    cpu_stb_mmuidx_ra(env, addr, data, MMU_DATA_IDX, ra);
}
75 
76 void avr_cpu_do_interrupt(CPUState *cs)
77 {
78     CPUAVRState *env = cpu_env(cs);
79 
80     uint32_t ret = env->pc_w;
81     int vector = 0;
82     int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
83     int base = 0;
84 
85     if (cs->exception_index == EXCP_RESET) {
86         vector = 0;
87     } else if (env->intsrc != 0) {
88         vector = ctz64(env->intsrc) + 1;
89     }
90 
91     if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
92         do_stb(env, env->sp--, ret, 0);
93         do_stb(env, env->sp--, ret >> 8, 0);
94         do_stb(env, env->sp--, ret >> 16, 0);
95     } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
96         do_stb(env, env->sp--, ret, 0);
97         do_stb(env, env->sp--, ret >> 8, 0);
98     } else {
99         do_stb(env, env->sp--, ret, 0);
100     }
101 
102     env->pc_w = base + vector * size;
103     env->sregI = 0; /* clear Global Interrupt Flag */
104 
105     cs->exception_index = -1;
106 }
107 
108 hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
109 {
110     return addr; /* I assume 1:1 address correspondence */
111 }
112 
113 bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
114                       MMUAccessType access_type, int mmu_idx,
115                       bool probe, uintptr_t retaddr)
116 {
117     int prot;
118     uint32_t paddr;
119 
120     address &= TARGET_PAGE_MASK;
121 
122     if (mmu_idx == MMU_CODE_IDX) {
123         /* Access to code in flash. */
124         paddr = OFFSET_CODE + address;
125         prot = PAGE_READ | PAGE_EXEC;
126         if (paddr >= OFFSET_DATA) {
127             /*
128              * This should not be possible via any architectural operations.
129              * There is certainly not an exception that we can deliver.
130              * Accept probing that might come from generic code.
131              */
132             if (probe) {
133                 return false;
134             }
135             error_report("execution left flash memory");
136             abort();
137         }
138     } else {
139         /* Access to memory. */
140         paddr = OFFSET_DATA + address;
141         prot = PAGE_READ | PAGE_WRITE;
142     }
143 
144     tlb_set_page(cs, address, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
145     return true;
146 }
147 
148 /*
149  *  helpers
150  */
151 
152 void helper_sleep(CPUAVRState *env)
153 {
154     CPUState *cs = env_cpu(env);
155 
156     cs->exception_index = EXCP_HLT;
157     cpu_loop_exit(cs);
158 }
159 
/*
 * Report an instruction the translator does not support: log it under
 * LOG_UNIMP (with a CPU state dump) and leave the execution loop.
 */
void helper_unsupported(CPUAVRState *env)
{
    CPUState *cs = env_cpu(env);

    /*
     * I could not find what happens on the real platform, so
     * it's EXCP_DEBUG for the meanwhile.
     */
    cs->exception_index = EXCP_DEBUG;
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("UNSUPPORTED\n");
        cpu_dump_state(cs, stderr, 0);
    }
    cpu_loop_exit(cs);
}
175 
176 void helper_debug(CPUAVRState *env)
177 {
178     CPUState *cs = env_cpu(env);
179 
180     cs->exception_index = EXCP_DEBUG;
181     cpu_loop_exit(cs);
182 }
183 
184 void helper_break(CPUAVRState *env)
185 {
186     CPUState *cs = env_cpu(env);
187 
188     cs->exception_index = EXCP_DEBUG;
189     cpu_loop_exit(cs);
190 }
191 
192 void helper_wdr(CPUAVRState *env)
193 {
194     qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n");
195 }
196 
197 /*
198  * The first 32 bytes of the data space are mapped to the cpu regs.
199  * We cannot write these from normal store operations because TCG
200  * does not expect global temps to be modified -- a global may be
201  * live in a host cpu register across the store.  We can however
202  * read these, as TCG does make sure the global temps are saved
203  * in case the load operation traps.
204  */
205 
/*
 * Read one of r0..r31 through its memory-mapped alias at data-space
 * offsets 0..31.  Returns the raw 8-bit register value.
 */
static uint64_t avr_cpu_reg1_read(void *opaque, hwaddr addr, unsigned size)
{
    CPUAVRState *env = opaque;

    assert(addr < 32);
    return env->r[addr];
}
213 
214 /*
215  * The range 0x38-0x3f of the i/o space is mapped to cpu regs.
216  * As above, we cannot write these from normal store operations.
217  */
218 
/*
 * Read one of the CPU registers mapped at i/o offsets 0x38..0x3f.
 * The RAMP*/EIND state is kept pre-shifted into bits 23:16, so the
 * architectural byte is recovered with a >> 16; SPL/SPH return the two
 * halves of the stack pointer, and SREG is assembled from the flags.
 */
static uint64_t avr_cpu_reg2_read(void *opaque, hwaddr addr, unsigned size)
{
    CPUAVRState *env = opaque;

    switch (addr) {
    case REG_38_RAMPD:
        return 0xff & (env->rampD >> 16);
    case REG_38_RAMPX:
        return 0xff & (env->rampX >> 16);
    case REG_38_RAMPY:
        return 0xff & (env->rampY >> 16);
    case REG_38_RAMPZ:
        return 0xff & (env->rampZ >> 16);
    case REG_38_EIDN:
        return 0xff & (env->eind >> 16);
    case REG_38_SPL:
        return env->sp & 0x00ff;
    case REG_38_SPH:
        return 0xff & (env->sp >> 8);
    case REG_38_SREG:
        return cpu_get_sreg(env);
    }
    /* The region is only 8 bytes wide; no other offsets can arrive. */
    g_assert_not_reached();
}
243 
/*
 * Write handler shared by both register windows.  Stores cannot modify
 * the TCG globals directly (see the comment above), so flag a "full
 * access" and restart the store insn; it is then re-executed on the
 * slow path (cf. helper_fullwr) which is allowed to write the regs.
 */
static void avr_cpu_trap_write(void *opaque, hwaddr addr,
                               uint64_t data64, unsigned size)
{
    CPUAVRState *env = opaque;
    CPUState *cs = env_cpu(env);

    env->fullacc = true;
    cpu_loop_exit_restore(cs, cs->mem_io_pc);
}
253 
/* Byte-wide window over r0..r31; writes trap to the slow path. */
const MemoryRegionOps avr_cpu_reg1 = {
    .read = avr_cpu_reg1_read,
    .write = avr_cpu_trap_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 1,
};
261 
/* Byte-wide window over i/o 0x38..0x3f; writes trap to the slow path. */
const MemoryRegionOps avr_cpu_reg2 = {
    .read = avr_cpu_reg2_read,
    .write = avr_cpu_trap_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 1,
};
269 
/*
 * Implement the ST instruction for addresses that may alias the
 * memory-mapped CPU registers.  This is the slow path entered after
 * avr_cpu_trap_write() flagged a full access and restarted the insn.
 */
void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
    env->fullacc = false; /* the trapped access is handled right here */

    switch (addr) {
    case 0 ... 31:
        /* CPU registers */
        env->r[addr] = data;
        break;

    /*
     * I/O offsets 0x38..0x3f, rebased past the register file.
     * RAMP*/EIND are kept pre-shifted into bits 23:16 (see the
     * matching >> 16 in avr_cpu_reg2_read); writes to registers the
     * core does not implement are silently dropped.
     */
    case REG_38_RAMPD + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPD)) {
            env->rampD = data << 16;
        }
        break;
    case REG_38_RAMPX + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPX)) {
            env->rampX = data << 16;
        }
        break;
    case REG_38_RAMPY + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPY)) {
            env->rampY = data << 16;
        }
        break;
    case REG_38_RAMPZ + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
            env->rampZ = data << 16;
        }
        break;
    case REG_38_EIDN + 0x38 + NUMBER_OF_CPU_REGISTERS:
        env->eind = data << 16;
        break;
    case REG_38_SPL + 0x38 + NUMBER_OF_CPU_REGISTERS:
        env->sp = (env->sp & 0xff00) | data;
        break;
    case REG_38_SPH + 0x38 + NUMBER_OF_CPU_REGISTERS:
        /* SPH only exists when the stack pointer is 2 bytes wide. */
        if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
            env->sp = (env->sp & 0x00ff) | (data << 8);
        }
        break;
    case REG_38_SREG + 0x38 + NUMBER_OF_CPU_REGISTERS:
        cpu_set_sreg(env, data);
        break;

    default:
        /* Plain data memory: perform the ordinary byte store. */
        do_stb(env, addr, data, GETPC());
        break;
    }
}
324