xref: /qemu/target/avr/helper.c (revision 7cef6d686309e2792186504ae17cf4f3eb57ef68)
1 /*
2  * QEMU AVR CPU helpers
3  *
4  * Copyright (c) 2016-2020 Michael Rolnik
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see
18  * <http://www.gnu.org/licenses/lgpl-2.1.html>
19  */
20 
21 #include "qemu/osdep.h"
22 #include "qemu/log.h"
23 #include "qemu/error-report.h"
24 #include "cpu.h"
25 #include "accel/tcg/cpu-ops.h"
26 #include "exec/cputlb.h"
27 #include "exec/page-protection.h"
28 #include "exec/target_page.h"
29 #include "accel/tcg/cpu-ldst.h"
30 #include "exec/helper-proto.h"
31 
/*
 * Check for and deliver a pending interrupt.
 *
 * Returns true if an interrupt was taken (and the TB loop should
 * restart), false if nothing was delivered.  RESET is checked before
 * ordinary hardware interrupts, so it takes priority.
 */
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAVRState *env = cpu_env(cs);

    /*
     * We cannot separate a skip from the next instruction,
     * as the skip would not be preserved across the interrupt.
     * Separating the two insn normally only happens at page boundaries.
     */
    if (env->skip) {
        return false;
    }

    if (interrupt_request & CPU_INTERRUPT_RESET) {
        /* RESET is only delivered while interrupts are globally enabled. */
        if (cpu_interrupts_enabled(env)) {
            cs->exception_index = EXCP_RESET;
            avr_cpu_do_interrupt(cs);

            cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
            return true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
            /*
             * The lowest set bit of intsrc is the lowest-numbered (and
             * thus highest-priority) pending interrupt source.
             */
            int index = ctz64(env->intsrc);
            cs->exception_index = EXCP_INT(index);
            avr_cpu_do_interrupt(cs);

            env->intsrc &= env->intsrc - 1; /* clear the interrupt */
            if (!env->intsrc) {
                /* No sources left pending; drop the HARD request. */
                cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
            }
            return true;
        }
    }
    return false;
}
69 
/*
 * Store one byte to the data address space, using the caller's return
 * address @ra for unwinding if the access faults.
 */
static void do_stb(CPUAVRState *env, uint32_t addr, uint8_t data, uintptr_t ra)
{
    cpu_stb_mmuidx_ra(env, addr, data, MMU_DATA_IDX, ra);
}
74 
/*
 * Deliver the exception selected in cs->exception_index: push the
 * return PC on the stack, jump to the vector and disable interrupts.
 */
void avr_cpu_do_interrupt(CPUState *cs)
{
    CPUAVRState *env = cpu_env(cs);

    uint32_t ret = env->pc_w;
    int vector = 0;
    /* Vectors are 2 words apart on devices with JMP/CALL, 1 word otherwise. */
    int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
    int base = 0;

    if (cs->exception_index == EXCP_RESET) {
        vector = 0;
    } else if (env->intsrc != 0) {
        /* Vector 0 is reset; interrupt source N uses vector N + 1. */
        vector = ctz64(env->intsrc) + 1;
    }

    /*
     * Push the return address, low byte first, with the stack pointer
     * post-decremented after each byte.  The number of bytes pushed
     * matches the device's program counter width.
     */
    if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
        do_stb(env, env->sp--, ret, 0);
        do_stb(env, env->sp--, ret >> 8, 0);
        do_stb(env, env->sp--, ret >> 16, 0);
    } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
        do_stb(env, env->sp--, ret, 0);
        do_stb(env, env->sp--, ret >> 8, 0);
    } else {
        do_stb(env, env->sp--, ret, 0);
    }

    env->pc_w = base + vector * size;
    env->sregI = 0; /* clear Global Interrupt Flag */

    cs->exception_index = -1;
}
106 
/*
 * Debug accessor: translate a virtual address to a physical one.
 * AVR uses a flat mapping, so the address is returned unchanged.
 */
hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    return addr; /* I assume 1:1 address correspondence */
}
111 
/*
 * Fill a softmmu TLB entry for @address.
 *
 * Code fetches are mapped into the flash region at OFFSET_CODE with
 * read/execute permission; everything else is mapped into the data
 * region at OFFSET_DATA with read/write permission.  Always succeeds
 * except when probing an out-of-flash code address.
 */
bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    uint32_t physaddr;
    int page_flags;

    address &= TARGET_PAGE_MASK;

    if (mmu_idx != MMU_CODE_IDX) {
        /* Data access: flat mapping into the data space. */
        physaddr = OFFSET_DATA + address;
        page_flags = PAGE_READ | PAGE_WRITE;
    } else {
        /* Instruction fetch from flash. */
        physaddr = OFFSET_CODE + address;
        page_flags = PAGE_READ | PAGE_EXEC;
        if (physaddr >= OFFSET_DATA) {
            /*
             * This should not be possible via any architectural operations.
             * There is certainly not an exception that we can deliver.
             * Accept probing that might come from generic code.
             */
            if (probe) {
                return false;
            }
            error_report("execution left flash memory");
            abort();
        }
    }

    tlb_set_page(cs, address, physaddr, page_flags, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
146 
147 /*
148  *  helpers
149  */
150 
/* SLEEP instruction: halt the vCPU by raising EXCP_HLT. */
void helper_sleep(CPUAVRState *env)
{
    CPUState *cpu = env_cpu(env);

    cpu->exception_index = EXCP_HLT;
    cpu_loop_exit(cpu);
}
158 
/*
 * Raised for opcodes the translator does not support.  Real-hardware
 * behavior could not be determined, so report EXCP_DEBUG for now and
 * optionally dump the CPU state under the "unimp" log mask.
 */
void helper_unsupported(CPUAVRState *env)
{
    CPUState *cpu = env_cpu(env);

    cpu->exception_index = EXCP_DEBUG;
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("UNSUPPORTED\n");
        cpu_dump_state(cpu, stderr, 0);
    }
    cpu_loop_exit(cpu);
}
174 
/* Raise a debug exception and leave the execution loop. */
void helper_debug(CPUAVRState *env)
{
    CPUState *cpu = env_cpu(env);

    cpu->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cpu);
}
182 
/* BREAK instruction: reported to the guest debugger as EXCP_DEBUG. */
void helper_break(CPUAVRState *env)
{
    CPUState *cpu = env_cpu(env);

    cpu->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cpu);
}
190 
/* WDR instruction: the watchdog timer is not modelled, so only log it. */
void helper_wdr(CPUAVRState *env)
{
    qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n");
}
195 
196 /*
197  * The first 32 bytes of the data space are mapped to the cpu regs.
198  * We cannot write these from normal store operations because TCG
199  * does not expect global temps to be modified -- a global may be
200  * live in a host cpu register across the store.  We can however
201  * read these, as TCG does make sure the global temps are saved
202  * in case the load operation traps.
203  */
204 
/*
 * Read handler for the 32 general-purpose registers mapped at the
 * start of the data space; @addr is the register number.
 */
static uint64_t avr_cpu_reg1_read(void *opaque, hwaddr addr, unsigned size)
{
    CPUAVRState *env = opaque;

    assert(addr < 32);
    return env->r[addr];
}
212 
213 /*
214  * The range 0x38-0x3f of the i/o space is mapped to cpu regs.
215  * As above, we cannot write these from normal store operations.
216  */
217 
/*
 * Read handler for the CPU registers mirrored at 0x38-0x3f of the
 * i/o space; @addr is the offset within that window.
 */
static uint64_t avr_cpu_reg2_read(void *opaque, hwaddr addr, unsigned size)
{
    CPUAVRState *env = opaque;

    switch (addr) {
    /* RAMP*/EIND values are kept pre-shifted into bits 16-23 internally,
       so reads shift them back down to a single byte. */
    case REG_38_RAMPD:
        return 0xff & (env->rampD >> 16);
    case REG_38_RAMPX:
        return 0xff & (env->rampX >> 16);
    case REG_38_RAMPY:
        return 0xff & (env->rampY >> 16);
    case REG_38_RAMPZ:
        return 0xff & (env->rampZ >> 16);
    case REG_38_EIDN:
        return 0xff & (env->eind >> 16);
    case REG_38_SPL:
        /* Stack pointer, low and high halves. */
        return env->sp & 0x00ff;
    case REG_38_SPH:
        return 0xff & (env->sp >> 8);
    case REG_38_SREG:
        /* Status register, reassembled from the split flag fields. */
        return cpu_get_sreg(env);
    }
    g_assert_not_reached();
}
242 
/*
 * Write handler for the CPU-register windows.  Stores to these regions
 * cannot be performed as plain memory writes (see comment above), so
 * flag a full access and unwind back to the store instruction via
 * cs->mem_io_pc; presumably it is then retried through the
 * helper_fullwr slow path below — the fullacc flag it clears suggests
 * so, but the translator side is not visible here.
 */
static void avr_cpu_trap_write(void *opaque, hwaddr addr,
                               uint64_t data64, unsigned size)
{
    CPUAVRState *env = opaque;
    CPUState *cs = env_cpu(env);

    env->fullacc = true;
    cpu_loop_exit_restore(cs, cs->mem_io_pc);
}
252 
/* Byte-wide ops for the general-register window: reads allowed, writes trap. */
const MemoryRegionOps avr_cpu_reg1 = {
    .read = avr_cpu_reg1_read,
    .write = avr_cpu_trap_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 1,
};
260 
/* Byte-wide ops for the 0x38-0x3f i/o window: reads allowed, writes trap. */
const MemoryRegionOps avr_cpu_reg2 = {
    .read = avr_cpu_reg2_read,
    .write = avr_cpu_trap_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 1,
};
268 
269 /*
270  *  this function implements ST instruction when there is a possibility to write
271  *  into a CPU register
272  */
/*
 *  this function implements ST instruction when there is a possibility to write
 *  into a CPU register
 *
 *  @addr is a data-space address: 0-31 hit the general registers, the
 *  0x38 i/o window follows at 0x38 + NUMBER_OF_CPU_REGISTERS, and
 *  everything else falls through to a normal byte store.
 */
void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
{
    /* The trap write that got us here set fullacc; clear it again. */
    env->fullacc = false;

    switch (addr) {
    case 0 ... 31:
        /* CPU registers */
        env->r[addr] = data;
        break;

    /*
     * RAMP*/EIND are stored pre-shifted into bits 16-23; writes to
     * absent extension registers are silently ignored via the feature
     * checks.
     */
    case REG_38_RAMPD + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPD)) {
            env->rampD = data << 16;
        }
        break;
    case REG_38_RAMPX + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPX)) {
            env->rampX = data << 16;
        }
        break;
    case REG_38_RAMPY + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPY)) {
            env->rampY = data << 16;
        }
        break;
    case REG_38_RAMPZ + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
            env->rampZ = data << 16;
        }
        break;
    case REG_38_EIDN + 0x38 + NUMBER_OF_CPU_REGISTERS:
        env->eind = data << 16;
        break;
    case REG_38_SPL + 0x38 + NUMBER_OF_CPU_REGISTERS:
        /* Stack pointer low byte; high byte only on 2-byte-SP devices. */
        env->sp = (env->sp & 0xff00) | data;
        break;
    case REG_38_SPH + 0x38 + NUMBER_OF_CPU_REGISTERS:
        if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
            env->sp = (env->sp & 0x00ff) | (data << 8);
        }
        break;
    case REG_38_SREG + 0x38 + NUMBER_OF_CPU_REGISTERS:
        /* Status register: distribute the byte into the split flags. */
        cpu_set_sreg(env, data);
        break;

    default:
        /* Ordinary memory: perform the store, unwinding via GETPC(). */
        do_stb(env, addr, data, GETPC());
        break;
    }
}
323