/*
 * PowerPC CPU routines for qemu.
 *
 * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"
#include "cpu-qom.h"
#include "exec/log.h"
#include "exec/watchpoint.h"
#include "fpu/softfloat-helpers.h"
#include "mmu-hash64.h"
#include "helper_regs.h"
#include "system/tcg.h"

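/*
 * XER's SO, OV, CA (and the ISA 3.00 OV32, CA32) bits are kept in
 * separate CPUPPCState fields; reads reassemble the architected
 * register image and writes scatter it back out.
 */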
target_ulong cpu_read_xer(const CPUPPCState *env)
{
    if (is_isa300(env)) {
        return env->xer | (env->so << XER_SO) |
            (env->ov << XER_OV) | (env->ca << XER_CA) |
            (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
    }

    return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
        (env->ca << XER_CA);
}

void cpu_write_xer(CPUPPCState *env, target_ulong xer)
{
    env->so = (xer >> XER_SO) & 1;
    env->ov = (xer >> XER_OV) & 1;
    env->ca = (xer >> XER_CA) & 1;
    /* Write all of the flags; the ISA 3.00 check is applied on readback. */
    env->ov32 = (xer >> XER_OV32) & 1;
    env->ca32 = (xer >> XER_CA32) & 1;
    env->xer = xer & ~((1ul << XER_SO) |
                       (1ul << XER_OV) | (1ul << XER_CA) |
                       (1ul << XER_OV32) | (1ul << XER_CA32));
}

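/*
 * VSCR[SAT] is tracked separately in the vector-sized vscr_sat field;
 * VSCR[NJ] selects the vector unit's non-Java mode, i.e. flushing of
 * denormal inputs and outputs to zero.
 */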
void ppc_store_vscr(CPUPPCState *env, uint32_t vscr)
{
    env->vscr = vscr & ~(1u << VSCR_SAT);
    /* Which bit we set is completely arbitrary, but clear the rest. */
    env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
    env->vscr_sat.u64[1] = 0;
    set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
    set_flush_inputs_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
}

uint32_t ppc_get_vscr(CPUPPCState *env)
{
    uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
    return env->vscr | (sat << VSCR_SAT);
}

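/*
 * The condition register is handled as eight 4-bit fields; crf[0] is
 * CR0, the most significant nibble of the packed value.
 */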
void ppc_set_cr(CPUPPCState *env, uint64_t cr)
{
    for (int i = 7; i >= 0; i--) {
        env->crf[i] = cr & 0xf;
        cr >>= 4;
    }
}

uint64_t ppc_get_cr(const CPUPPCState *env)
{
    uint64_t cr = 0;
    for (int i = 0; i < 8; i++) {
        cr |= (env->crf[i] & 0xf) << (4 * (7 - i));
    }
    return cr;
}

/* GDBstub can read and write MSR... */
void ppc_store_msr(CPUPPCState *env, target_ulong value)
{
    hreg_store_msr(env, value, 0);
}

#if !defined(CONFIG_USER_ONLY)
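/*
 * LPCR writes are masked to the bits implemented by this CPU model;
 * changing LPCR may alter the hflags and requires interrupt delivery
 * to be re-evaluated.
 */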
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;

    env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
    /* The gtse bit affects hflags */
    hreg_compute_hflags(env);

    ppc_maybe_interrupt(env);
}

#if defined(TARGET_PPC64)
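/*
 * CIABR holds the completed-instruction breakpoint: bits 0-61 are the
 * effective address (CIEA), bits 62-63 the privilege mask. A non-zero
 * privilege mask arms the breakpoint.
 */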
void ppc_update_ciabr(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong ciabr = env->spr[SPR_CIABR];
    target_ulong ciea, priv;

    ciea = ciabr & PPC_BITMASK(0, 61);
    priv = ciabr & PPC_BITMASK(62, 63);

    if (env->ciabr_breakpoint) {
        cpu_breakpoint_remove_by_ref(cs, env->ciabr_breakpoint);
        env->ciabr_breakpoint = NULL;
    }

    if (priv) {
        cpu_breakpoint_insert(cs, ciea, BP_CPU, &env->ciabr_breakpoint);
    }
}

void ppc_store_ciabr(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_CIABR] = val;
    ppc_update_ciabr(env);
}

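/*
 * Re-install the data address watchpoint for DAWR0 or DAWR1 (selected
 * by rid). The doubleword effective address comes from DAWRn bits
 * 0-60; DAWRXn supplies the controls (MRD length field in bits 48-53,
 * DW/DR enables in bits 57-58, HV/SV/PR privilege mask in bits 61-63),
 * with the match length being MRD + 1 doublewords. No watchpoint is
 * installed if neither reads nor writes are matched, or if the
 * privilege mask is empty.
 */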
void ppc_update_daw(CPUPPCState *env, int rid)
{
    CPUState *cs = env_cpu(env);
    int spr_dawr = rid ? SPR_DAWR1 : SPR_DAWR0;
    int spr_dawrx = rid ? SPR_DAWRX1 : SPR_DAWRX0;
    target_ulong deaw = env->spr[spr_dawr] & PPC_BITMASK(0, 60);
    uint32_t dawrx = env->spr[spr_dawrx];
    int mrd = extract32(dawrx, PPC_BIT_NR(48), 54 - 48);
    bool dw = extract32(dawrx, PPC_BIT_NR(57), 1);
    bool dr = extract32(dawrx, PPC_BIT_NR(58), 1);
    bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
    bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
    bool pr = extract32(dawrx, PPC_BIT_NR(63), 1);
    vaddr len;
    int flags;

    if (env->dawr_watchpoint[rid]) {
        cpu_watchpoint_remove_by_ref(cs, env->dawr_watchpoint[rid]);
        env->dawr_watchpoint[rid] = NULL;
    }

    if (!dr && !dw) {
        return;
    }

    if (!hv && !sv && !pr) {
        return;
    }

    len = (mrd + 1) * 8;
    flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    if (dr) {
        flags |= BP_MEM_READ;
    }
    if (dw) {
        flags |= BP_MEM_WRITE;
    }

    cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr_watchpoint[rid]);
}

void ppc_store_dawr0(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_DAWR0] = val;
    ppc_update_daw(env, 0);
}

static void ppc_store_dawrx(CPUPPCState *env, uint32_t val, int rid)
{
    int hrammc = extract32(val, PPC_BIT_NR(56), 1);

    if (hrammc) {
        /* This might be done with a second watchpoint at the xor of DEAW[0] */
        qemu_log_mask(LOG_UNIMP, "%s: DAWRX%d[HRAMMC] is unimplemented\n",
                      __func__, rid);
    }

    env->spr[rid ? SPR_DAWRX1 : SPR_DAWRX0] = val;
    ppc_update_daw(env, rid);
}

void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
{
    ppc_store_dawrx(env, val, 0);
}

void ppc_store_dawr1(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_DAWR1] = val;
    ppc_update_daw(env, 1);
}

void ppc_store_dawrx1(CPUPPCState *env, uint32_t val)
{
    ppc_store_dawrx(env, val, 1);
}

#endif
#endif

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (env->fpscr & FP_RN) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

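/*
 * Writing FPSCR recomputes the summary bits: VX is set if any
 * individual invalid-operation exception bit is set, and FEX is set
 * when one of the five exception bits (XX, ZX, UX, OX, VX) is set
 * together with its corresponding enable bit (XE, ZE, UE, OE, VE).
 */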
void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
{
    val &= FPSCR_MTFS_MASK;
    if (val & FPSCR_IX) {
        val |= FP_VX;
    }
    if ((val >> FPSCR_XX) & (val >> FPSCR_XE) & 0x1f) {
        val |= FP_FEX;
    }
    env->fpscr = val;
    env->fp_status.rebias_overflow = (FP_OE & env->fpscr) ? true : false;
    env->fp_status.rebias_underflow = (FP_UE & env->fpscr) ? true : false;
    if (tcg_enabled()) {
        fpscr_set_rounding_mode(env);
    }
}