xref: /qemu/target/ppc/power8-pmu.c (revision 7cef6d686309e2792186504ae17cf4f3eb57ef68)
1 /*
2  * PMU emulation helpers for TCG IBM POWER chips
3  *
4  *  Copyright IBM Corp. 2021
5  *
6  * Authors:
7  *  Daniel Henrique Barboza      <danielhb413@gmail.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "cpu.h"
15 #include "helper_regs.h"
16 #include "exec/helper-proto.h"
17 #include "qemu/error-report.h"
18 #include "qemu/timer.h"
19 #include "hw/ppc/ppc.h"
20 #include "power8-pmu.h"
21 
22 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
23 
pmc_has_overflow_enabled(CPUPPCState * env,int sprn)24 static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
25 {
26     if (sprn == SPR_POWER_PMC1) {
27         return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
28     }
29 
30     return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
31 }
32 
/*
 * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
 * hflags must subsequently be updated.
 *
 * Both summaries are bitmaps indexed by PMC number: bit n set means
 * PMCn is currently counting instructions (pmc_ins_cnt) or cycles
 * (pmc_cyc_cnt).  Bit 0 is unused.
 */
static void pmu_update_summaries(CPUPPCState *env)
{
    target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
    target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
    int ins_cnt = 0;
    int cyc_cnt = 0;

    /* MMCR0_FC freezes all counters: both summaries collapse to zero. */
    if (mmcr0 & MMCR0_FC) {
        goto out;
    }

    /*
     * PMC1-PMC4 are programmable via MMCR1 event selectors; they only
     * contribute when not frozen by MMCR0_FC14 and MMCR1 selects events.
     */
    if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
        target_ulong sel;

        /* PMC1: two selectors each for instruction and cycle counting. */
        sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
        switch (sel) {
        case 0x02:
        case 0xfe:
            ins_cnt |= 1 << 1;
            break;
        case 0x1e:
        case 0xf0:
            cyc_cnt |= 1 << 1;
            break;
        }

        /* PMC2/PMC3: selector 0x02 counts instructions, 0x1e cycles. */
        sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 2;
        cyc_cnt |= (sel == 0x1e) << 2;

        sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= (sel == 0x02) << 3;
        cyc_cnt |= (sel == 0x1e) << 3;

        /* PMC4 additionally accepts selector 0xfa for instructions. */
        sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
        ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
        cyc_cnt |= (sel == 0x1e) << 4;
    }

    /*
     * PMC5 (instructions) and PMC6 (cycles) are fixed-function and only
     * gated by the shared freeze bit MMCR0_FC56.
     */
    ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
    cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;

 out:
    env->pmc_ins_cnt = ins_cnt;
    env->pmc_cyc_cnt = cyc_cnt;
}
83 
/*
 * Recompute env->bhrb_filter from MMCR0_PMAE and the MMCRA IFM field.
 * BHRB entries are only recorded while MMCR0_PMAE is set.
 */
static void hreg_bhrb_filter_update(CPUPPCState *env)
{
    if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
        /* Alerts disabled: stop recording to the BHRB entirely. */
        env->bhrb_filter = BHRB_TYPE_NORECORD;
        return;
    }

    switch ((env->spr[SPR_POWER_MMCRA] & MMCRA_IFM_MASK) >> MMCRA_IFM_SHIFT) {
    case 0:
        /* IFM=0: record every branch */
        env->bhrb_filter = -1;
        break;
    case 1:
        /* IFM=1: record only branches with LK = 1 (calls) */
        env->bhrb_filter = BHRB_TYPE_CALL;
        break;
    case 2:
        /* IFM=2: record only indirect branches */
        env->bhrb_filter = BHRB_TYPE_INDIRECT;
        break;
    case 3:
        /* IFM=3: record only conditional branches */
        env->bhrb_filter = BHRB_TYPE_COND;
        break;
    }
}
114 
/*
 * Propagate a change of MMCR0/MMCR1/MMCRA: refresh the counting
 * summaries, the PMU hflags, the PERFM interrupt line (which mirrors
 * MMCR0_PMAO) and the BHRB filter.
 */
void pmu_mmcr01a_updated(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool pmao_set = env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO;

    pmu_update_summaries(env);
    hreg_update_pmu_hflags(env);

    /* The PERFM interrupt line tracks MMCR0_PMAO directly. */
    ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, pmao_set ? 1 : 0);

    hreg_bhrb_filter_update(env);

    /*
     * Should this update overflow timers (if mmcr0 is updated) so they
     * get set in cpu_post_load?
     */
}
135 
pmu_increment_insns(CPUPPCState * env,uint32_t num_insns)136 static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
137 {
138     target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
139     unsigned ins_cnt = env->pmc_ins_cnt;
140     bool overflow_triggered = false;
141     target_ulong tmp;
142 
143     if (ins_cnt & (1 << 1)) {
144         tmp = env->spr[SPR_POWER_PMC1];
145         tmp += num_insns;
146         if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
147             tmp = PMC_COUNTER_NEGATIVE_VAL;
148             overflow_triggered = true;
149         }
150         env->spr[SPR_POWER_PMC1] = tmp;
151     }
152 
153     if (ins_cnt & (1 << 2)) {
154         tmp = env->spr[SPR_POWER_PMC2];
155         tmp += num_insns;
156         if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
157             tmp = PMC_COUNTER_NEGATIVE_VAL;
158             overflow_triggered = true;
159         }
160         env->spr[SPR_POWER_PMC2] = tmp;
161     }
162 
163     if (ins_cnt & (1 << 3)) {
164         tmp = env->spr[SPR_POWER_PMC3];
165         tmp += num_insns;
166         if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
167             tmp = PMC_COUNTER_NEGATIVE_VAL;
168             overflow_triggered = true;
169         }
170         env->spr[SPR_POWER_PMC3] = tmp;
171     }
172 
173     if (ins_cnt & (1 << 4)) {
174         target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
175         int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
176         if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
177             tmp = env->spr[SPR_POWER_PMC4];
178             tmp += num_insns;
179             if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
180                 tmp = PMC_COUNTER_NEGATIVE_VAL;
181                 overflow_triggered = true;
182             }
183             env->spr[SPR_POWER_PMC4] = tmp;
184         }
185     }
186 
187     if (ins_cnt & (1 << 5)) {
188         tmp = env->spr[SPR_POWER_PMC5];
189         tmp += num_insns;
190         if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
191             tmp = PMC_COUNTER_NEGATIVE_VAL;
192             overflow_triggered = true;
193         }
194         env->spr[SPR_POWER_PMC5] = tmp;
195     }
196 
197     return overflow_triggered;
198 }
199 
/*
 * Fold the virtual time elapsed since pmu_base_time into every PMC that
 * is currently counting cycles, then rebase pmu_base_time to now.
 */
static void pmu_update_cycles(CPUPPCState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed = now - env->pmu_base_time;
    int cyc_cnt = env->pmc_cyc_cnt;
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        int pmc_bit = sprn - SPR_POWER_PMC1 + 1;

        if (cyc_cnt & (1 << pmc_bit)) {
            /*
             * The pseries and powernv clock runs at 1Ghz, meaning
             * that 1 nanosec equals 1 cycle.
             */
            env->spr[sprn] += elapsed;
        }
    }

    /* Future deltas are measured from this instant. */
    env->pmu_base_time = now;
}
219 
220 /*
221  * Helper function to retrieve the cycle overflow timer of the
222  * 'sprn' counter.
223  */
get_cyc_overflow_timer(CPUPPCState * env,int sprn)224 static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
225 {
226     return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
227 }
228 
/*
 * (Re)arm or disarm the cycle overflow timer of a single PMC according
 * to its current value and the counting/overflow-enable state.
 */
static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
{
    QEMUTimer *timer = get_cyc_overflow_timer(env, sprn);
    bool counting_cycles;
    int64_t timeout;

    /* PMC5 never counts cycles; its timer pointer is NULL. */
    if (timer == NULL) {
        return;
    }

    counting_cycles = env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1));
    if (!counting_cycles || !pmc_has_overflow_enabled(env, sprn)) {
        /* No overflow event can happen: make sure no stale timer fires. */
        timer_del(timer);
        return;
    }

    /* Nanoseconds (== cycles) until the counter goes negative. */
    timeout = (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL)
            ? 0
            : PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];

    /*
     * Use timer_mod_anticipate() because an overflow timer might
     * be already running for this PMC.
     */
    timer_mod_anticipate(timer, env->pmu_base_time + timeout);
}
261 
/*
 * Refresh the cycle overflow timer of every PMC.  Counters that are not
 * counting cycles (or have alerts disabled) get their timer deleted.
 */
static void pmu_update_overflow_timers(CPUPPCState *env)
{
    int sprn = SPR_POWER_PMC1;

    while (sprn <= SPR_POWER_PMC6) {
        pmc_update_overflow_timer(env, sprn);
        sprn++;
    }
}
274 
/*
 * Cancel every pending cycle overflow timer, e.g. when the PMU is
 * frozen.  Timers are re-armed once counting resumes.
 */
static void pmu_delete_timers(CPUPPCState *env)
{
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        QEMUTimer *timer = get_cyc_overflow_timer(env, sprn);

        /* PMC5 has no timer (NULL pointer); skip it. */
        if (timer != NULL) {
            timer_del(timer);
        }
    }
}
288 
/*
 * mtspr handler for MMCR0.  The order matters: accumulated cycles must
 * be folded into the PMCs under the OLD control value before the new
 * one takes effect.
 */
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR0] = value;

    /* Refresh summaries, hflags, PERFM irq and BHRB filter. */
    pmu_mmcr01a_updated(env);

    /* Update cycle overflow timers with the current MMCR0 state */
    pmu_update_overflow_timers(env);
}
300 
/*
 * mtspr handler for MMCR1.  Cycles are accumulated under the old event
 * selections before the new ones are installed.
 */
void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[SPR_POWER_MMCR1] = value;

    pmu_mmcr01a_updated(env);
}
309 
/*
 * mtspr handler for MMCRA.  Unlike MMCR0/MMCR1 this does not call
 * pmu_update_cycles(): MMCRA does not affect which PMCs count, only the
 * BHRB filter (refreshed via pmu_mmcr01a_updated).
 */
void helper_store_mmcrA(CPUPPCState *env, uint64_t value)
{
    env->spr[SPR_POWER_MMCRA] = value;

    pmu_mmcr01a_updated(env);
}
316 
/*
 * mfspr handler for the PMCs: fold pending cycle deltas into the
 * counters first so the guest reads an up-to-date value.
 */
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
{
    pmu_update_cycles(env);

    return env->spr[sprn];
}
323 
/*
 * mtspr handler for the PMCs.  Pending cycles are accumulated first so
 * the overwrite happens on a consistent base time; the stored value is
 * truncated to 32 bits (PMCs are 32-bit counters).
 */
void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
{
    pmu_update_cycles(env);

    env->spr[sprn] = (uint32_t)value;

    /* The new counter value changes when this PMC would overflow. */
    pmc_update_overflow_timer(env, sprn);
}
332 
/*
 * Deliver a Performance Monitor alert: freeze counting if FCECE is set,
 * convert PMAE into PMAO (raising the PERFM interrupt), and fire an EBB
 * exception if one is configured.
 */
static void perfm_alert(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Bring the counters up to date before touching control state. */
    pmu_update_cycles(env);

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
        /* Freeze Counters on Enabled Condition or Event: set FC. */
        env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;

        /* Changing MMCR0_FC requires summaries and hflags update */
        pmu_mmcr01a_updated(env);

        /*
         * Delete all pending timers if we need to freeze
         * the PMC. We'll restart them when the PMC starts
         * running again.
         */
        pmu_delete_timers(env);
    }

    if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
        /* These MMCR0 bits do not require summaries or hflags update. */
        env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
        env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
        ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
    }

    raise_ebb_perfm_exception(env);
}
362 
/*
 * Called from translated code when PMC5 (instruction counter) reaches
 * its overflow point: pin it at the counter-negative value and raise
 * the Performance Monitor alert.
 */
void helper_handle_pmc5_overflow(CPUPPCState *env)
{
    env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
    perfm_alert(env_archcpu(env));
}
368 
369 /* This helper assumes that the PMC is running. */
helper_insns_inc(CPUPPCState * env,uint32_t num_insns)370 void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
371 {
372     bool overflow_triggered;
373 
374     overflow_triggered = pmu_increment_insns(env, num_insns);
375     if (overflow_triggered) {
376         perfm_alert(env_archcpu(env));
377     }
378 }
379 
/*
 * QEMUTimer callback for the cycle overflow timers: a counter has
 * reached its overflow deadline, so raise the PM alert.
 * opaque is the PowerPCCPU passed to timer_new_ns() in cpu_ppc_pmu_init.
 */
static void cpu_ppc_pmu_timer_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    perfm_alert(cpu);
}
386 
/*
 * Allocate one virtual-clock overflow timer per PMC, except PMC5 which
 * never counts cycles — its pmu_cyc_overflow_timers slot stays NULL,
 * which the rest of this file checks for.
 */
void cpu_ppc_pmu_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int sprn;

    for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
        if (sprn != SPR_POWER_PMC5) {
            int idx = sprn - SPR_POWER_PMC1;

            env->pmu_cyc_overflow_timers[idx] =
                timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_pmu_timer_cb, cpu);
        }
    }
}
404 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
405