/*
 * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
 *
 * Copyright (C) 2022-2023 Rivos Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "cpu_bits.h"
#include "riscv-iommu-hpm.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "trace.h"

/*
 * For now we assume the IOMMU HPM frequency to be 1 GHz, so one cycle
 * equals one nanosecond.
 */
static inline uint64_t get_cycles(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
{
    const uint64_t cycle = riscv_iommu_reg_get64(
        s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    const uint64_t ctr_prev = s->hpmcycle_prev;
    const uint64_t ctr_val = s->hpmcycle_val;

    trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * The counter must not increment while the inhibit bit is set. We
         * can't actually stop QEMU_CLOCK_VIRTUAL, so we just return the
         * last updated counter value to indicate that the counter was not
         * incremented.
         */
        return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
               (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
    }

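    /*
     * The counter is running: the live value is the last programmed value
     * plus the cycles elapsed since the previous snapshot, with the OF bit
     * carried over from the current register contents.
     */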
    return (ctr_val + get_cycles() - ctr_prev) |
           (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}

static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
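    /* Counter registers are 64-bit, so the byte offset is ctr_idx * 8. */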
    const uint32_t off = ctr_idx << 3;
    uint64_t cntr_val;

    cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
    stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);

    trace_riscv_iommu_hpm_incr_ctr(cntr_val);

    /* Handle the overflow scenario. */
    if (cntr_val == UINT64_MAX) {
        /*
         * In IOCOUNTOVF bit 0 belongs to the cycle counter, so event
         * counter ctr_idx maps to bit ctr_idx + 1. riscv_iommu_reg_mod32()
         * returns the value prior to the update, so the interrupt is
         * generated only if the OF bit was previously clear.
         */
        const uint32_t ovf =
            riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                                  BIT(ctr_idx + 1), 0);
        if (!get_field(ovf, BIT(ctr_idx + 1))) {
            riscv_iommu_reg_mod64(s,
                                  RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
                                  RISCV_IOMMU_IOHPMEVT_OF,
                                  0);
            riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
        }
    }
}

void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
                              unsigned event_id)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t did_gscid;
    uint32_t pid_pscid;
    uint32_t ctr_idx;
    gpointer value;
    uint32_t ctrs;
    uint64_t evt;

    if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
        return;
    }

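    /* Look up the bitmask of counters programmed with this event ID. */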
    value = g_hash_table_lookup(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(event_id));
    if (value == NULL) {
        return;
    }

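    /*
     * Each set bit in the mask names one counter. Clearing the lowest set
     * bit on every iteration (ctrs &= ctrs - 1) visits each programmed
     * counter exactly once.
     */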
    for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
        ctr_idx = ctz32(ctrs);
        if (get_field(inhibit, BIT(ctr_idx + 1))) {
            continue;
        }

        evt = riscv_iommu_reg_get64(s,
            RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));

        /*
         * The event ID may already have been changed in the counter
         * register while the hash table hasn't been updated yet. Don't
         * increment the counter for the stale event ID.
         */
        if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
            continue;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
            did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
            pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
        } else {
            did_gscid = ctx->devid;
            pid_pscid = ctx->process_id;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
            /*
             * If the transaction does not have a valid process_id, the
             * counter increments if device_id matches DID_GSCID. If the
             * transaction has a valid process_id, the counter increments
             * if device_id matches DID_GSCID and process_id matches
             * PID_PSCID. See the RISC-V IOMMU Specification, Chapter 5.23,
             * Performance-monitoring event selector.
             */
            if (ctx->process_id &&
                get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
                continue;
            }
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
            uint32_t mask = ~0;

            if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
                /*
                 * 1001 1011  mask = GSCID
                 * 0000 0111  mask = mask ^ (mask + 1)
                 * 1111 1000  mask = ~mask;
                 */
                mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
                mask = mask ^ (mask + 1);
                mask = ~mask;
            }

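            /*
             * With DMASK set, the low-order bits of DID_GSCID up to and
             * including its first zero bit are ignored in the comparison,
             * so a whole power-of-two range of IDs can match.
             */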
            if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
                (did_gscid & mask)) {
                continue;
            }
        }

        hpm_incr_ctr(s, ctr_idx);
    }
}

/* Timer callback for cycle counter overflow. */
void riscv_iommu_hpm_timer_cb(void *priv)
{
    RISCVIOMMUState *s = priv;
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t ovf;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

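    /*
     * A previously computed overflow deadline exceeded the signed 64-bit
     * timer range (see hpm_setup_timer()); re-arm the timer for the
     * remaining nanoseconds before signaling the overflow.
     */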
    if (s->irq_overflow_left > 0) {
        uint64_t irq_trigger_at =
            qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
        timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
        s->irq_overflow_left = 0;
        return;
    }

    ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
        /*
         * We don't need to set hpmcycle_val to zero and update
         * hpmcycle_prev to the current clock value. The way we calculate
         * the iohpmcycles read value will overflow and return the correct
         * value. This avoids the need to synchronize the timer callback
         * and the write callback.
         */
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                              RISCV_IOMMU_IOCOUNTOVF_CY, 0);
        riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
                              RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
        riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
    }
}

static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint64_t overflow_at, overflow_ns;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

    /*
     * We are using INT64_MAX here instead of UINT64_MAX because the cycle
     * counter has 63-bit precision and INT64_MAX is the maximum it can
     * store.
     */
    if (value) {
        overflow_ns = INT64_MAX - value + 1;
    } else {
        overflow_ns = INT64_MAX;
    }

    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;

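    /*
     * QEMU timers take a signed 64-bit deadline. If the computed expiry is
     * past INT64_MAX, arm the timer for the maximum and stash the
     * remainder in irq_overflow_left so the timer callback can re-arm for
     * the rest.
     */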
    if (overflow_at > INT64_MAX) {
        s->irq_overflow_left = overflow_at - INT64_MAX;
        overflow_at = INT64_MAX;
    }

    timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
}

/* Updates the internal cycle counter state when iocntinh:CY is changed. */
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);

    /* We only need to process a CY bit toggle. */
    if (!(get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY) ^ prev_cy_inh)) {
        return;
    }

    trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);

    if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * The cycle counter is enabled. Just start the timer again and
         * update the clock snapshot value to point to the current time to
         * make sure the iohpmcycles read is correct.
         */
        s->hpmcycle_prev = get_cycles();
        hpm_setup_timer(s, s->hpmcycle_val);
    } else {
        /*
         * The cycle counter is disabled. Stop the timer and update the
         * cycle counter to record the current value, which is the last
         * programmed value plus the cycles passed so far.
         */
        s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
        timer_del(s->hpm_timer);
    }
}

void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
{
    const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);

    trace_riscv_iommu_hpm_cycle_write(ovf, val);

    /*
     * Clear the OF bit in IOCNTOVF if it's being cleared in the
     * IOHPMCYCLES register.
     */
    if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
        !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
                              RISCV_IOMMU_IOCOUNTOVF_CY);
    }

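    /*
     * Store the programmed value without the OF bit, snapshot the current
     * clock, and re-arm the overflow timer from the new starting value.
     */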
    s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
    s->hpmcycle_prev = get_cycles();
    hpm_setup_timer(s, s->hpmcycle_val);
}

static inline bool check_valid_event_id(unsigned event_id)
{
    return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
           event_id < RISCV_IOMMU_HPMEVENT_MAX;
}

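/*
 * GHRFunc for g_hash_table_find(): udata points to a { ctr_idx, result }
 * pair. Matches the mapping whose counter bitmask contains ctr_idx and
 * reports the mapped event ID back through pair[1].
 */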
static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
{
    uint32_t *pair = udata;

    if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
        pair[1] = GPOINTER_TO_UINT(key);
        return true;
    }

    return false;
}

/* Caller must check ctr_idx against s->hpm_cntrs to see if it's supported. */
static void update_event_map(RISCVIOMMUState *s, uint64_t value,
                             uint32_t ctr_idx)
{
    unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
    uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
    uint32_t new_value = 1 << ctr_idx;
    gpointer data;

    /*
     * If the EventID field is RISCV_IOMMU_HPMEVENT_INVALID,
     * remove the current mapping.
     */
    if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
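        /*
         * Find the event currently routed to this counter and drop this
         * counter's bit from its mask, removing the entry entirely when no
         * counters remain.
         */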
        data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);

        new_value = GPOINTER_TO_UINT(data) & ~(new_value);
        if (new_value != 0) {
            g_hash_table_replace(s->hpm_event_ctr_map,
                                 GUINT_TO_POINTER(pair[1]),
                                 GUINT_TO_POINTER(new_value));
        } else {
            g_hash_table_remove(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(pair[1]));
        }

        return;
    }

    /* Update the counter mask if the event is already enabled. */
    if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
                                     GUINT_TO_POINTER(event_id),
                                     NULL,
                                     &data)) {
        new_value |= GPOINTER_TO_UINT(data);
    }

    g_hash_table_insert(s->hpm_event_ctr_map,
                        GUINT_TO_POINTER(event_id),
                        GUINT_TO_POINTER(new_value));
}

void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
{
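    /* IOHPMEVT registers are 8 bytes apart; byte offset >> 3 is the index. */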
    const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    uint64_t val = riscv_iommu_reg_get64(s, evt_reg);

    if (ctr_idx >= s->hpm_cntrs) {
        return;
    }

    trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);

    /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
    if (get_field(ovf, BIT(ctr_idx + 1)) &&
        !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
        /* +1 to offset the CYCLE register OF bit. */
        riscv_iommu_reg_mod32(
            s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
    }

    if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
        /* Reset the EventID (WARL) field to invalid. */
        val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
                        RISCV_IOMMU_HPMEVENT_INVALID);
        riscv_iommu_reg_set64(s, evt_reg, val);
    }

    update_event_map(s, val, ctr_idx);
}