/*
 * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
 *
 * Copyright (C) 2022-2023 Rivos Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "cpu_bits.h"
#include "riscv-iommu-hpm.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "trace.h"

/* For now we assume IOMMU HPM frequency to be 1GHz, so 1 cycle is 1 ns. */
static inline uint64_t get_cycles(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

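/*
 * Return the current IOHPMCYCLES value: the saved counter snapshot plus the
 * cycles elapsed since the last snapshot, with the overflow (OF) bit taken
 * from the guest-visible register. While counting is inhibited, only the
 * saved snapshot is returned.
 */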
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
{
    const uint64_t cycle = riscv_iommu_reg_get64(
        s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    const uint64_t ctr_prev = s->hpmcycle_prev;
    const uint64_t ctr_val = s->hpmcycle_val;

    trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * The counter should not increment if the inhibit bit is set. We
         * can't really stop the QEMU_CLOCK_VIRTUAL, so we just return the
         * last updated counter value to indicate that the counter was not
         * incremented.
         */
        return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
               (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
    }

    return (ctr_val + get_cycles() - ctr_prev) |
           (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}

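/*
 * Increment hardware performance counter 'ctr_idx' by one. On counter
 * wrap-around, set the corresponding bit in IOCOUNTOVF and, if that overflow
 * bit was not already set, also set the OF bit in the matching IOHPMEVT
 * register and raise a performance-monitoring interrupt.
 */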
static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
    const uint32_t off = ctr_idx << 3;
    uint64_t cntr_val;

    cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
    stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);

    trace_riscv_iommu_hpm_incr_ctr(cntr_val);

    /* Handle the overflow scenario. */
    if (cntr_val == UINT64_MAX) {
        /*
         * Generate interrupt only if OF bit is clear. +1 to offset the cycle
         * register OF bit.
         */
        const uint32_t ovf =
            riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                                  BIT(ctr_idx + 1), 0);
        if (!get_field(ovf, BIT(ctr_idx + 1))) {
            riscv_iommu_reg_mod64(s,
                                  RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
                                  RISCV_IOMMU_IOHPMEVT_OF,
                                  0);
            riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
        }
    }
}

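/*
 * Increment all programmable counters that are configured to count
 * 'event_id' for the translation context 'ctx'. Counters that are
 * inhibited, programmed with a different event ID, or whose PID_PSCID or
 * DID_GSCID filters do not match the context are skipped.
 */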
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
                              unsigned event_id)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t did_gscid;
    uint32_t pid_pscid;
    uint32_t ctr_idx;
    gpointer value;
    uint32_t ctrs;
    uint64_t evt;

    if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
        return;
    }

    value = g_hash_table_lookup(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(event_id));
    if (value == NULL) {
        return;
    }

    for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
        ctr_idx = ctz32(ctrs);
        if (get_field(inhibit, BIT(ctr_idx + 1))) {
            continue;
        }

        evt = riscv_iommu_reg_get64(s,
            RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));

        /*
         * It's quite possible that the event ID has been changed in the
         * counter but the hash table hasn't been updated yet. We don't want
         * to increment the counter for the old event ID.
         */
        if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
            continue;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
            did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
            pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
        } else {
            did_gscid = ctx->devid;
            pid_pscid = ctx->process_id;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
            /*
             * If the transaction does not have a valid process_id, the
             * counter increments if device_id matches DID_GSCID. If the
             * transaction has a valid process_id, the counter increments if
             * device_id matches DID_GSCID and process_id matches PID_PSCID.
             * See IOMMU Specification, Chapter 5.23, Performance-monitoring
             * event selector.
             */
            if (ctx->process_id &&
                get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
                continue;
            }
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
            uint32_t mask = ~0;

            if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
                /*
                 * 1001 1011   mask = GSCID
                 * 0000 0111   mask = mask ^ (mask + 1)
                 * 1111 1000   mask = ~mask;
                 */
                mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
                mask = mask ^ (mask + 1);
                mask = ~mask;
            }

            if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
                (did_gscid & mask)) {
                continue;
            }
        }

        hpm_incr_ctr(s, ctr_idx);
    }
}

/* Timer callback for cycle counter overflow. */
void riscv_iommu_hpm_timer_cb(void *priv)
{
    RISCVIOMMUState *s = priv;
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t ovf;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

    if (s->irq_overflow_left > 0) {
        uint64_t irq_trigger_at =
            qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
        timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
        s->irq_overflow_left = 0;
        return;
    }

    ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
        /*
         * We don't need to set hpmcycle_val to zero and update hpmcycle_prev
         * to the current clock value. The way we calculate iohpmcycles will
         * overflow and return the correct value. This avoids the need to
         * synchronize the timer callback and the write callback.
         */
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                              RISCV_IOMMU_IOCOUNTOVF_CY, 0);
        riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
                              RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
        riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
    }
}

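/*
 * Program the HPM timer to expire when the cycle counter, counting up from
 * 'value', would overflow its 63-bit range. If the deadline does not fit in
 * the timer (INT64_MAX ns), the remainder is stored in irq_overflow_left and
 * handled by the timer callback. Does nothing while the cycle counter is
 * inhibited.
 */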
static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint64_t overflow_at, overflow_ns;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

    /*
     * We are using INT64_MAX here instead of UINT64_MAX because the cycle
     * counter has 63-bit precision and INT64_MAX is the maximum it can
     * store.
     */
    if (value) {
        overflow_ns = INT64_MAX - value + 1;
    } else {
        overflow_ns = INT64_MAX;
    }

    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;

    if (overflow_at > INT64_MAX) {
        s->irq_overflow_left = overflow_at - INT64_MAX;
        overflow_at = INT64_MAX;
    }

    timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
}

/* Updates the internal cycle counter state when iocntinh:CY is changed. */
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);

    /* We only need to process CY bit toggle. */
    if (!(inhibit ^ prev_cy_inh)) {
        return;
    }

    trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);

    if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * The cycle counter is enabled. Just start the timer again and
         * update the clock snapshot value to point to the current time to
         * make sure the iohpmcycles read is correct.
         */
        s->hpmcycle_prev = get_cycles();
        hpm_setup_timer(s, s->hpmcycle_val);
    } else {
        /*
         * The cycle counter is disabled. Stop the timer and update the
         * cycle counter to record the current value, which is the last
         * programmed value + the cycles passed so far.
         */
        s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
        timer_del(s->hpm_timer);
    }
}

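/*
 * Handle a guest write to the IOHPMCYCLES register: clear the cycle overflow
 * bit in IOCOUNTOVF when the guest clears the OF bit, snapshot the written
 * counter value and re-arm the overflow timer.
 */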
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
{
    const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);

    trace_riscv_iommu_hpm_cycle_write(ovf, val);

    /*
     * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
     */
    if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
        !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
                              RISCV_IOMMU_IOCOUNTOVF_CY);
    }

    s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
    s->hpmcycle_prev = get_cycles();
    hpm_setup_timer(s, s->hpmcycle_val);
}

static inline bool check_valid_event_id(unsigned event_id)
{
    return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
           event_id < RISCV_IOMMU_HPMEVENT_MAX;
}

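/*
 * GHRFunc callback for g_hash_table_find(): match the entry whose counter
 * bitmask contains the counter index passed in udata[0], returning the
 * corresponding event ID through udata[1].
 */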
static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
{
    uint32_t *pair = udata;

    if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
        pair[1] = GPOINTER_TO_UINT(key);
        return true;
    }

    return false;
}

/*
 * The caller must check ctr_idx against hpm_cntrs to see if it's supported
 * or not.
 */
static void update_event_map(RISCVIOMMUState *s, uint64_t value,
                             uint32_t ctr_idx)
{
    unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
    uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
    uint32_t new_value = 1 << ctr_idx;
    gpointer data;

    /*
     * If the EventID field is RISCV_IOMMU_HPMEVENT_INVALID,
     * remove the current mapping.
     */
    if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
        data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);

        new_value = GPOINTER_TO_UINT(data) & ~(new_value);
        if (new_value != 0) {
            g_hash_table_replace(s->hpm_event_ctr_map,
                                 GUINT_TO_POINTER(pair[1]),
                                 GUINT_TO_POINTER(new_value));
        } else {
            g_hash_table_remove(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(pair[1]));
        }

        return;
    }

    /* Update the counter mask if the event is already enabled. */
    if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
                                     GUINT_TO_POINTER(event_id),
                                     NULL,
                                     &data)) {
        new_value |= GPOINTER_TO_UINT(data);
    }

    g_hash_table_insert(s->hpm_event_ctr_map,
                        GUINT_TO_POINTER(event_id),
                        GUINT_TO_POINTER(new_value));
}

/*
 * Handle a guest write to an IOHPMEVT register: clear the matching overflow
 * bit in IOCOUNTOVF when the OF bit is being cleared, reset an invalid
 * EventID (WARL) back to RISCV_IOMMU_HPMEVENT_INVALID, and refresh the
 * event-to-counter mapping.
 */
void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
{
    const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    uint64_t val = riscv_iommu_reg_get64(s, evt_reg);

    if (ctr_idx >= s->hpm_cntrs) {
        return;
    }

    trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);

    /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
    if (get_field(ovf, BIT(ctr_idx + 1)) &&
        !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
        /* +1 to offset CYCLE register OF bit. */
        riscv_iommu_reg_mod32(
            s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
    }

    if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
        /* Reset EventID (WARL) field to invalid. */
        val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
                        RISCV_IOMMU_HPMEVENT_INVALID);
        riscv_iommu_reg_set64(s, evt_reg, val);
    }

    update_event_map(s, val, ctr_idx);
}