@@ -36,5 +36,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
                           uint32_t ctr_idx);
 void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
                                  bool new_virt);
+RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
+                                  bool upper_half, uint32_t ctr_idx);
 
 #endif /* RISCV_PMU_H */
@@ -1039,6 +1039,9 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
         goto done;
     }
 
+    /* Update counter before reading. */
+    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
+
     if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
         curr_val += counter_arr[PRV_M];
     }
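
The hunk above first refreshes the fixed counters and only then sums the
per-privilege-mode buckets that are not inhibited (the MCYCLECFG_BIT_MINH check
and the ones that follow it). A minimal standalone sketch of that "fold in the
pending delta, then sum the non-inhibited buckets" pattern, using hypothetical
demo_* names rather than the QEMU API:

    /* Illustrative only -- not QEMU code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { MODE_U, MODE_S, MODE_M, NR_MODES };

    struct demo_ctr {
        uint64_t bucket[NR_MODES]; /* ticks attributed to each mode so far */
        uint64_t pending;          /* ticks not yet folded into a bucket */
        int cur_mode;              /* mode the pending ticks belong to */
    };

    static void demo_fold_pending(struct demo_ctr *c)
    {
        c->bucket[c->cur_mode] += c->pending;
        c->pending = 0;
    }

    static uint64_t demo_read(struct demo_ctr *c, const bool inhibit[NR_MODES])
    {
        uint64_t val = 0;

        demo_fold_pending(c); /* "update counter before reading" */
        for (int m = 0; m < NR_MODES; m++) {
            if (!inhibit[m]) {
                val += c->bucket[m]; /* skip inhibited modes */
            }
        }
        return val;
    }

    int main(void)
    {
        struct demo_ctr c = { .bucket = { 100, 50, 25 }, .pending = 7,
                              .cur_mode = MODE_S };
        bool inhibit[NR_MODES] = { [MODE_M] = true }; /* e.g. an MINH-style bit */

        /* Prints 157: 100 (U) + 50 + 7 (S); the M-mode bucket is filtered. */
        printf("%llu\n", (unsigned long long)demo_read(&c, inhibit));
        return 0;
    }

Skipping the refresh would return a sum that misses whatever has accumulated in
the current mode since the last update, which appears to be what the added
riscv_pmu_update_fixed_ctrs() call guards against.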
@@ -1122,7 +1125,7 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
-static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
+RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                          bool upper_half, uint32_t ctr_idx)
 {
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@@ -425,6 +425,8 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
     target_ulong *mhpmevent_val;
     uint64_t of_bit_mask;
    int64_t irq_trigger_at;
+    uint64_t curr_ctr_val = 0, curr_ctrh_val = 0;
+    uint64_t ctr_val;
 
     if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
         evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
@@ -454,6 +456,26 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
         return;
     }
 
+    riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
+    ctr_val = counter->mhpmcounter_val;
+    if (riscv_cpu_mxl(env) == MXL_RV32) {
+        riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
+        curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
+        ctr_val = ctr_val |
+                  ((uint64_t)counter->mhpmcounterh_val << 32);
+    }
+
+    /*
+     * We cannot account for inhibited modes when setting up the timer. Check
+     * whether the counter has actually overflowed by comparing the current
+     * counter value (which does account for inhibited modes) against the
+     * value software wrote to the counter.
+     */
+    if (curr_ctr_val >= ctr_val) {
+        riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
+        return;
+    }
+
     if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
         /* Generate interrupt only if OF bit is clear */
         if (!(*mhpmevent_val & of_bit_mask)) {
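The comparison added above encodes the following invariant: the guest typically
programs the counter with a start value close to UINT64_MAX, and an overflow
means the counter has wrapped past UINT64_MAX back to a small value. Until that
happens the current value is still >= the programmed value, so the callback
re-arms the timer from the current value instead of injecting the interrupt. A
small standalone illustration (hypothetical demo_has_overflowed(), illustrative
numbers only):

    /* Illustrative only -- not QEMU code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool demo_has_overflowed(uint64_t programmed, uint64_t current)
    {
        /* Same test as the patch: no wrap yet while current >= programmed. */
        return current < programmed;
    }

    int main(void)
    {
        uint64_t programmed = UINT64_MAX - 1000; /* IRQ wanted in ~1000 ticks */

        /* Counter ran slower than the timer (some modes inhibited): no wrap. */
        printf("%d\n", demo_has_overflowed(programmed, UINT64_MAX - 400)); /* 0 */

        /* Counter wrapped past UINT64_MAX to a small value: overflow. */
        printf("%d\n", demo_has_overflowed(programmed, 5));                /* 1 */
        return 0;
    }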
@@ -475,7 +497,7 @@ void riscv_pmu_timer_cb(void *priv)
 
 int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
 {
-    uint64_t overflow_delta, overflow_at;
+    uint64_t overflow_delta, overflow_at, curr_ns;
     int64_t overflow_ns, overflow_left = 0;
     RISCVCPU *cpu = env_archcpu(env);
     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@@ -506,8 +528,11 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
     } else {
         return -1;
     }
-    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-                  overflow_ns;
+    curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    overflow_at = curr_ns + overflow_ns;
+    if (overflow_at <= curr_ns) {
+        overflow_at = UINT64_MAX;
+    }
 
     if (overflow_at > INT64_MAX) {
         overflow_left += overflow_at - INT64_MAX;
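
The curr_ns/overflow_at change above is a saturating add: overflow_ns can be
large enough that curr_ns + overflow_ns wraps around uint64_t, and a wrapped
(small) deadline would expire the timer far too early. Detecting the wrap and
clamping to UINT64_MAX keeps the deadline in the existing
"overflow_at > INT64_MAX" path right below. A minimal sketch of the same idea,
with a hypothetical saturating_add_u64() helper that is not a QEMU function:

    /* Illustrative only -- not QEMU code. */
    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t saturating_add_u64(uint64_t base, uint64_t delta)
    {
        uint64_t sum = base + delta; /* may wrap around */

        return (sum < base) ? UINT64_MAX : sum;
    }

    int main(void)
    {
        printf("%" PRIu64 "\n", saturating_add_u64(100, 200));           /* 300 */
        printf("%" PRIu64 "\n", saturating_add_u64(UINT64_MAX - 5, 10)); /* UINT64_MAX */
        return 0;
    }

The patch writes the test as "overflow_at <= curr_ns", which differs from the
generic "<" form only when overflow_ns is zero; either way the wrapped case is
caught before the overflow_at > INT64_MAX handling below accumulates
overflow_left.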