@@ -136,6 +136,8 @@ typedef struct CPUIOTLBEntry {
CPU_COMMON_TLB \
/* Instruction Count (for profiling) */ \
uint64_t prof_ic; \
+ /* How much of prof_ic's value have we processed? */ \
+ uint64_t prof_ic_last; \
/* PC (for profiling) */ \
uint64_t prof_pc; \
/* next page start (for profiling) */ \
@@ -673,6 +673,15 @@ static inline bool arm_ccnt_enabled(CPUARMState *env)
return true;
}
+/* Increment the PMU event counters configured for @event_type by
+ * @increment_by. SWINC (event 0x000) is excluded here because software
+ * increments target specific counters rather than an event type.
+ */
+static void pmevcntr_increment(CPUARMState *env, uint8_t event_type,
+ uint64_t increment_by)
+{
+}
+
void pmccntr_sync(CPUARMState *env)
{
uint64_t temp_ticks;
@@ -4060,6 +4069,40 @@ void HELPER(context_check_pid)(CPUARMState *env)
bb_context_check_pid(env->prof_ic, pid);
}
+
+void HELPER(update_instruction_count)(CPUARMState *env)
+{
+ if (bbtrace_initialized()) {
+        /*
+         * bbv_profile must see prof_ic before it is cleared, but it does
+         * not clear the counter on every call, so we track the last value
+         * seen in prof_ic_last to compute the correct per-call delta.
+         * NOTE(review): pmevcntr_increment() is never called in this
+         * branch, so PMU counts stop while bbtrace is active -- confirm
+         * that is intended.
+         */
+ increment_instruction_counters(env->prof_ic - env->prof_ic_last);
+ if (env->prof_pc && env->prof_is_jmp) {
+            /* End of a basic block: reset the last-seen counter as well. */
+ env->prof_ic_last = 0;
+ } else {
+ env->prof_ic_last = env->prof_ic;
+ }
+ } else {
+ pmevcntr_increment(env, PMU_COUNTER_TYPE_INSTRUCTIONS, env->prof_ic);
+ pmevcntr_increment(env, PMU_COUNTER_TYPE_CYCLES, env->prof_ic);
+ env->prof_ic = 0;
+ }
+}
+
+#else //!CONFIG_BBVEC
+
+void HELPER(update_instruction_count)(CPUARMState *env)
+{
+ pmevcntr_increment(env, PMU_COUNTER_TYPE_INSTRUCTIONS, env->prof_ic);
+ pmevcntr_increment(env, PMU_COUNTER_TYPE_CYCLES, env->prof_ic);
+ env->prof_ic = 0;
+}
+
#endif //CONFIG_BBVEC
/* Sign/zero extend */
@@ -43,6 +43,8 @@ DEF_HELPER_1(context_check_mode, void, env)
DEF_HELPER_1(context_check_pid, void, env)
#endif // CONFIG_BBVEC
+DEF_HELPER_1(update_instruction_count, void, env)
+
DEF_HELPER_3(ssat, i32, env, i32, i32)
DEF_HELPER_3(usat, i32, env, i32, i32)
DEF_HELPER_3(ssat16, i32, env, i32, i32)
@@ -55,9 +55,9 @@ static TCGv_i64 cpu_exclusive_high;
* keep track of the current pc, so the last pc in the block can be
* captured. */
static TCGv_i64 cpu_prof_pc;
-static TCGv_i64 cpu_prof_ic;
static TCGv_i64 cpu_prof_is_jmp;
#endif // CONFIG_BBVEC
+static TCGv_i64 cpu_prof_ic;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
@@ -124,10 +124,11 @@ void a64_translate_init(void)
#ifdef CONFIG_BBVEC
// bbvec profiling globals
- cpu_prof_ic = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_ic), "prof_ic");
cpu_prof_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_pc), "prof_pc");
cpu_prof_is_jmp = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_is_jmp), "prof_is_jmp");
#endif // CONFIG_BBVEC
+ cpu_prof_ic = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, prof_ic), "prof_ic");
cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
@@ -210,11 +211,6 @@ void gen_a64_set_pc_im(uint64_t val)
#ifdef CONFIG_BBVEC
/* Basic block profiling functions */
-static void gen_insn_cnt_incr(CPUARMState * env, DisasContext *s)
-{
- tcg_gen_addi_i64(cpu_prof_ic, cpu_prof_ic, 1);
-}
-
static void gen_pc_incr(CPUARMState * env, DisasContext *s)
{
tcg_gen_movi_i64(cpu_prof_pc, s->pc);
@@ -226,6 +222,11 @@ static void gen_store_is_jmp(uint32_t jmp)
}
#endif // CONFIG_BBVEC
+static void gen_insn_cnt_incr(CPUARMState * env, DisasContext *s)
+{
+ tcg_gen_addi_i64(cpu_prof_ic, cpu_prof_ic, 1);
+}
+
static void gen_exception_internal(int excp)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
@@ -11068,6 +11069,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
tcg_clear_temp_count();
+ gen_helper_update_instruction_count(cpu_env);
#ifdef CONFIG_BBVEC
/* Profile previously run block, check for PID change, and initialize
* prof_is_jmp flag. */
@@ -11082,10 +11084,10 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
#ifdef CONFIG_BBVEC
if (bbtrace_initialized()) {
gen_helper_context_check_mode(cpu_env);
- gen_insn_cnt_incr(env, dc);
gen_pc_incr(env, dc);
}
#endif // CONFIG_BBVEC
+ gen_insn_cnt_incr(env, dc);
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
@@ -71,10 +71,10 @@ static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_BBVEC
-static TCGv_i64 cpu_prof_ic;
static TCGv_i64 cpu_prof_pc;
static TCGv_i64 cpu_prof_is_jmp;
#endif
+static TCGv_i64 cpu_prof_ic;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
@@ -109,10 +109,11 @@ void arm_translate_init(void)
#ifdef CONFIG_BBVEC
// bbvec profiling globals
- cpu_prof_ic = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_ic), "prof_ic");
cpu_prof_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_pc), "prof_pc");
cpu_prof_is_jmp = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, prof_is_jmp), "prof_is_jmp");
#endif
+ cpu_prof_ic = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUARMState, prof_ic), "prof_ic");
cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
@@ -194,11 +195,6 @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
#ifdef CONFIG_BBVEC
/* Basic block profiling functions */
-static void gen_insn_cnt_incr(CPUARMState * env, DisasContext *s)
-{
- tcg_gen_addi_i64(cpu_prof_ic, cpu_prof_ic, 1);
-}
-
static void gen_pc_incr(CPUARMState * env, DisasContext *s)
{
tcg_gen_movi_i64(cpu_prof_pc, s->pc);
@@ -210,6 +206,11 @@ static void gen_store_is_jmp(uint32_t jmp)
}
#endif
+static void gen_insn_cnt_incr(CPUARMState * env, DisasContext *s)
+{
+ tcg_gen_addi_i64(cpu_prof_ic, cpu_prof_ic, 1);
+}
+
/* Set a CPU register. The source must be a temporary and will be
marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
@@ -11627,6 +11628,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
store_cpu_field(tmp, condexec_bits);
}
+ gen_helper_update_instruction_count(cpu_env);
#ifdef CONFIG_BBVEC
/* Profile previously run block, check for PID change, and initialize
* prof_is_jmp flag. */
@@ -11641,13 +11643,13 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
#ifdef CONFIG_BBVEC
if (bbtrace_initialized()) {
gen_helper_context_check_mode(cpu_env);
- gen_insn_cnt_incr(env, dc);
gen_pc_incr(env, dc);
/* FIXME: this call should not be necessary if all the cases
where the prof_is_jmp flag gets set are correct. */
gen_store_is_jmp(0);
}
#endif
+ gen_insn_cnt_incr(env, dc);
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
This (partially) divorces counting instructions from basic block
collection so that instructions can be counted without the bbv plugin
being enabled. Written by Aaron Lindsay.

Signed-off-by: Christopher Covington <cov@codeaurora.org>
---
 include/exec/cpu-defs.h    |  2 ++
 target-arm/helper.c        | 43 +++++++++++++++++++++++++++++++++++++++++++
 target-arm/helper.h        |  2 ++
 target-arm/translate-a64.c | 18 ++++++++++--------
 target-arm/translate.c     | 18 ++++++++++--------
 5 files changed, 67 insertions(+), 16 deletions(-)