@@ -82,9 +82,9 @@ static inline uint64_t cpu_ppc_get_tb (CPUState *env)
return 0;
}
-uint32_t cpu_ppc_load_tbl (CPUState *env)
+uint64_t cpu_ppc_load_tbl (CPUState *env)
{
- return cpu_ppc_get_tb(env) & 0xFFFFFFFF;
+ return cpu_ppc_get_tb(env);
}
uint32_t cpu_ppc_load_tbu (CPUState *env)
@@ -401,7 +401,7 @@ static inline uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk,
return muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec()) + tb_offset;
}
-uint32_t cpu_ppc_load_tbl (CPUState *env)
+uint64_t cpu_ppc_load_tbl (CPUState *env)
{
ppc_tb_t *tb_env = env->tb_env;
uint64_t tb;
@@ -409,7 +409,7 @@ uint32_t cpu_ppc_load_tbl (CPUState *env)
tb = cpu_ppc_get_tb(tb_env, qemu_get_clock(vm_clock), tb_env->tb_offset);
LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
- return tb & 0xFFFFFFFF;
+ return tb;
}
static inline uint32_t _cpu_ppc_load_tbu(CPUState *env)
@@ -1068,9 +1068,9 @@ static inline uint64_t cpu_ppc_get_tb (CPUState *env)
return 0;
}
-uint32_t cpu_ppc_load_tbl (CPUState *env)
+uint64_t cpu_ppc_load_tbl (CPUState *env)
{
- return cpu_ppc_get_tb(env) & 0xFFFFFFFF;
+ return cpu_ppc_get_tb(env);
}
uint32_t cpu_ppc_load_tbu (CPUState *env)
@@ -741,7 +741,7 @@ int cpu_ppc_register_internal (CPUPPCState *env, const ppc_def_t *def);
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
-uint32_t cpu_ppc_load_tbl (CPUPPCState *env);
+uint64_t cpu_ppc_load_tbl (CPUPPCState *env);
uint32_t cpu_ppc_load_tbu (CPUPPCState *env);
void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
@@ -68,7 +68,7 @@ void helper_store_dump_spr (uint32_t sprn)
target_ulong helper_load_tbl (void)
{
- return cpu_ppc_load_tbl(env);
+ return (target_ulong)cpu_ppc_load_tbl(env);
}
target_ulong helper_load_tbu (void)
On PPC we have a 64-bit time base. Usually (PPC32) this is accessed using two separate 32-bit SPR accesses to SPR_TBU and SPR_TBL. On PPC64 the SPR_TBL register acts as 64-bit though, so we get the full 64 bits as the return value. Taking only the lower 32 bits would be fine in principle, but Linux wants to see all 64 bits or it breaks. This patch makes PPC64 Linux work even after the TB crosses the 32-bit boundary, which usually happens a few seconds after bootup. Signed-off-by: Alexander Graf <agraf@suse.de> --- To verify my assumptions of the above I used this test program: int main() { unsigned int tbu=0, tbl=0; unsigned long tb=0; asm("mftbu %0" : "=r" (tbu)); asm("mftbl %0" : "=r" (tbl)); asm("mftbl %0" : "=r" (tb)); printf("TB: %#x %#x\n", tbu, tbl); printf("TB64: %#lx\n", tb); } It produces the following output on a 970MP CPU: $ ./mftb TB: 0x238 0xd676bd6 TB64: 0x2380d676f75 V1 -> V2: - adjust user targets too - do an explicit cast for target_ulong --- darwin-user/main.c | 4 ++-- hw/ppc.c | 4 ++-- linux-user/main.c | 4 ++-- target-ppc/cpu.h | 2 +- target-ppc/op_helper.c | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-)