Message ID: 20210225134652.2127648-14-npiggin@gmail.com
State:      Superseded
Series:     KVM: PPC: Book3S: C-ify the P9 entry/exit code
Nicholas Piggin <npiggin@gmail.com> writes:

> Switching the MMU from radix<->radix mode is tricky particularly as the
> MMU can remain enabled and requires a certain sequence of SPR updates.
> Move these together into their own functions.
>
> This also includes the radix TLB check / flush because it's tied in to
> MMU switching due to tlbiel getting LPID from LPIDR.
>
> (XXX: isync / hwsync synchronisation TBD)

I see both mtlpidr and mtlpcr requiring a CSI in the ISA. Are you saying
we might need more than an isync?

Regardless, I'd expect that to be separate from the refactoring here, so:

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>

> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  arch/powerpc/kvm/book3s_hv.c | 55 +++++++++++++++++++++---------------
>  1 file changed, 32 insertions(+), 23 deletions(-)
>
> [...]
Excerpts from Fabiano Rosas's message of February 27, 2021 1:56 am:
> Nicholas Piggin <npiggin@gmail.com> writes:
>
>> Switching the MMU from radix<->radix mode is tricky particularly as the
>> MMU can remain enabled and requires a certain sequence of SPR updates.
>> Move these together into their own functions.
>>
>> This also includes the radix TLB check / flush because it's tied in to
>> MMU switching due to tlbiel getting LPID from LPIDR.
>>
>> (XXX: isync / hwsync synchronisation TBD)
>
> I see both mtlpidr and mtlpcr requiring a CSI in the ISA. Are you saying
> we might need more than an isync?

We might need a CSI before it, and we might also need a hwsync before
it. I don't know whether we need isyncs between any of them (I don't
think we should, because they're all mtsprs).

> Regardless, I'd expect that to be separate from the refactoring here,
> so:
>
> Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>

Thanks,
Nick

>> [...]
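For concreteness, here is a minimal sketch of where heavier synchronisation could land in the new helper if the ISA reading concludes one is needed. The mb() (hwsync on powerpc) placement is purely an assumption for illustration; the patch and the exchange above deliberately leave this TBD:

/*
 * Sketch only (book3s_hv.c context): switch_mmu_to_guest_radix() with
 * a hypothetical hwsync -- mb() on powerpc -- ahead of the
 * partition-scope SPR updates. Not what the patch as posted does.
 */
static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu,
				      u64 lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	u32 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;

	mb();		/* hypothetical hwsync before mtLPIDR/mtLPCR */
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, vcpu->arch.pid);
	isync();	/* single CSI after the group of mtsprs */

	/* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
	kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
}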
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 895090636295..23d6dc04b0e9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3440,12 +3440,38 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	trace_kvmppc_run_core(vc, 1);
 }
 
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	struct kvm_nested_guest *nested = vcpu->arch.nested;
+	u32 lpid;
+
+	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+	mtspr(SPRN_LPID, lpid);
+	mtspr(SPRN_LPCR, lpcr);
+	mtspr(SPRN_PID, vcpu->arch.pid);
+	isync();
+
+	/* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
+	kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+}
+
+static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+{
+	mtspr(SPRN_PID, pid);
+	mtspr(SPRN_LPID, kvm->arch.host_lpid);
+	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	isync();
+}
+
 /*
  * Load up hypervisor-mode registers on P9.
  */
 static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 				     unsigned long lpcr)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec;
 	u64 tb, purr, spurr;
@@ -3468,12 +3494,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	 * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
 	 * so set HDICE before writing HDEC.
 	 */
-	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
+	mtspr(SPRN_LPCR, kvm->arch.host_lpcr | LPCR_HDICE);
 	isync();
 
 	hdec = time_limit - mftb();
 	if (hdec < 0) {
-		mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+		mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
 		isync();
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
 	}
@@ -3508,7 +3534,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
 	mtspr(SPRN_IC, vcpu->arch.ic);
-	mtspr(SPRN_PID, vcpu->arch.pid);
 
 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -3522,8 +3547,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	mtspr(SPRN_LPCR, lpcr);
-	isync();
+	switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 
 	kvmppc_xive_push_vcpu(vcpu);
 
@@ -3562,7 +3586,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 		mtspr(SPRN_DAWR1, host_dawr1);
 		mtspr(SPRN_DAWRX1, host_dawrx1);
 	}
-	mtspr(SPRN_PID, host_pidr);
 
 	/*
 	 * Since this is radix, do a eieio; tlbsync; ptesync sequence in
@@ -3577,9 +3600,6 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
 		asm volatile(PPC_CP_ABORT);
 
-	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
-	isync();
-
 	vc->dpdes = mfspr(SPRN_DPDES);
 	vc->vtb = mfspr(SPRN_VTB);
 	mtspr(SPRN_DPDES, 0);
@@ -3596,7 +3616,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 
 	mtspr(SPRN_HDEC, 0x7fffffff);
-	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+	switch_mmu_to_host_radix(kvm, host_pidr);
 
 	return trap;
 }
@@ -4130,7 +4151,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 {
 	struct kvm_run *run = vcpu->run;
 	int trap, r, pcpu;
-	int srcu_idx, lpid;
+	int srcu_idx;
 	struct kvmppc_vcore *vc;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4204,13 +4225,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc->vcore_state = VCORE_RUNNING;
 	trace_kvmppc_run_core(vc, 0);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
-		mtspr(SPRN_LPID, lpid);
-		isync();
-		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
-	}
-
 	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4229,11 +4243,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		mtspr(SPRN_LPID, kvm->arch.host_lpid);
-		isync();
-	}
-
 	set_irq_happened(trap);
 
 	kvmppc_set_host_core(pcpu);
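Taken together, the hunks leave one MMU switch point on each side of the guest run. A condensed sketch of the resulting shape of kvmhv_load_hv_regs_and_go() (not a literal excerpt: most SPR save/restore is elided, and the __kvmhv_vcpu_entry_p9() call stands in for the unshown entry code):

static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
				     unsigned long lpcr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long host_pidr = mfspr(SPRN_PID);	/* saved for the way out */
	int trap;

	/* ... HDEC setup, guest SPR loads ... */

	switch_mmu_to_guest_radix(kvm, vcpu, lpcr);	/* guest LPID/LPCR/PID + TLB check */
	kvmppc_xive_push_vcpu(vcpu);

	trap = __kvmhv_vcpu_entry_p9(vcpu);		/* run the guest */

	/* ... host SPR restores, TLB synchronisation ... */

	switch_mmu_to_host_radix(kvm, host_pidr);	/* back to host PID/LPID/LPCR */
	return trap;
}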
Switching the MMU from radix<->radix mode is tricky particularly as the
MMU can remain enabled and requires a certain sequence of SPR updates.
Move these together into their own functions.

This also includes the radix TLB check / flush because it's tied in to
MMU switching due to tlbiel getting LPID from LPIDR.

(XXX: isync / hwsync synchronisation TBD)

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 55 +++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 23 deletions(-)
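The second paragraph is the ordering constraint that ties the TLB flush to the MMU switch: tlbiel takes its target LPID from LPIDR, so the flush is only meaningful once the guest LPID is in place. A minimal illustration, using a hypothetical helper name (not part of the patch):

/*
 * Hypothetical helper, for illustration only: flushing before the
 * mtspr would direct tlbiel at whatever partition LPIDR still names
 * (i.e. the host's), not the guest's.
 */
static void flush_guest_tlb_after_lpid_switch(struct kvm *kvm,
					      struct kvm_vcpu *vcpu,
					      int pcpu, u32 lpid)
{
	struct kvm_nested_guest *nested = vcpu->arch.nested;

	mtspr(SPRN_LPID, lpid);	/* LPIDR now names the guest partition */
	isync();
	kvmppc_check_need_tlb_flush(kvm, pcpu, nested);	/* tlbiel hits the guest */
}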