[v3,16/41] KVM: PPC: Book3S HV P9: Move radix MMU switching instructions together

Message ID: 20210305150638.2675513-17-npiggin@gmail.com (mailing list archive)
State: Changes Requested
Series: KVM: PPC: Book3S: C-ify the P9 entry/exit code

Checks

Context                       Check    Description
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch powerpc/merge (91966823812efbd175f904599e5cf2a854b39809)
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch powerpc/next (fe07bfda2fb9cdef8a4d4008a409bb02f35f1bd8)
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch linus/master (280d542f6ffac0e6d65dc267f92191d509b13b64)
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch powerpc/fixes (5c88a17e15795226b56d83f579cbb9b7a4864f79)
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch linux-next (7a7fd0de4a9804299793e564a555a49c1fc924cb)
snowpatch_ozlabs/apply_patch  fail     Failed to apply to any branch

Commit Message

Nicholas Piggin March 5, 2021, 3:06 p.m. UTC
Switching the MMU from radix<->radix mode is tricky, particularly as the
MMU can remain enabled and requires a certain sequence of SPR updates.
Move these together into their own functions.

This also includes the radix TLB check / flush because it's tied in to
MMU switching due to tlbiel getting LPID from LPIDR.

(XXX: isync / hwsync synchronisation TBD)

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 55 +++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 23 deletions(-)
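
For reference, the flush helper the new guest-switch function calls keys
its tlbiel invalidations off whatever LPID is currently in LPIDR, which
is why the guest LPID has to be written before flushing. A simplified
sketch of kvmppc_check_need_tlb_flush() (based on
arch/powerpc/kvm/book3s_hv_builtin.c around this series; details
approximate):

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
                                 struct kvm_nested_guest *nested)
{
    cpumask_t *need_tlb_flush;

    /* On P9 the TLB is shared by all threads in a core, so a flush
     * on any one thread covers the core; track one bit per core. */
    if (cpu_has_feature(CPU_FTR_ARCH_300))
        pcpu = cpu_first_thread_sibling(pcpu);

    need_tlb_flush = nested ? &nested->need_tlb_flush
                            : &kvm->arch.need_tlb_flush;

    if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
        /* tlbiel invalidates entries tagged with the LPID currently
         * in LPIDR, so the guest LPID must already be loaded here. */
        flush_guest_tlb(kvm);
        cpumask_clear_cpu(pcpu, need_tlb_flush);
    }
}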

Comments

Alexey Kardashevskiy March 22, 2021, 4:24 a.m. UTC | #1
On 06/03/2021 02:06, Nicholas Piggin wrote:
> Switching the MMU from radix<->radix mode is tricky, particularly as the
> MMU can remain enabled and requires a certain sequence of SPR updates.
> Move these together into their own functions.
> 
> This also includes the radix TLB check / flush because it's tied in to
> MMU switching due to tlbiel getting LPID from LPIDR.
> 
> (XXX: isync / hwsync synchronisation TBD)


Looks alright, but what is this comment about? Is something missing or
just suboptimal?


> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>


> ---
>   arch/powerpc/kvm/book3s_hv.c | 55 +++++++++++++++++++++---------------
>   1 file changed, 32 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index f1230f9d98ba..b9cae42b9cd5 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -3449,12 +3449,38 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
>   	trace_kvmppc_run_core(vc, 1);
>   }
>   
> +static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
> +{
> +	struct kvmppc_vcore *vc = vcpu->arch.vcore;
> +	struct kvm_nested_guest *nested = vcpu->arch.nested;
> +	u32 lpid;
> +
> +	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
> +
> +	mtspr(SPRN_LPID, lpid);
> +	mtspr(SPRN_LPCR, lpcr);
> +	mtspr(SPRN_PID, vcpu->arch.pid);
> +	isync();
> +
> +	/* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
> +	kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
> +}
> +
> +static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
> +{
> +	mtspr(SPRN_PID, pid);
> +	mtspr(SPRN_LPID, kvm->arch.host_lpid);
> +	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
> +	isync();
> +}
> +
>   /*
>    * Load up hypervisor-mode registers on P9.
>    */
>   static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
>   				     unsigned long lpcr)
>   {
> +	struct kvm *kvm = vcpu->kvm;
>   	struct kvmppc_vcore *vc = vcpu->arch.vcore;
>   	s64 hdec;
>   	u64 tb, purr, spurr;
> [...]
> @@ -4138,7 +4159,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
>   {
>   	struct kvm_run *run = vcpu->run;
>   	int trap, r, pcpu;
> -	int srcu_idx, lpid;
> +	int srcu_idx;
>   	struct kvmppc_vcore *vc;
>   	struct kvm *kvm = vcpu->kvm;
>   	struct kvm_nested_guest *nested = vcpu->arch.nested;
> @@ -4212,13 +4233,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
>   	vc->vcore_state = VCORE_RUNNING;
>   	trace_kvmppc_run_core(vc, 0);
>   
> -	if (cpu_has_feature(CPU_FTR_HVMODE)) {


The new location of mtspr(SPRN_LPID, lpid) does not check for
CPU_FTR_HVMODE anymore; is this going to work with HV KVM on pseries?
Thanks,

> -		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
> -		mtspr(SPRN_LPID, lpid);
> -		isync();
> -		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
> -	}
> -
>   	guest_enter_irqoff();
>   
>   	srcu_idx = srcu_read_lock(&kvm->srcu);
> @@ -4237,11 +4251,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
>   
>   	srcu_read_unlock(&kvm->srcu, srcu_idx);
>   
> -	if (cpu_has_feature(CPU_FTR_HVMODE)) {
> -		mtspr(SPRN_LPID, kvm->arch.host_lpid);
> -		isync();
> -	}
> -
Nicholas Piggin March 22, 2021, 5:25 a.m. UTC | #2
Excerpts from Alexey Kardashevskiy's message of March 22, 2021 2:24 pm:
> 
> 
> On 06/03/2021 02:06, Nicholas Piggin wrote:
>> Switching the MMU from radix<->radix mode is tricky, particularly as the
>> MMU can remain enabled and requires a certain sequence of SPR updates.
>> Move these together into their own functions.
>> 
>> This also includes the radix TLB check / flush because it's tied in to
>> MMU switching due to tlbiel getting LPID from LPIDR.
>> 
>> (XXX: isync / hwsync synchronisation TBD)
> 
> 
> Looks alright, but what is this comment about? Is something missing or
> just suboptimal?

Ah, yeah, the architecture says, for example, that a CSI is required
before and after each, but the fine print is that you only need those
to separate them from previous or subsequent accesses that may use the
contexts being switched from/to.

Then there is the question of a CSI between the instructions, so that
e.g. you don't get the TLB prefetch bug if the mtPIDR could go out of
order ahead of the mtLPIDR; but those instructions are serialized, so
they wouldn't.

There are possibly a few clarifications coming to the architecture
around this as well.

I think things are relatively okay but probably need a bit more
commenting to justify where the isync()s aren't. It's possible we might
even be able to remove the isync()s that are there.

Making a like-for-like conversion is a bit tricky because there are
possible context synchronising instructions between them already.

Maybe for the first series, I'll just put an isync between all of them,
and then a later patch can replace some of them with comments.
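
As a sketch, that conservative first pass might look something like this
(illustrative only; the exact CSI placement is the open question above):

static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                      u64 lpcr)
{
    struct kvm_nested_guest *nested = vcpu->arch.nested;
    u32 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;

    /*
     * Conservative version: context-synchronise after every SPR
     * update so nothing can run with a half-switched context; a
     * later patch can replace individual isync()s with comments
     * justifying their removal.
     */
    mtspr(SPRN_LPID, lpid);
    isync();
    mtspr(SPRN_LPCR, lpcr);
    isync();
    mtspr(SPRN_PID, vcpu->arch.pid);
    isync();

    /* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
    kvmppc_check_need_tlb_flush(kvm, vcpu->arch.vcore->pcpu, nested);
}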

>> [...]
>> -	if (cpu_has_feature(CPU_FTR_HVMODE)) {
> 
> 
>> The new location of mtspr(SPRN_LPID, lpid) does not check for
>> CPU_FTR_HVMODE anymore; is this going to work with HV KVM on pseries?

Yes, these are moved to HVMODE specific code now.
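
(Roughly, the caller kvmhv_p9_guest_entry() already splits on this; a
simplified sketch, with the hv_guest_state setup omitted:)

    if (kvmhv_on_pseries()) {
        /* Nested HV on pseries: the L0 hypervisor switches the MMU
         * context for us as part of the H_ENTER_NESTED hcall. */
        trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                                  __pa(&vcpu->arch.regs));
    } else {
        /* Bare-metal HV: we own LPIDR/LPCR/PIDR ourselves. */
        trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
    }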

Thanks,
Nick
Alexey Kardashevskiy March 22, 2021, 6:21 a.m. UTC | #3
On 22/03/2021 16:25, Nicholas Piggin wrote:
> Excerpts from Alexey Kardashevskiy's message of March 22, 2021 2:24 pm:
>>
>>
>> On 06/03/2021 02:06, Nicholas Piggin wrote:
>>> Switching the MMU from radix<->radix mode is tricky, particularly as the
>>> MMU can remain enabled and requires a certain sequence of SPR updates.
>>> Move these together into their own functions.
>>>
>>> This also includes the radix TLB check / flush because it's tied in to
>>> MMU switching due to tlbiel getting LPID from LPIDR.
>>>
>>> (XXX: isync / hwsync synchronisation TBD)
>>
>>
>> Looks alright, but what is this comment about? Is something missing or
>> just suboptimal?
> 
> Ah, yeah, the architecture says, for example, that a CSI is required
> before and after each, but the fine print is that you only need those
> to separate them from previous or subsequent accesses that may use the
> contexts being switched from/to.
> 
> Then there is the question of a CSI between the instructions, so that
> e.g. you don't get the TLB prefetch bug if the mtPIDR could go out of
> order ahead of the mtLPIDR; but those instructions are serialized, so
> they wouldn't.
> 
> There are possibly a few clarifications coming to the architecture
> around this as well.
> 
> I think things are relatively okay but probably need a bit more
> commenting to justify where the isync()s aren't. It's possible we might
> even be able to remove the isync()s that are there.
> 
> Making a like-for-like conversion is a bit tricky because there are
> possible context synchronising instructions between them already.
> 
> Maybe for the first series, I'll just put an isync between all of them,
> and then a later patch can replace some of them with comments.
> 
>>> [...]
>>> -	if (cpu_has_feature(CPU_FTR_HVMODE)) {
>>
>>
>> The new location of mtspr(SPRN_LPID, lpid) does not check for
>> CPU_FTR_HVMODE anymore; is this going to work with HV KVM on pseries?
> 
> Yes, these are moved to HVMODE specific code now.

ah right, kvmhv_on_pseries() is !cpu_has_feature(CPU_FTR_HVMODE).
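
(Roughly, as defined in arch/powerpc/include/asm/kvm_ppc.h around this
time:)

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
    return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
    return false;
}
#endif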


Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f1230f9d98ba..b9cae42b9cd5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3449,12 +3449,38 @@  static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	trace_kvmppc_run_core(vc, 1);
 }
 
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	struct kvm_nested_guest *nested = vcpu->arch.nested;
+	u32 lpid;
+
+	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+	mtspr(SPRN_LPID, lpid);
+	mtspr(SPRN_LPCR, lpcr);
+	mtspr(SPRN_PID, vcpu->arch.pid);
+	isync();
+
+	/* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
+	kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+}
+
+static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
+{
+	mtspr(SPRN_PID, pid);
+	mtspr(SPRN_LPID, kvm->arch.host_lpid);
+	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	isync();
+}
+
 /*
  * Load up hypervisor-mode registers on P9.
  */
 static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 				     unsigned long lpcr)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec;
 	u64 tb, purr, spurr;
@@ -3477,12 +3503,12 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	 * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
 	 * so set HDICE before writing HDEC.
 	 */
-	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
+	mtspr(SPRN_LPCR, kvm->arch.host_lpcr | LPCR_HDICE);
 	isync();
 
 	hdec = time_limit - mftb();
 	if (hdec < 0) {
-		mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+		mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
 		isync();
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
 	}
@@ -3517,7 +3543,6 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
 	mtspr(SPRN_IC, vcpu->arch.ic);
-	mtspr(SPRN_PID, vcpu->arch.pid);
 
 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -3531,8 +3556,7 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	mtspr(SPRN_LPCR, lpcr);
-	isync();
+	switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 
 	kvmppc_xive_push_vcpu(vcpu);
 
@@ -3571,7 +3595,6 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 		mtspr(SPRN_DAWR1, host_dawr1);
 		mtspr(SPRN_DAWRX1, host_dawrx1);
 	}
-	mtspr(SPRN_PID, host_pidr);
 
 	/*
 	 * Since this is radix, do a eieio; tlbsync; ptesync sequence in
@@ -3586,9 +3609,6 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
 		asm volatile(PPC_CP_ABORT);
 
-	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
-	isync();
-
 	vc->dpdes = mfspr(SPRN_DPDES);
 	vc->vtb = mfspr(SPRN_VTB);
 	mtspr(SPRN_DPDES, 0);
@@ -3605,7 +3625,8 @@  static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 
 	mtspr(SPRN_HDEC, 0x7fffffff);
-	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+	switch_mmu_to_host_radix(kvm, host_pidr);
 
 	return trap;
 }
@@ -4138,7 +4159,7 @@  int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 {
 	struct kvm_run *run = vcpu->run;
 	int trap, r, pcpu;
-	int srcu_idx, lpid;
+	int srcu_idx;
 	struct kvmppc_vcore *vc;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4212,13 +4233,6 @@  int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	vc->vcore_state = VCORE_RUNNING;
 	trace_kvmppc_run_core(vc, 0);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
-		mtspr(SPRN_LPID, lpid);
-		isync();
-		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
-	}
-
 	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4237,11 +4251,6 @@  int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
-		mtspr(SPRN_LPID, kvm->arch.host_lpid);
-		isync();
-	}
-
 	set_irq_happened(trap);
 
 	kvmppc_set_host_core(pcpu);