[RFC,07/43] KVM: PPC: Book3S HV: POWER10 enable HAIL when running radix guests

Message ID 20210622105736.633352-8-npiggin@gmail.com
State New
Series KVM: PPC: Book3S HV P9: entry/exit optimisations round 1

Commit Message

Nicholas Piggin June 22, 2021, 10:57 a.m. UTC
HV interrupts may be taken with the MMU enabled when radix guests are
running. Enable LPCR[HAIL] on ISA v3.1 processors for radix guests.
Make this depend on the host LPCR[HAIL] being enabled. Currently that is
always enabled, but having this test means any issue that might require
LPCR[HAIL] to be disabled in the host will not have to be duplicated in
KVM.

-1380 cycles on P10 NULL hcall entry+exit

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)
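
For readers skimming the diff below, the decision the patch adds can be summarised in plain C. The sketch is a standalone illustration, not the kernel code: guest_lpcr_hail() and its boolean arguments are hypothetical stand-ins for the inline cpu_has_feature(CPU_FTR_ARCH_31) / cpu_has_feature(CPU_FTR_HVMODE) checks and kvm->arch.host_lpcr, and the LPCR_HAIL bit value is made up for the example.

/*
 * Standalone sketch of the HAIL gating this patch introduces. The helper
 * and its arguments are hypothetical stand-ins for the in-kernel
 * cpu_has_feature() checks and kvm->arch.host_lpcr.
 */
#include <stdbool.h>
#include <stdio.h>

#define LPCR_HAIL (1UL << 4)    /* illustrative bit, not the architected one */

static unsigned long guest_lpcr_hail(unsigned long host_lpcr,
                                     bool has_arch_31, bool has_hvmode,
                                     bool radix)
{
        /* HAIL exists from ISA v3.1 (POWER10) and only applies to radix guests */
        if (!has_arch_31 || !radix)
                return 0;
        /* Follow the host: only enable it for the guest if the host runs with it */
        if (has_hvmode && (host_lpcr & LPCR_HAIL))
                return LPCR_HAIL;
        return 0;
}

int main(void)
{
        unsigned long host_lpcr = LPCR_HAIL;    /* the host currently always sets HAIL */

        printf("radix guest: %#lx\n", guest_lpcr_hail(host_lpcr, true, true, true));
        printf("HPT guest:   %#lx\n", guest_lpcr_hail(host_lpcr, true, true, false));
        return 0;
}

Tying the guest bit to kvm->arch.host_lpcr is what lets any future host-side reason to clear LPCR[HAIL] take effect for guests automatically, as the commit message notes.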

Comments

Fabiano Rosas June 30, 2021, 7:41 p.m. UTC | #1
Nicholas Piggin <npiggin@gmail.com> writes:

> HV interrupts may be taken with the MMU enabled when radix guests are
> running. Enable LPCR[HAIL] on ISA v3.1 processors for radix guests.
> Make this depend on the host LPCR[HAIL] being enabled. Currently that is
> always enabled, but having this test means any issue that might require
> LPCR[HAIL] to be disabled in the host will not have to be duplicated in
> KVM.
>
> -1380 cycles on P10 NULL hcall entry+exit
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 36e1db48fccf..ed713f49fbd5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4896,6 +4896,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
  */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+	unsigned long lpcr, lpcr_mask;
+
 	if (nesting_enabled(kvm))
 		kvmhv_release_all_nested(kvm);
 	kvmppc_rmap_reset(kvm);
@@ -4905,8 +4907,13 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 	kvm->arch.radix = 0;
 	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_radix(kvm);
-	kvmppc_update_lpcr(kvm, LPCR_VPM1,
-			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+	lpcr = LPCR_VPM1;
+	lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		lpcr_mask |= LPCR_HAIL;
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	return 0;
 }
 
@@ -4916,6 +4923,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
  */
 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
+	unsigned long lpcr, lpcr_mask;
 	int err;
 
 	err = kvmppc_init_vm_radix(kvm);
@@ -4927,8 +4935,17 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 	kvm->arch.radix = 1;
 	spin_unlock(&kvm->mmu_lock);
 	kvmppc_free_hpt(&kvm->arch.hpt);
-	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
-			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+	lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		lpcr_mask |= LPCR_HAIL;
+		if (cpu_has_feature(CPU_FTR_HVMODE) &&
+				(kvm->arch.host_lpcr & LPCR_HAIL))
+			lpcr |= LPCR_HAIL;
+	}
+	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
 	return 0;
 }
 
@@ -5092,6 +5109,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 		kvm->arch.mmu_ready = 1;
 		lpcr &= ~LPCR_VPM1;
 		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+		if (cpu_has_feature(CPU_FTR_HVMODE) &&
+		    cpu_has_feature(CPU_FTR_ARCH_31) &&
+		    (kvm->arch.host_lpcr & LPCR_HAIL))
+			lpcr |= LPCR_HAIL;
 		ret = kvmppc_init_vm_radix(kvm);
 		if (ret) {
 			kvmppc_free_lpid(kvm->arch.lpid);
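
A note on the kvmppc_update_lpcr() calls the patch touches: the second argument supplies new bit values and the third selects which bits may change. That is why the HPT path adds LPCR_HAIL to the mask only (so a stale HAIL bit gets cleared), while the radix path also puts it in the value. The snippet below is a minimal model of that value/mask convention, assuming the usual new = (old & ~mask) | value behaviour; the real helper additionally propagates the result to every virtual core, which is omitted here, and the bit positions are illustrative.

/* Minimal model of a value/mask LPCR update; bit positions are illustrative. */
#include <stdio.h>

#define LPCR_VPM1 (1UL << 1)
#define LPCR_HR   (1UL << 2)
#define LPCR_HAIL (1UL << 3)

static unsigned long update_lpcr(unsigned long old, unsigned long val,
                                 unsigned long mask)
{
        /* Only bits selected by mask change; val supplies their new state */
        return (old & ~mask) | (val & mask);
}

int main(void)
{
        unsigned long lpcr = LPCR_HR | LPCR_HAIL;       /* radix guest with HAIL */

        /*
         * Switching that guest to HPT on a v3.1 host: HAIL is in the mask but
         * not in the value, so it is cleared along with HR while VPM1 is set.
         */
        lpcr = update_lpcr(lpcr, LPCR_VPM1, LPCR_VPM1 | LPCR_HR | LPCR_HAIL);
        printf("LPCR after switch to HPT: %#lx\n", lpcr);
        return 0;
}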