KVM: PPC: Book3S HV P9: Optimise loads around context switch

Message ID 20220123114725.3549202-1-npiggin@gmail.com (mailing list archive)
State Accepted
Series KVM: PPC: Book3S HV P9: Optimise loads around context switch

Checks

Context                                      Check    Description
snowpatch_ozlabs/github-powerpc_ppctests     success  Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_selftests    success  Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_kernel_qemu  success  Successfully ran 24 jobs.
snowpatch_ozlabs/github-powerpc_sparse       success  Successfully ran 4 jobs.
snowpatch_ozlabs/github-powerpc_clang        success  Successfully ran 7 jobs.

Commit Message

Nicholas Piggin Jan. 23, 2022, 11:47 a.m. UTC
It is better to get all loads for the register values in flight
before starting to switch LPID, PID, and LPCR because those
mtSPRs are expensive and serialising.

This also just tidies up the code for a potential future change
to the context switching sequence.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
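
To illustrate the pattern outside the kernel, here is a minimal, standalone
C sketch; struct reg_state, write_spr() and the SPR indices are hypothetical
stand-ins for the vcpu/kvm state and mtspr(), not the real API:

#include <stdint.h>

struct reg_state {
	uint32_t lpid;
	uint32_t pid;
	uint64_t lpcr;
};

/* Stand-in for a serialising mtspr; a real one would be inline asm. */
static void write_spr(int spr, uint64_t val)
{
	(void)spr;
	(void)val;
}

static void switch_regs(struct reg_state *s)
{
	/* Issue all loads up front, so they can be in flight together. */
	uint32_t lpid = s->lpid;
	uint32_t pid = s->pid;
	uint64_t lpcr = s->lpcr;

	/*
	 * Back-to-back serialising writes. Without the hoisting above,
	 * the PID write would wait on a load of s->pid that could only
	 * be issued after the earlier serialising writes completed.
	 */
	write_spr(0, lpid);	/* hypothetical SPR index for LPID */
	write_spr(1, lpcr);	/* hypothetical SPR index for LPCR */
	write_spr(2, pid);	/* hypothetical SPR index for PID */
}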

Comments

Fabiano Rosas Jan. 24, 2022, 5:29 p.m. UTC | #1
Nicholas Piggin <npiggin@gmail.com> writes:

> It is better to get all loads for the register values in flight
> before starting to switch LPID, PID, and LPCR because those
> mtSPRs are expensive and serialising.
>
> This also just tidies up the code for a potential future change
> to the context switching sequence.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>

Michael Ellerman May 24, 2022, 10:51 a.m. UTC | #2
On Sun, 23 Jan 2022 21:47:25 +1000, Nicholas Piggin wrote:
> It is better to get all loads for the register values in flight
> before starting to switch LPID, PID, and LPCR because those
> mtSPRs are expensive and serialising.
> 
> This also just tidies up the code for a potential future change
> to the context switching sequence.
> 
> [...]

Applied to powerpc/topic/ppc-kvm.

[1/1] KVM: PPC: Book3S HV P9: Optimise loads around context switch
      https://git.kernel.org/powerpc/c/361234d7a1c9a5290d33e35d49821b7a32a32854

cheers

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index a28e5b3daabd..9dba3e3f65a0 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 {
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	u32 lpid;
+	u32 pid;
 
 	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * Prior memory accesses to host PID Q3 must be completed before we
@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 	/*
 	 * isync not required here because we are HRFID'ing to guest before
 	 * any guest context access, which is context synchronising.
@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 {
 	u32 lpid;
+	u32 pid;
 	int i;
 
 	lpid = kvm->arch.lpid;
+	pid = vcpu->arch.pid;
 
 	/*
 	 * See switch_mmu_to_guest_radix. ptesync should not be required here
@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
-	mtspr(SPRN_PID, vcpu->arch.pid);
+	mtspr(SPRN_PID, pid);
 
 	for (i = 0; i < vcpu->arch.slb_max; i++)
 		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
 
 static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
+	u32 lpid = kvm->arch.host_lpid;
+	u64 lpcr = kvm->arch.host_lpcr;
+
 	/*
 	 * The guest has exited, so guest MMU context is no longer being
 	 * non-speculatively accessed, but a hwsync is needed before the
@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_PID, pid);
-	mtspr(SPRN_LPID, kvm->arch.host_lpid);
-	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+	mtspr(SPRN_LPID, lpid);
+	mtspr(SPRN_LPCR, lpcr);
 	/*
 	 * isync is not required after the switch, because mtmsrd with L=0
 	 * is performed after this switch, which is context synchronising.