
[v2,28/37] KVM: PPC: Book3S HV P9: Add helpers for OS SPR handling

Message ID 20210225134652.2127648-29-npiggin@gmail.com
State Superseded
Series: KVM: PPC: Book3S: C-ify the P9 entry/exit code

Commit Message

Nicholas Piggin Feb. 25, 2021, 1:46 p.m. UTC
This is a first step to wrapping supervisor and user SPR saving and
loading up into helpers, which will then be called independently in
bare metal and nested HV cases in order to optimise SPR access.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 131 ++++++++++++++++++++++-------------
 1 file changed, 84 insertions(+), 47 deletions(-)
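
For orientation, the new helpers below pair up on the P9 entry/exit path
roughly as follows. This is only a sketch (the wrapper name is made up,
and the real path has PMU, FP/VEC and timebase work in between); note
that the ordering of the two exit-side calls is what the review comments
below discuss:

	static int kvmhv_p9_sketch(struct kvm_vcpu *vcpu)
	{
		struct p9_host_os_sprs host_os_sprs;

		save_p9_host_os_sprs(&host_os_sprs);		/* host DSCR/TIDR/IAMR/AMR/FSCR */
		load_spr_state(vcpu);				/* switch OS SPRs to guest values */

		/* ... enter the guest, run it, take the exit ... */

		store_spr_state(vcpu);				/* save guest SPRs into the vcpu */
		restore_p9_host_os_sprs(vcpu, &host_os_sprs);	/* then put the host values back */

		return 0;
	}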

Comments

Fabiano Rosas March 2, 2021, 3:04 p.m. UTC | #1
Nicholas Piggin <npiggin@gmail.com> writes:

> This is a first step to wrapping supervisor and user SPR saving and
> loading up into helpers, which will then be called independently in
> bare metal and nested HV cases in order to optimise SPR access.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---

<snip>

> +/* vcpu guest regs must already be saved */
> +static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
> +				    struct p9_host_os_sprs *host_os_sprs)
> +{
> +	mtspr(SPRN_PSPB, 0);
> +	mtspr(SPRN_WORT, 0);
> +	mtspr(SPRN_UAMOR, 0);
> +	mtspr(SPRN_PSPB, 0);

Not your fault, but PSPB is set twice here.

> +
> +	mtspr(SPRN_DSCR, host_os_sprs->dscr);
> +	mtspr(SPRN_TIDR, host_os_sprs->tidr);
> +	mtspr(SPRN_IAMR, host_os_sprs->iamr);
> +
> +	if (host_os_sprs->amr != vcpu->arch.amr)
> +		mtspr(SPRN_AMR, host_os_sprs->amr);
> +
> +	if (host_os_sprs->fscr != vcpu->arch.fscr)
> +		mtspr(SPRN_FSCR, host_os_sprs->fscr);
> +}
> +

<snip>

> @@ -3605,34 +3666,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>  	vcpu->arch.dec_expires = dec + tb;
>  	vcpu->cpu = -1;
>  	vcpu->arch.thread_cpu = -1;
> -	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
> -
> -	vcpu->arch.iamr = mfspr(SPRN_IAMR);
> -	vcpu->arch.pspb = mfspr(SPRN_PSPB);
> -	vcpu->arch.fscr = mfspr(SPRN_FSCR);
> -	vcpu->arch.tar = mfspr(SPRN_TAR);
> -	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
> -	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
> -	vcpu->arch.bescr = mfspr(SPRN_BESCR);
> -	vcpu->arch.wort = mfspr(SPRN_WORT);
> -	vcpu->arch.tid = mfspr(SPRN_TIDR);
> -	vcpu->arch.amr = mfspr(SPRN_AMR);
> -	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
> -	vcpu->arch.dscr = mfspr(SPRN_DSCR);
> -
> -	mtspr(SPRN_PSPB, 0);
> -	mtspr(SPRN_WORT, 0);
> -	mtspr(SPRN_UAMOR, 0);
> -	mtspr(SPRN_DSCR, host_dscr);
> -	mtspr(SPRN_TIDR, host_tidr);
> -	mtspr(SPRN_IAMR, host_iamr);
> -	mtspr(SPRN_PSPB, 0);
>
> -	if (host_amr != vcpu->arch.amr)
> -		mtspr(SPRN_AMR, host_amr);
> +	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
>
> -	if (host_fscr != vcpu->arch.fscr)
> -		mtspr(SPRN_FSCR, host_fscr);
> +	store_spr_state(vcpu);

store_spr_state should come first, right? We want to save the guest
state before restoring the host state.
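
I.e. something along these lines on the exit path (untested, just to
illustrate the order, using the names from this patch):

	store_spr_state(vcpu);
	restore_p9_host_os_sprs(vcpu, &host_os_sprs);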

>
>  	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
>  	store_fp_state(&vcpu->arch.fp);

Nicholas Piggin March 4, 2021, 11:02 a.m. UTC | #2
Excerpts from Fabiano Rosas's message of March 3, 2021 1:04 am:
> Nicholas Piggin <npiggin@gmail.com> writes:
> 
>> This is a first step to wrapping supervisor and user SPR saving and
>> loading up into helpers, which will then be called independently in
>> bare metal and nested HV cases in order to optimise SPR access.
>>
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
> 
> <snip>
> 
>> +/* vcpu guest regs must already be saved */
>> +static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
>> +				    struct p9_host_os_sprs *host_os_sprs)
>> +{
>> +	mtspr(SPRN_PSPB, 0);
>> +	mtspr(SPRN_WORT, 0);
>> +	mtspr(SPRN_UAMOR, 0);
>> +	mtspr(SPRN_PSPB, 0);
> 
> Not your fault, but PSPB is set twice here.

Yeah you're right.

>> +
>> +	mtspr(SPRN_DSCR, host_os_sprs->dscr);
>> +	mtspr(SPRN_TIDR, host_os_sprs->tidr);
>> +	mtspr(SPRN_IAMR, host_os_sprs->iamr);
>> +
>> +	if (host_os_sprs->amr != vcpu->arch.amr)
>> +		mtspr(SPRN_AMR, host_os_sprs->amr);
>> +
>> +	if (host_os_sprs->fscr != vcpu->arch.fscr)
>> +		mtspr(SPRN_FSCR, host_os_sprs->fscr);
>> +}
>> +
> 
> <snip>
> 
>> @@ -3605,34 +3666,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>>  	vcpu->arch.dec_expires = dec + tb;
>>  	vcpu->cpu = -1;
>>  	vcpu->arch.thread_cpu = -1;
>> -	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
>> -
>> -	vcpu->arch.iamr = mfspr(SPRN_IAMR);
>> -	vcpu->arch.pspb = mfspr(SPRN_PSPB);
>> -	vcpu->arch.fscr = mfspr(SPRN_FSCR);
>> -	vcpu->arch.tar = mfspr(SPRN_TAR);
>> -	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
>> -	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
>> -	vcpu->arch.bescr = mfspr(SPRN_BESCR);
>> -	vcpu->arch.wort = mfspr(SPRN_WORT);
>> -	vcpu->arch.tid = mfspr(SPRN_TIDR);
>> -	vcpu->arch.amr = mfspr(SPRN_AMR);
>> -	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
>> -	vcpu->arch.dscr = mfspr(SPRN_DSCR);
>> -
>> -	mtspr(SPRN_PSPB, 0);
>> -	mtspr(SPRN_WORT, 0);
>> -	mtspr(SPRN_UAMOR, 0);
>> -	mtspr(SPRN_DSCR, host_dscr);
>> -	mtspr(SPRN_TIDR, host_tidr);
>> -	mtspr(SPRN_IAMR, host_iamr);
>> -	mtspr(SPRN_PSPB, 0);
>>
>> -	if (host_amr != vcpu->arch.amr)
>> -		mtspr(SPRN_AMR, host_amr);
>> +	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
>>
>> -	if (host_fscr != vcpu->arch.fscr)
>> -		mtspr(SPRN_FSCR, host_fscr);
>> +	store_spr_state(vcpu);
> 
> store_spr_state should come first, right? We want to save the guest
> state before restoring the host state.

Yes, good catch. I switched that back around later, but it looks like I
never brought the fix back to the right patch. Interestingly, things
pretty much work even with this ordering if the guest or host doesn't do
anything much with the SPRs!

Thanks,
Nick

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 94989fe2fdfe..ad16331c3370 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3442,6 +3442,84 @@  static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	trace_kvmppc_run_core(vc, 1);
 }
 
+static void load_spr_state(struct kvm_vcpu *vcpu)
+{
+	mtspr(SPRN_DSCR, vcpu->arch.dscr);
+	mtspr(SPRN_IAMR, vcpu->arch.iamr);
+	mtspr(SPRN_PSPB, vcpu->arch.pspb);
+	mtspr(SPRN_FSCR, vcpu->arch.fscr);
+	mtspr(SPRN_TAR, vcpu->arch.tar);
+	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+	mtspr(SPRN_BESCR, vcpu->arch.bescr);
+	mtspr(SPRN_WORT, vcpu->arch.wort);
+	mtspr(SPRN_TIDR, vcpu->arch.tid);
+	/* XXX: DAR, DSISR must be set with MSR[RI] clear (or hstate as appropriate) */
+	mtspr(SPRN_AMR, vcpu->arch.amr);
+	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+
+	if (!(vcpu->arch.ctrl & 1))
+		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+}
+
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+
+	vcpu->arch.iamr = mfspr(SPRN_IAMR);
+	vcpu->arch.pspb = mfspr(SPRN_PSPB);
+	vcpu->arch.fscr = mfspr(SPRN_FSCR);
+	vcpu->arch.tar = mfspr(SPRN_TAR);
+	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+	vcpu->arch.bescr = mfspr(SPRN_BESCR);
+	vcpu->arch.wort = mfspr(SPRN_WORT);
+	vcpu->arch.tid = mfspr(SPRN_TIDR);
+	vcpu->arch.amr = mfspr(SPRN_AMR);
+	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+	vcpu->arch.dscr = mfspr(SPRN_DSCR);
+}
+
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+	unsigned long dscr;
+	unsigned long tidr;
+	unsigned long iamr;
+	unsigned long amr;
+	unsigned long fscr;
+};
+
+static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+	host_os_sprs->dscr = mfspr(SPRN_DSCR);
+	host_os_sprs->tidr = mfspr(SPRN_TIDR);
+	host_os_sprs->iamr = mfspr(SPRN_IAMR);
+	host_os_sprs->amr = mfspr(SPRN_AMR);
+	host_os_sprs->fscr = mfspr(SPRN_FSCR);
+}
+
+/* vcpu guest regs must already be saved */
+static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+				    struct p9_host_os_sprs *host_os_sprs)
+{
+	mtspr(SPRN_PSPB, 0);
+	mtspr(SPRN_WORT, 0);
+	mtspr(SPRN_UAMOR, 0);
+	mtspr(SPRN_PSPB, 0);
+
+	mtspr(SPRN_DSCR, host_os_sprs->dscr);
+	mtspr(SPRN_TIDR, host_os_sprs->tidr);
+	mtspr(SPRN_IAMR, host_os_sprs->iamr);
+
+	if (host_os_sprs->amr != vcpu->arch.amr)
+		mtspr(SPRN_AMR, host_os_sprs->amr);
+
+	if (host_os_sprs->fscr != vcpu->arch.fscr)
+		mtspr(SPRN_FSCR, host_os_sprs->fscr);
+}
+
 /*
  * Virtual-mode guest entry for POWER9 and later when the host and
  * guest are both using the radix MMU.  The LPIDR has already been set.
@@ -3450,11 +3528,7 @@  static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 			 unsigned long lpcr)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
-	unsigned long host_dscr = mfspr(SPRN_DSCR);
-	unsigned long host_tidr = mfspr(SPRN_TIDR);
-	unsigned long host_iamr = mfspr(SPRN_IAMR);
-	unsigned long host_amr = mfspr(SPRN_AMR);
-	unsigned long host_fscr = mfspr(SPRN_FSCR);
+	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
 	u64 tb, next_timer;
 	int trap, save_pmu;
@@ -3469,6 +3543,8 @@  static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	vcpu->arch.ceded = 0;
 
+	save_p9_host_os_sprs(&host_os_sprs);
+
 	kvmhv_save_host_pmu();		/* saves it to PACA kvm_hstate */
 
 	kvmppc_subcore_enter_guest();
@@ -3496,22 +3572,7 @@  static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-	mtspr(SPRN_DSCR, vcpu->arch.dscr);
-	mtspr(SPRN_IAMR, vcpu->arch.iamr);
-	mtspr(SPRN_PSPB, vcpu->arch.pspb);
-	mtspr(SPRN_FSCR, vcpu->arch.fscr);
-	mtspr(SPRN_TAR, vcpu->arch.tar);
-	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-	mtspr(SPRN_BESCR, vcpu->arch.bescr);
-	mtspr(SPRN_WORT, vcpu->arch.wort);
-	mtspr(SPRN_TIDR, vcpu->arch.tid);
-	/* XXX: DAR, DSISR must be set with MSR[RI] clear (or hstate as appropriate) */
-	mtspr(SPRN_AMR, vcpu->arch.amr);
-	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
-	if (!(vcpu->arch.ctrl & 1))
-		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+	load_spr_state(vcpu);
 
 	/*
 	 * XXX: must always deal with irq_work_raise via NMI vs setting DEC.
@@ -3605,34 +3666,10 @@  static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu->arch.dec_expires = dec + tb;
 	vcpu->cpu = -1;
 	vcpu->arch.thread_cpu = -1;
-	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
-
-	vcpu->arch.iamr = mfspr(SPRN_IAMR);
-	vcpu->arch.pspb = mfspr(SPRN_PSPB);
-	vcpu->arch.fscr = mfspr(SPRN_FSCR);
-	vcpu->arch.tar = mfspr(SPRN_TAR);
-	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
-	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
-	vcpu->arch.bescr = mfspr(SPRN_BESCR);
-	vcpu->arch.wort = mfspr(SPRN_WORT);
-	vcpu->arch.tid = mfspr(SPRN_TIDR);
-	vcpu->arch.amr = mfspr(SPRN_AMR);
-	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
-	vcpu->arch.dscr = mfspr(SPRN_DSCR);
-
-	mtspr(SPRN_PSPB, 0);
-	mtspr(SPRN_WORT, 0);
-	mtspr(SPRN_UAMOR, 0);
-	mtspr(SPRN_DSCR, host_dscr);
-	mtspr(SPRN_TIDR, host_tidr);
-	mtspr(SPRN_IAMR, host_iamr);
-	mtspr(SPRN_PSPB, 0);
 
-	if (host_amr != vcpu->arch.amr)
-		mtspr(SPRN_AMR, host_amr);
+	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	if (host_fscr != vcpu->arch.fscr)
-		mtspr(SPRN_FSCR, host_fscr);
+	store_spr_state(vcpu);
 
 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);