Message ID: 1519753958-11756-8-git-send-email-wei.guo.simon@gmail.com (mailing list archive)
State: Not Applicable
Series: None
On Wed, Feb 28, 2018 at 01:52:26AM +0800, wei.guo.simon@gmail.com wrote:
> From: Simon Guo <wei.guo.simon@gmail.com>
>
> Currently the kernel doesn't use transactional memory. And there is
> an issue for privileged guests: the tbegin/tsuspend/tresume/tabort TM
> instructions can change the MSR TM bits without trapping into the PR
> host, so the following sequence yields a stale mfmsr result:
>
> 	tbegin	<- MSR bits updated to transaction active.
> 	beq	<- failure handler branch
> 	mfmsr	<- still reads MSR bits from the magic page, with the
> 		   transaction inactive.
>
> This is not an issue for non-privileged guests, since their mfmsr is
> not patched with the magic page and always traps into the PR host.
>
> This patch always fails the tbegin attempt for privileged guests, so
> that the above issue is prevented. That is benign since the (guest)
> kernel currently doesn't initiate any transactions.
>
> Test case:
> https://github.com/justdoitqd/publicFiles/blob/master/test_tbegin_pr.c
>
> Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
> ---
>  arch/powerpc/include/asm/kvm_book3s.h |  2 ++
>  arch/powerpc/kvm/book3s_emulate.c     | 43 +++++++++++++++++++++++++++++++++++
>  arch/powerpc/kvm/book3s_pr.c          | 11 ++++++++-
>  3 files changed, 55 insertions(+), 1 deletion(-)
>
[...]
> +		if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
> +			preempt_disable();
> +			vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
> +				(vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
> +
> +			vcpu->arch.texasr = (TEXASR_FS | TEXASR_EX |
> +				(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
> +				<< TEXASR_FC_LG));
> +
> +			if ((inst >> 21) & 0x1)
> +				vcpu->arch.texasr |= TEXASR_ROT;
> +
> +			if (kvmppc_get_msr(vcpu) & MSR_PR)
> +				vcpu->arch.texasr |= TEXASR_PR;

This if statement seems unnecessary, since we only get here when
MSR_PR is clear.

Paul.
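To make the TEXASR write in the quoted hunk concrete, here is a
stand-alone sketch (not from the thread) that computes the value the
emulation stores when it fails a privileged tbegin. The bit-layout
constants are restated from the kernel's asm/reg.h and uapi/asm/tm.h
so the example builds outside the kernel tree; treat them as this
sketch's assumptions.

#include <stdio.h>
#include <stdint.h>

#define TEXASR_FC_LG		(63 - 7)	/* failure code: top byte */
#define TEXASR_FS		(1ULL << (63 - 36))	/* failure summary */
#define TEXASR_EX		(1ULL << (63 - 37))	/* TFIAR exact */
#define TM_CAUSE_PERSISTENT	0x01		/* failure will recur */
#define TM_CAUSE_EMULATE	0xd0		/* software emulation */

int main(void)
{
	uint64_t texasr = TEXASR_FS | TEXASR_EX |
		((uint64_t)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)
		 << TEXASR_FC_LG);

	/* 0xd1 in the top byte: emulated, persistent failure cause */
	printf("TEXASR = 0x%016llx\n", (unsigned long long)texasr);
	return 0;
}

Marking the failure persistent matters: it tells the guest's failure
handler not to simply retry the transaction, which would otherwise
loop forever given that the host fails every privileged tbegin.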
Hi Paul,

On Tue, May 15, 2018 at 04:07:55PM +1000, Paul Mackerras wrote:
> On Wed, Feb 28, 2018 at 01:52:26AM +0800, wei.guo.simon@gmail.com wrote:
[...]
> > +			if (kvmppc_get_msr(vcpu) & MSR_PR)
> > +				vcpu->arch.texasr |= TEXASR_PR;
>
> This if statement seems unnecessary, since we only get here when
> MSR_PR is clear.

Yes. I will remove that.

Thanks,
- Simon
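Dropping that check would presumably leave the hunk looking like the
sketch below (the expected shape of the follow-up revision, not an
actual posted patch):

			if ((inst >> 21) & 0x1)
				vcpu->arch.texasr |= TEXASR_ROT;

			/* MSR_PR is known clear on this path, so TEXASR_PR
			 * is never set; only the HV check remains. */
			if (kvmppc_get_msr(vcpu) & MSR_HV)
				vcpu->arch.texasr |= TEXASR_HV;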
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2ecb6a3..9690280 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -258,9 +258,11 @@ extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
 #else
 static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
 static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
 #endif
 
 extern int kvm_irq_bypass;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index a03533d..90b5f59 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -23,6 +23,7 @@
 #include <asm/reg.h>
 #include <asm/switch_to.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include "book3s.h"
 
 #define OP_19_XOP_RFID		18
@@ -47,6 +48,8 @@
 #define OP_31_XOP_EIOIO		854
 #define OP_31_XOP_SLBMFEE	915
 
+#define OP_31_XOP_TBEGIN	654
+
 /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
 #define OP_31_XOP_DCBZ		1010
 
@@ -362,6 +365,46 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		break;
 	}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case OP_31_XOP_TBEGIN:
+	{
+		if (!cpu_has_feature(CPU_FTR_TM))
+			break;
+
+		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
+			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
+			emulated = EMULATE_AGAIN;
+			break;
+		}
+
+		if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
+			preempt_disable();
+			vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
+				(vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+
+			vcpu->arch.texasr = (TEXASR_FS | TEXASR_EX |
+				(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+				<< TEXASR_FC_LG));
+
+			if ((inst >> 21) & 0x1)
+				vcpu->arch.texasr |= TEXASR_ROT;
+
+			if (kvmppc_get_msr(vcpu) & MSR_PR)
+				vcpu->arch.texasr |= TEXASR_PR;
+
+			if (kvmppc_get_msr(vcpu) & MSR_HV)
+				vcpu->arch.texasr |= TEXASR_HV;
+
+			vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
+			vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
+
+			kvmppc_restore_tm_sprs(vcpu);
+			preempt_enable();
+		} else
+			emulated = EMULATE_FAIL;
+		break;
+	}
+#endif
 	default:
 		emulated = EMULATE_FAIL;
 	}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 473c819..82e1a74 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -204,6 +204,15 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
 	smsr |= MSR_ISF | MSR_HV;
 #endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * In guest privileged state we want to fail all TM transactions,
+	 * so clear the MSR TM bit so that every tbegin. traps into the
+	 * host for emulation.
+	 */
+	if (!(guest_msr & MSR_PR))
+		smsr &= ~MSR_TM;
+#endif
 	vcpu->arch.shadow_msr = smsr;
 }
 
@@ -301,7 +310,7 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
 	tm_disable();
 }
 
-static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
 {
 	tm_enable();
 	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
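A note on the decode in the book3s_emulate.c hunk: tbegin. is an
X-form instruction with primary opcode 31 and extended opcode 654,
and `(inst >> 21) & 0x1` extracts its R (rollback-only) field, which
is what sets TEXASR_ROT above. Below is a stand-alone sketch of that
decode; the helper names are invented here to mirror KVM's
get_op()/get_xop() accessors, not taken from the kernel.

#include <stdio.h>
#include <stdint.h>

#define OP_31_XOP_TBEGIN	654

/* hypothetical helpers mirroring KVM's get_op()/get_xop() */
static unsigned int primary_op(uint32_t inst)  { return inst >> 26; }
static unsigned int extended_op(uint32_t inst) { return (inst >> 1) & 0x3ff; }

int main(void)
{
	/* encode "tbegin. 1": opcode 31, R=1 at bit 21, xop 654, Rc=1 */
	uint32_t inst = (31u << 26) | (1u << 21) | (654u << 1) | 1u;

	if (primary_op(inst) == 31 && extended_op(inst) == OP_31_XOP_TBEGIN)
		printf("tbegin, R=%u\n", (inst >> 21) & 0x1);
	return 0;
}

The book3s_pr.c hunk is what makes this path reachable at all: with
MSR_TM cleared in the shadow MSR for privileged guest state, a guest
tbegin. raises a TM-unavailable trap into the host rather than
executing natively, and the host then routes the instruction word to
the emulation above.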