Message ID | 20110328192558.GB11104@schlenkerla.am.freescale.net (mailing list archive) |
---|---|
State | Not Applicable |
Headers | show |
On 28.03.2011, at 21:25, Scott Wood wrote:

> From: yu liu <yu.liu@freescale.com>
>
> In order to use lazy SPE register save/restore, we need to
> know when the guest is using MSR[SPE]. In order to do that, we
> need to control the actual MSR[SPE] separately from the guest's
> notion of MSR[SPE].

Please make this generic for BookE. There's no reason to have FSL ifdefs in the code.

Alex
On Tue, 29 Mar 2011 11:37:44 +0200
Alexander Graf <agraf@suse.de> wrote:

> On 28.03.2011, at 21:25, Scott Wood wrote:
>
> > From: yu liu <yu.liu@freescale.com>
> >
> > In order to use lazy SPE register save/restore, we need to
> > know when the guest is using MSR[SPE]. In order to do that, we
> > need to control the actual MSR[SPE] separately from the guest's
> > notion of MSR[SPE].
>
> Please make this generic for BookE. There's no reason to have FSL ifdefs in the code.

Yeah, I was wondering about that too.  Will respin.

-Scott
Respin per review feedback (Alexander Graf): the MSR block mask is generic
BookE state, so drop the CONFIG_FSL_BOOKE ifdefs.  kvm_host.h and
asm-offsets.c are shared with Book3S and keep a CONFIG_BOOKE guard; the
booke-only files (booke.h, booke_interrupts.S) need no guard at all.
Also: pass ~0U (not ~0UL) to the u32 block_bit parameter, and document the
inverted-mask convention (a 0 bit blocks, a 1 bit passes through).
NOTE(review): with the field now generic, every BookE platform's vcpu setup
(e.g. 44x, not just e500) must unblock all bits as done below — confirm on
respin that non-e500 setups get the same initialization.

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bba3b9b..c376f6b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -217,6 +217,14 @@ struct kvm_vcpu_arch {
 	ulong xer;
 	u32 cr;
 #endif
+#ifdef CONFIG_BOOKE
+	/*
+	 * AND-mask applied to the guest MSR before it is loaded into the
+	 * hardware MSR on guest entry.  A cleared bit forces the
+	 * corresponding MSR bit off ("blocked"); a set bit passes the
+	 * guest's value through.
+	 */
+	ulong msr_block;
+#endif
 
 #ifdef CONFIG_PPC_BOOK3S
 	ulong shadow_msr;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23e6a93..75b72c7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -403,6 +403,9 @@ int main(void)
 
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+#ifdef CONFIG_BOOKE
+	DEFINE(VCPU_MSR_BLOCK, offsetof(struct kvm_vcpu, arch.msr_block));
+#endif
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb70..303a415 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,6 +52,28 @@
 
 extern unsigned long kvmppc_booke_handlers;
 
+/*
+ * MSR block mask helpers.  vcpu->arch.msr_block is ANDed into the guest
+ * MSR on guest entry: a cleared bit is "blocked" (forced off in the
+ * hardware MSR), a set bit passes the guest's value through.
+ */
+static inline bool kvmppc_msr_block_has(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	/* A bit is blocked when it is clear in the mask. */
+	return !(vcpu->arch.msr_block & block_bit);
+}
+
+/* Block the given MSR bits (force them clear in the hardware MSR). */
+static inline void kvmppc_set_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	vcpu->arch.msr_block &= ~block_bit;
+}
+
+/* Stop blocking the given MSR bits. */
+static inline void kvmppc_clr_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	vcpu->arch.msr_block |= block_bit;
+}
+
 /* Helper function for "full" MSR writes. No need to call this if only EE is
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index ab29f5f..92193c7 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -409,7 +409,6 @@ lightweight_exit:
 	mtctr	r3
 	lwz	r3, VCPU_CR(r4)
 	mtcr	r3
-	lwz	r5, VCPU_GPR(r5)(r4)
 	lwz	r6, VCPU_GPR(r6)(r4)
 	lwz	r7, VCPU_GPR(r7)(r4)
 	lwz	r8, VCPU_GPR(r8)(r4)
@@ -419,6 +418,10 @@ lightweight_exit:
 	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
+	/* Apply the MSR block mask: blocked (zero) bits are forced clear. */
+	lwz	r5, VCPU_MSR_BLOCK(r4)
+	and	r3, r3, r5
+	lwz	r5, VCPU_GPR(r5)(r4)
 	mtsrr1	r3
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e762634..acfe052 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -67,6 +67,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* Since booke kvm only support one core, update all vcpus' PIR to 0 */
 	vcpu->vcpu_id = 0;
 
+	/* Start with no MSR bits blocked. */
+	kvmppc_clr_msr_block(vcpu, ~0U);
+
 	return 0;
 }