@@ -217,6 +217,9 @@ struct kvm_vcpu_arch {
ulong xer;
u32 cr;
#endif
+#ifdef CONFIG_FSL_BOOKE
+ ulong msr_block;
+#endif
#ifdef CONFIG_PPC_BOOK3S
ulong shadow_msr;
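
The new msr_block field is an inverted mask over the guest MSR: a 1 bit lets the corresponding guest MSR bit through unchanged, while a 0 bit forces it off. Keeping the mask inverted means the guest-entry path can apply it with a single and instruction, as the booke_interrupts.S hunk below does. A minimal standalone sketch of the convention (the MSR_DE/MSR_EE bit values match Book E; everything else here is illustrative, not code from the patch):

    #include <assert.h>
    #include <stdint.h>

    #define MSR_DE 0x200u   /* Book E MSR[DE], debug interrupt enable */
    #define MSR_EE 0x8000u  /* MSR[EE], external interrupt enable */

    int main(void)
    {
        uint32_t msr_block = ~0u;           /* all ones: nothing blocked */
        uint32_t guest_msr = MSR_DE | MSR_EE;

        msr_block &= ~MSR_DE;               /* "set block": clear the mask bit */
        assert((guest_msr & msr_block) == MSR_EE);   /* DE filtered on entry */

        msr_block |= MSR_DE;                /* "clear block": restore the bit */
        assert((guest_msr & msr_block) == guest_msr);
        return 0;
    }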
@@ -403,6 +403,9 @@ int main(void)
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+#ifdef CONFIG_FSL_BOOKE
+ DEFINE(VCPU_MSR_BLOCK, offsetof(struct kvm_vcpu, arch.msr_block));
+#endif
/* book3s */
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
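
asm-offsets.c exists only to export structure offsets to assembly: it is compiled and the build scrapes its output into include/generated/asm-offsets.h. The entry above becomes a plain #define that the exit path below can use as a load displacement; the numeric value here is illustrative:

    #define VCPU_MSR_BLOCK 1234 /* offsetof(struct kvm_vcpu, arch.msr_block) */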
@@ -52,6 +52,23 @@
extern unsigned long kvmppc_booke_handlers;
+#ifdef CONFIG_FSL_BOOKE
+static inline bool kvmppc_msr_block_has(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+ return !(vcpu->arch.msr_block & block_bit);
+}
+
+static inline void kvmppc_set_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+ vcpu->arch.msr_block &= ~block_bit;
+}
+
+static inline void kvmppc_clr_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+ vcpu->arch.msr_block |= block_bit;
+}
+#endif
+
/* Helper function for "full" MSR writes. No need to call this if only EE is
* changing. */
static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
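
These helpers hide the inverted encoding behind a readable API: kvmppc_set_msr_block() blocks a bit by clearing it in the mask, kvmppc_clr_msr_block() unblocks it by setting it, and kvmppc_msr_block_has() reports whether a bit is currently blocked. (Note the field is declared ulong while the helpers take u32; that is harmless on the 32-bit FSL_BOOKE targets this code is gated to.) A hypothetical caller, not part of this patch, might look like:

    /* Sketch only: hide MSR[DE] from the guest while the host owns the
     * debug facilities, then hand it back. MSR_DE comes from <asm/reg.h>. */
    static void host_claim_debug(struct kvm_vcpu *vcpu)
    {
        kvmppc_set_msr_block(vcpu, MSR_DE); /* guest now sees DE forced off */
    }

    static void host_release_debug(struct kvm_vcpu *vcpu)
    {
        if (kvmppc_msr_block_has(vcpu, MSR_DE))
            kvmppc_clr_msr_block(vcpu, MSR_DE);
    }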
@@ -409,7 +409,6 @@ lightweight_exit:
mtctr r3
lwz r3, VCPU_CR(r4)
mtcr r3
- lwz r5, VCPU_GPR(r5)(r4)
lwz r6, VCPU_GPR(r6)(r4)
lwz r7, VCPU_GPR(r7)(r4)
lwz r8, VCPU_GPR(r8)(r4)
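
The guest r5 load is deferred rather than dropped: the next hunk reuses r5 as a scratch register for the msr_block mask, so the guest value can only be restored after that final scratch use.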
@@ -419,6 +418,11 @@ lightweight_exit:
lwz r3, (VCPU_SHARED_MSR + 4)(r3)
oris r3, r3, KVMPPC_MSR_MASK@h
ori r3, r3, KVMPPC_MSR_MASK@l
+#ifdef CONFIG_FSL_BOOKE
+ lwz r5, VCPU_MSR_BLOCK(r4)
+ and r3, r3, r5
+#endif
+ lwz r5, VCPU_GPR(r5)(r4)
mtsrr1 r3
/* Clear any debug events which occurred since we disabled MSR[DE].
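
This is where the mask bites: the guest MSR is loaded from the low word of the 64-bit shared->msr (hence the +4 displacement on this big-endian 32-bit target), the always-on bits in KVMPPC_MSR_MASK are ORed in, and on FSL Book E the result is additionally ANDed with msr_block before being written to SRR1 for the rfi into the guest. The guest's r5 is then restored, completing the reordering from the previous hunk. In C terms the computation is roughly:

    srr1 = (guest_msr | KVMPPC_MSR_MASK) & vcpu->arch.msr_block;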
@@ -67,6 +67,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
/* Since booke kvm only support one core, update all vcpus' PIR to 0 */
vcpu->vcpu_id = 0;
+ /* Unblock all MSR bits */
+ kvmppc_clr_msr_block(vcpu, ~0UL);
+
return 0;
}
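
Finally, vcpu setup clears the whole block mask, i.e. sets msr_block to all ones, so the new and instruction in the exit path is a no-op until some caller actually blocks a bit; behavior is unchanged for code that never uses the new helpers.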