[1/6] kvm: ppc: booke: Enhance wrapper functions to handle shadow registers

Message ID 1405407692-32075-2-git-send-email-Bharat.Bhushan@freescale.com
State New, archived

Commit Message

Bharat Bhushan July 15, 2014, 7:01 a.m. UTC
BOOKE-HV has shadow registers such as GSPRG[0-3], GSRR0 and GSRR1,
and these shadow registers are guest accessible. They therefore need
to be kept up to date on BOOKE-HV.
This patch enhances the existing wrapper macros to handle these
shadow registers.

Signed-off-by: Bharat Bhushan <Bharat.Bhushan@freescale.com>
---
 arch/powerpc/include/asm/kvm_ppc.h | 59 ++++++++++++++++++++++++--------------
 1 file changed, 37 insertions(+), 22 deletions(-)
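
For reference, a minimal sketch of what SHARED_WRAPPER(srr0, 64, SPRN_GSRR0)
expands to with this patch applied (expanded by hand from the hunks below;
SPRN_GSRR0, mfspr() and mtspr() are the existing powerpc definitions):

static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
        /* On BOOKE-HV, read the guest-accessible shadow SPR directly. */
        if (is_e500hv() && SPRN_GSRR0)
                return mfspr(SPRN_GSRR0);

        /* Otherwise fall back to the shared page. */
        if (kvmppc_shared_big_endian(vcpu))
                return be64_to_cpu(vcpu->arch.shared->srr0);
        else
                return le64_to_cpu(vcpu->arch.shared->srr0);
}

static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, u64 val)
{
        /* On BOOKE-HV, write the shadow SPR ... */
        if (is_e500hv() && SPRN_GSRR0)
                mtspr(SPRN_GSRR0, val);

        /* ... and the shared page is updated in either case. */
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->srr0 = cpu_to_be64(val);
        else
                vcpu->arch.shared->srr0 = cpu_to_le64(val);
}

Callers are unaffected by the extra macro argument; for example, exception
delivery code keeps calling kvmppc_set_srr0(vcpu, ...) as before, which on
BOOKE-HV now writes GSRR0 in addition to the shared area.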

Patch

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f3f7611..627d61e 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -475,37 +475,52 @@  static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
 #endif
 }
 
-#define SHARED_WRAPPER_GET(reg, size)					\
-static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
+static inline bool is_e500hv(void)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return true;
+#else
+	return false;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size, e500hv_spr)			\
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
 {									\
+	if (is_e500hv() && e500hv_spr)					\
+		return mfspr(e500hv_spr);				\
+									\
 	if (kvmppc_shared_big_endian(vcpu))				\
 	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
 	else								\
 	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
 }									\
 
-#define SHARED_WRAPPER_SET(reg, size)					\
+#define SHARED_WRAPPER_SET(reg, size, e500hv_spr)			\
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
 {									\
+	if (is_e500hv() && e500hv_spr)					\
+		mtspr(e500hv_spr, val);					\
+									\
 	if (kvmppc_shared_big_endian(vcpu))				\
 	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
 	else								\
 	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
 }									\
 
-#define SHARED_WRAPPER(reg, size)					\
-	SHARED_WRAPPER_GET(reg, size)					\
-	SHARED_WRAPPER_SET(reg, size)					\
-
-SHARED_WRAPPER(critical, 64)
-SHARED_WRAPPER(sprg0, 64)
-SHARED_WRAPPER(sprg1, 64)
-SHARED_WRAPPER(sprg2, 64)
-SHARED_WRAPPER(sprg3, 64)
-SHARED_WRAPPER(srr0, 64)
-SHARED_WRAPPER(srr1, 64)
-SHARED_WRAPPER(dar, 64)
-SHARED_WRAPPER_GET(msr, 64)
+#define SHARED_WRAPPER(reg, size, e500hv_spr)				\
+	SHARED_WRAPPER_GET(reg, size, e500hv_spr)			\
+	SHARED_WRAPPER_SET(reg, size, e500hv_spr)			\
+
+SHARED_WRAPPER(critical, 64, 0)
+SHARED_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+SHARED_WRAPPER(sprg1, 64, SPRN_GSPRG1)
+SHARED_WRAPPER(sprg2, 64, SPRN_GSPRG2)
+SHARED_WRAPPER(sprg3, 64, SPRN_GSPRG3)
+SHARED_WRAPPER(srr0, 64, SPRN_GSRR0)
+SHARED_WRAPPER(srr1, 64, SPRN_GSRR1)
+SHARED_WRAPPER(dar, 64, SPRN_GDEAR)
+SHARED_WRAPPER_GET(msr, 64, 0)
 static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (kvmppc_shared_big_endian(vcpu))
@@ -513,12 +528,12 @@  static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
 	else
 	       vcpu->arch.shared->msr = cpu_to_le64(val);
 }
-SHARED_WRAPPER(dsisr, 32)
-SHARED_WRAPPER(int_pending, 32)
-SHARED_WRAPPER(sprg4, 64)
-SHARED_WRAPPER(sprg5, 64)
-SHARED_WRAPPER(sprg6, 64)
-SHARED_WRAPPER(sprg7, 64)
+SHARED_WRAPPER(dsisr, 32, 0)
+SHARED_WRAPPER(int_pending, 32, 0)
+SHARED_WRAPPER(sprg4, 64, 0)
+SHARED_WRAPPER(sprg5, 64, 0)
+SHARED_WRAPPER(sprg6, 64, 0)
+SHARED_WRAPPER(sprg7, 64, 0)
 
 static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
 {