[16/18] KVM: PPC: Book3S HV: Add transactional memory support

Message ID 1389176736-26821-17-git-send-email-paulus@samba.org
State New, archived

Commit Message

Paul Mackerras Jan. 8, 2014, 10:25 a.m. UTC
This adds saving of the transactional memory (TM) checkpointed state
on guest entry and exit.  We only do this if we see that the guest has
an active transaction.

It also adds emulation of the TM state changes when delivering IRQs
into the guest.  According to the architecture, if we are
transactional when an IRQ occurs the TM state is changed to suspended,
otherwise it's left unchanged.
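
As a rough C sketch (mirroring the kvmppc_mmu_book3s_64_hv_reset_msr()
change in this patch), the transition applied to the interrupt MSR is:

	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else	/* otherwise carry the TS bits across unchanged */
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);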

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c     |   9 +-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 237 +++++++++++++++++++-------------
 2 files changed, 146 insertions(+), 100 deletions(-)

Comments

Alexander Graf Jan. 27, 2014, 12:47 p.m. UTC | #1
On 08.01.2014, at 11:25, Paul Mackerras <paulus@samba.org> wrote:

> This adds saving of the transactional memory (TM) checkpointed state
> on guest entry and exit.  We only do this if we see that the guest has
> an active transaction.
> 
> It also adds emulation of the TM state changes when delivering IRQs
> into the guest.  According to the architecture, if we are
> transactional when an IRQ occurs the TM state is changed to suspended,
> otherwise it's left unchanged.
> 
> Signed-off-by: Michael Neuling <mikey@neuling.org>
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
> arch/powerpc/kvm/book3s_64_mmu_hv.c     |   9 +-
> arch/powerpc/kvm/book3s_hv_rmhandlers.S | 237 +++++++++++++++++++-------------
> 2 files changed, 146 insertions(+), 100 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> index 79e992d..ef971a5 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> @@ -262,7 +262,14 @@ int kvmppc_mmu_hv_init(void)
> 
> static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
> {
> -	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
> +	unsigned long msr = vcpu->arch.intr_msr;
> +
> +	/* If transactional, change to suspend mode on IRQ delivery */
> +	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
> +		msr |= MSR_TS_S;
> +	else
> +		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
> +	kvmppc_set_msr(vcpu, msr);
> }
> 
> /*
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 298d1755..f7bf681 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -28,6 +28,9 @@
> #include <asm/exception-64s.h>
> #include <asm/kvm_book3s_asm.h>
> #include <asm/mmu-hash64.h>
> +#include <uapi/asm/tm.h>

You shouldn't include uapi/ headers from kernel space. Please make this an #include for asm/tm.h and have that header check for __ASSEMBLY__.
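
Roughly this shape, say (a hypothetical sketch of the suggested asm/tm.h arrangement, not the header as it exists today):

	/* arch/powerpc/include/asm/tm.h */
	#include <uapi/asm/tm.h>	/* TM_CAUSE_* constants, usable from asm */

	#ifndef __ASSEMBLY__

	/* C-only declarations, hidden from .S files */
	extern void tm_enable(void);
	extern void tm_abort(uint8_t cause);

	#endif /* __ASSEMBLY__ */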

> +
> +#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
> 
> #ifdef __LITTLE_ENDIAN__
> #error Need to fix lppaca and SLB shadow accesses in little endian mode
> @@ -597,6 +600,115 @@ BEGIN_FTR_SECTION
>  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
> END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> 
> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> +BEGIN_FTR_SECTION
> +	b	1f
> +END_FTR_SECTION_IFCLR(CPU_FTR_TM)
> +
> +	/* Turn on TM/FP/VSX/VMX so we can restore them. */
> +	mfmsr	r5
> +	li	r6, MSR_TM >> 32
> +	sldi	r6, r6, 32
> +	or	r5, r5, r6
> +	ori	r5, r5, MSR_FP
> +BEGIN_FTR_SECTION
> +	oris	r5, r5, MSR_VEC@h
> +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
> +BEGIN_FTR_SECTION
> +	oris	r5, r5, MSR_VSX@h
> +END_FTR_SECTION_IFSET(CPU_FTR_VSX)

I thought we don't support CPUs with TM but no ALTIVEC or VSX?

> +	mtmsrd	r5
> +
> +	/*
> +	 * The user may change these outside of a transaction, so they must
> +	 * always be context switched.
> +	 */
> +	ld	r5, VCPU_TFHAR(r4)
> +	ld	r6, VCPU_TFIAR(r4)
> +	ld	r7, VCPU_TEXASR(r4)
> +	mtspr	SPRN_TFHAR, r5
> +	mtspr	SPRN_TFIAR, r6
> +	mtspr	SPRN_TEXASR, r7
> +
> +	ld	r5, VCPU_MSR(r4)
> +	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
> +	beq	1f	/* TM not active in guest */
> +
> +	/*
> +	 * We need to load up the checkpointed state for the guest.
> +	 * We need to do this early as it will blow away any GPRs, VSRs and
> +	 * some SPRs.
> +	 */
> +
> +	mr	r31, r4
> +	addi	r3, r31, VCPU_FPRS_TM
> +	bl	.load_fp_state
> +BEGIN_FTR_SECTION
> +	addi	r3, r31, VCPU_VRS_TM
> +	bl	.load_vr_state
> +END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
> +	mr	r4, r31
> +	lwz	r7, VCPU_VRSAVE_TM(r4)
> +	mtspr	SPRN_VRSAVE, r7
> +
> +	ld	r5, VCPU_LR_TM(r4)
> +	lwz	r6, VCPU_CR_TM(r4)
> +	ld	r7, VCPU_CTR_TM(r4)
> +	ld	r8, VCPU_AMR_TM(r4)
> +	ld	r9, VCPU_TAR_TM(r4)
> +	mtlr	r5
> +	mtcr	r6
> +	mtctr	r7
> +	mtspr	SPRN_AMR, r8
> +	mtspr	SPRN_TAR, r9
> +
> +	/*
> +	 * Load up PPR and DSCR values but don't put them in the actual SPRs 
> +	 * till the last moment to avoid running with userspace PPR and DSCR for
> +	 * too long.
> +	 */
> +	ld	r29, VCPU_DSCR_TM(r4)
> +	ld	r30, VCPU_PPR_TM(r4)
> +
> +	std	r2, PACATMSCRATCH(r13) /* Save TOC */
> +
> +	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
> +	li	r5, 0
> +	mtmsrd	r5, 1
> +
> +	/* Load GPRs r0-r28 */
> +	reg = 0
> +	.rept	29
> +	ld	reg, VCPU_GPRS_TM(reg)(r31)
> +	reg = reg + 1
> +	.endr
> +
> +	mtspr	SPRN_DSCR, r29
> +	mtspr	SPRN_PPR, r30
> +
> +	/* Load final GPRs */
> +	ld	29, VCPU_GPRS_TM(29)(r31)
> +	ld	30, VCPU_GPRS_TM(30)(r31)
> +	ld	31, VCPU_GPRS_TM(31)(r31)
> +
> +	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
> +	TRECHKPT

Where do we fault to if we get a transaction abort now? We can get one because we're not in suspended TM state, right?

> +
> +	/* Now let's get back the state we need. */
> +	HMT_MEDIUM
> +	GET_PACA(r13)
> +	ld	r29, HSTATE_DSCR(r13)
> +	mtspr	SPRN_DSCR, r29
> +	ld	r4, HSTATE_KVM_VCPU(r13)
> +	ld	r1, HSTATE_HOST_R1(r13)
> +	ld	r2, PACATMSCRATCH(r13)
> +
> +	/* Set the MSR RI since we have our registers back. */
> +	li	r5, MSR_RI
> +	mtmsrd	r5, 1
> +1:
> +#endif
> +
> 	/* Load guest PMU registers */
> 	/* R4 is live here (vcpu pointer) */
> 	li	r3, 1
> @@ -704,14 +816,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> 	ld	r6, VCPU_VTB(r4)
> 	mtspr	SPRN_IC, r5
> 	mtspr	SPRN_VTB, r6
> -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> -	ld	r5, VCPU_TFHAR(r4)
> -	ld	r6, VCPU_TFIAR(r4)
> -	ld	r7, VCPU_TEXASR(r4)
> -	mtspr	SPRN_TFHAR, r5
> -	mtspr	SPRN_TFIAR, r6
> -	mtspr	SPRN_TEXASR, r7
> -#endif
> 	ld	r8, VCPU_EBBHR(r4)
> 	mtspr	SPRN_EBBHR, r8
> 	ld	r5, VCPU_EBBRR(r4)
> @@ -817,7 +921,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
> 12:	mtspr	SPRN_SRR0, r10
> 	mr	r10,r0
> 	mtspr	SPRN_SRR1, r11
> -	ld	r11, VCPU_INTR_MSR(r4)
> +	mr	r9, r4
> +	bl	kvmppc_msr_interrupt
> 5:
> 
> /*
> @@ -1103,12 +1208,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
> BEGIN_FTR_SECTION
> 	b	8f
> END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> -	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
> -	mfmsr	r8
> -	li	r0, 1
> -	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
> -	mtmsrd	r8
> -
> 	/* Save POWER8-specific registers */
> 	mfspr	r5, SPRN_IAMR
> 	mfspr	r6, SPRN_PSPB
> @@ -1122,14 +1221,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> 	std	r5, VCPU_IC(r9)
> 	std	r6, VCPU_VTB(r9)
> 	std	r7, VCPU_TAR(r9)
> -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> -	mfspr	r5, SPRN_TFHAR
> -	mfspr	r6, SPRN_TFIAR
> -	mfspr	r7, SPRN_TEXASR
> -	std	r5, VCPU_TFHAR(r9)
> -	std	r6, VCPU_TFIAR(r9)
> -	std	r7, VCPU_TEXASR(r9)
> -#endif
> 	mfspr	r8, SPRN_EBBHR
> 	std	r8, VCPU_EBBHR(r9)
> 	mfspr	r5, SPRN_EBBRR
> @@ -1504,76 +1595,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
> 1:	addi	r8,r8,16
> 	.endr
> 
> -<<<<<<< HEAD
> -=======

Ah, so here you remove it again ;). Please run a bisectability test on the next patch set:

#!/bin/bash -e
# Check out and build every commit in the given rev-list range,
# stopping (-e) at the first one that fails to build.
for commit in $(git rev-list --reverse "$@"); do
    echo "Commit $commit"
    git checkout "$commit"
    make -j80
done
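
Invoked as, say, ./bisect-build.sh v3.13..kvm-ppc-queue (script name and range made up here); it stops at the first commit whose build breaks.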

> -	/* Save DEC */
> -	mfspr	r5,SPRN_DEC
> -	mftb	r6
> -	extsw	r5,r5
> -	add	r5,r5,r6
> -	std	r5,VCPU_DEC_EXPIRES(r9)
> -
> -BEGIN_FTR_SECTION
> -	b	8f
> -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> -	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
> -	mfmsr	r8
> -	li	r0, 1
> -	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
> -	mtmsrd	r8
> -
> -	/* Save POWER8-specific registers */
> -	mfspr	r5, SPRN_IAMR
> -	mfspr	r6, SPRN_PSPB
> -	mfspr	r7, SPRN_FSCR
> -	std	r5, VCPU_IAMR(r9)
> -	stw	r6, VCPU_PSPB(r9)
> -	std	r7, VCPU_FSCR(r9)
> -	mfspr	r5, SPRN_IC
> -	mfspr	r6, SPRN_VTB
> -	mfspr	r7, SPRN_TAR
> -	std	r5, VCPU_IC(r9)
> -	std	r6, VCPU_VTB(r9)
> -	std	r7, VCPU_TAR(r9)
> -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> -	mfspr	r5, SPRN_TFHAR
> -	mfspr	r6, SPRN_TFIAR
> -	mfspr	r7, SPRN_TEXASR
> -	std	r5, VCPU_TFHAR(r9)
> -	std	r6, VCPU_TFIAR(r9)
> -	std	r7, VCPU_TEXASR(r9)
> -#endif
> -	mfspr	r8, SPRN_EBBHR
> -	std	r8, VCPU_EBBHR(r9)
> -	mfspr	r5, SPRN_EBBRR
> -	mfspr	r6, SPRN_BESCR
> -	mfspr	r7, SPRN_CSIGR
> -	mfspr	r8, SPRN_TACR
> -	std	r5, VCPU_EBBRR(r9)
> -	std	r6, VCPU_BESCR(r9)
> -	std	r7, VCPU_CSIGR(r9)
> -	std	r8, VCPU_TACR(r9)
> -	mfspr	r5, SPRN_TCSCR
> -	mfspr	r6, SPRN_ACOP
> -	mfspr	r7, SPRN_PID
> -	mfspr	r8, SPRN_WORT
> -	std	r5, VCPU_TCSCR(r9)
> -	std	r6, VCPU_ACOP(r9)
> -	stw	r7, VCPU_GUEST_PID(r9)
> -	std	r8, VCPU_WORT(r9)
> -8:
> -
> -	/* Save and reset AMR and UAMOR before turning on the MMU */
> -BEGIN_FTR_SECTION
> -	mfspr	r5,SPRN_AMR
> -	mfspr	r6,SPRN_UAMOR
> -	std	r5,VCPU_AMR(r9)
> -	std	r6,VCPU_UAMOR(r9)
> -	li	r6,0
> -	mtspr	SPRN_AMR,r6
> -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
> -
> ->>>>>>> a65ae5a... KVM: PPC: Book3S HV: Add new state for transactional memory
> 	/* Unset guest mode */
> 	li	r0, KVM_GUEST_MODE_NONE
> 	stb	r0, HSTATE_IN_GUEST(r13)
> @@ -1627,7 +1648,7 @@ kvmppc_hdsi:
> 	mtspr	SPRN_SRR0, r10
> 	mtspr	SPRN_SRR1, r11
> 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
> -	ld	r11, VCPU_INTR_MSR(r9)
> +	bl	kvmppc_msr_interrupt
> fast_interrupt_c_return:
> 6:	ld	r7, VCPU_CTR(r9)
> 	lwz	r8, VCPU_XER(r9)
> @@ -1696,7 +1717,7 @@ kvmppc_hisi:
> 1:	mtspr	SPRN_SRR0, r10
> 	mtspr	SPRN_SRR1, r11
> 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
> -	ld	r11, VCPU_INTR_MSR(r9)
> +	bl	kvmppc_msr_interrupt
> 	b	fast_interrupt_c_return
> 
> 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
> @@ -1739,7 +1760,7 @@ sc_1_fast_return:
> 	mtspr	SPRN_SRR0,r10
> 	mtspr	SPRN_SRR1,r11
> 	li	r10, BOOK3S_INTERRUPT_SYSCALL
> -	ld	r11, VCPU_INTR_MSR(r9)
> +	bl	kvmppc_msr_interrupt
> 	mr	r4,r9
> 	b	fast_guest_return
> 
> @@ -2067,7 +2088,7 @@ machine_check_realmode:
> 	beq	mc_cont
> 	/* If not, deliver a machine check.  SRR0/1 are already set */
> 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
> -	ld	r11, VCPU_INTR_MSR(r9)
> +	bl	kvmppc_msr_interrupt
> 	b	fast_interrupt_c_return
> 
> /*
> @@ -2196,7 +2217,9 @@ BEGIN_FTR_SECTION
> END_FTR_SECTION_IFSET(CPU_FTR_VSX)
> #endif
> 	mtmsrd	r8
> +BEGIN_FTR_SECTION
> 	isync
> +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

Why bother?

> 	addi	r3,r3,VCPU_FPRS
> 	bl	.store_fp_state
> #ifdef CONFIG_ALTIVEC
> @@ -2208,8 +2231,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
> 	mfspr	r6,SPRN_VRSAVE
> 	stw	r6,VCPU_VRSAVE(r3)
> 	mtlr	r30
> -	mtmsrd	r5
> -	isync
> 	blr
> 
> /*
> @@ -2234,7 +2255,9 @@ BEGIN_FTR_SECTION
> END_FTR_SECTION_IFSET(CPU_FTR_VSX)
> #endif
> 	mtmsrd	r8
> +BEGIN_FTR_SECTION
> 	isync
> +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
> 	addi	r3,r4,VCPU_FPRS
> 	bl	.load_fp_state
> #ifdef CONFIG_ALTIVEC
> @@ -2256,3 +2279,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
>  */
> kvmppc_bad_host_intr:
> 	b	.
> +
> +/*
> + * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
> + * from VCPU_INTR_MSR and is modified based on the required TM state changes.
> + *   r11 has the guest MSR value
> + *   r9 has a vcpu pointer

This needs to document the registers it touches.
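
For example, something like this above the function (a sketch; the in/out/clobber list is just what the current body uses):

	/*
	 * On entry:  r11 = guest MSR at the point of the interrupt
	 *            r9  = vcpu pointer
	 * On exit:   r11 = MSR to deliver the interrupt with
	 * Clobbers:  r0
	 */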


Alex

> + */
> +kvmppc_msr_interrupt:
> +	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
> +	cmpwi	r0, 2 /* Check if we are in transactional state..  */
> +	ld	r11, VCPU_INTR_MSR(r9)
> +	bne	1f
> +	/* ... if transactional, change to suspended */
> +	li	r0, 1
> +1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
> +	blr
> -- 
> 1.8.5.2
> 

Paul Mackerras March 24, 2014, 1:18 a.m. UTC | #2
On Mon, Jan 27, 2014 at 01:47:30PM +0100, Alexander Graf wrote:
> 
> On 08.01.2014, at 11:25, Paul Mackerras <paulus@samba.org> wrote:
> 
> > +	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
> > +	TRECHKPT
> 
> Where do we fault to if we get a transaction abort now? We can get one because we're not in suspended TM state, right?

We're in non-transactional state before the trechkpt. instruction and
in suspended state after it, so in neither case will a transaction
abort cause a jump back to the tbegin.
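
For reference, the two MSR[TS] bits encode the states involved here; shown
as a C enum with illustrative names (not the kernel's MSR_TS_* macros):

	enum msr_ts {
		TS_NONTRANSACTIONAL = 0,	/* 0b00 */
		TS_SUSPENDED        = 1,	/* 0b01 */
		TS_TRANSACTIONAL    = 2,	/* 0b10; 0b11 is reserved */
	};

trechkpt. runs in 0b00 and leaves the thread in 0b01, and neither state
lets an abort redirect control to a tbegin. failure handler.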

> > -<<<<<<< HEAD
> > -=======
> 
> Ah, so here you remove it again ;). Please run a bisectability test on the next patch set:

Oops, sorry, yes I'll do that.

Paul.