[RFC,5/7] KVM: PPC: Remove RECONCILE_IRQ_STATE from guest exit

Message ID 20210308095244.3195782-7-npiggin@gmail.com

Commit Message

Nicholas Piggin March 8, 2021, 9:52 a.m. UTC
Change KVM to keep the irq soft-mask state hard disabled when entering
the guest, to avoid "reconciling" the state when exiting the guest.
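
A rough sketch, not part of this patch, of the entry-side invariant it
relies on (hypothetical helper name; uses asm/hw_irq.h and asm/paca.h):

	#ifdef CONFIG_PPC64
	/*
	 * Hypothetical check: the vcpu run path is entered with irqs
	 * soft-masked and the hard-disable already latched in the PACA,
	 * so nothing needs to be "reconciled" when the guest exits.
	 */
	static inline void kvm_assert_entry_hard_disabled(void)
	{
		WARN_ON_ONCE(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON_ONCE(!(irq_soft_mask_return() & IRQS_DISABLED));
	}
	#endif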

IRQ tracing still has to be updated so that host irqs are marked as
disabled when the guest is exited, but this is now done in C, similarly
to Book3S HV.
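
For reference, the C-side pattern around the guest run that the booke.c
and book3s_pr.c hunks below end up with:

	trace_hardirqs_on();		/* tracing only: irqs "on" while in the guest */
	ret = __kvmppc_vcpu_run(vcpu);
	trace_hardirqs_off();		/* back in the host, still hard-disabled */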

[ I don't have a BookE or PR KVM setup to actually test this; it's not
  as important as the 64e conversion to the new interrupt code, but it
  would be nice if we could get rid of this reconciling from asm
  entirely ]
---
 arch/powerpc/include/asm/irqflags.h   | 58 ---------------------------
 arch/powerpc/include/asm/kvm_ppc.h    | 22 ----------
 arch/powerpc/kvm/book3s_pr.c          |  8 +++-
 arch/powerpc/kvm/booke.c              |  9 ++++-
 arch/powerpc/kvm/bookehv_interrupts.S |  9 -----
 5 files changed, 13 insertions(+), 93 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index 1a6c1ce17735..47d46712928a 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -11,64 +11,6 @@ 
  */
 #include <asm/hw_irq.h>
 
-#else
-#ifdef CONFIG_TRACE_IRQFLAGS
-#ifdef CONFIG_IRQSOFF_TRACER
-/*
- * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
- * which is the stack frame here, we need to force a stack frame
- * in case we came from user space.
- */
-#define TRACE_WITH_FRAME_BUFFER(func)		\
-	mflr	r0;				\
-	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
-	std	r0, 16(r1);			\
-	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
-	bl func;				\
-	ld	r1, 0(r1);			\
-	ld	r1, 0(r1);
-#else
-#define TRACE_WITH_FRAME_BUFFER(func)		\
-	bl func;
-#endif
-
-/*
- * These are calls to C code, so the caller must be prepared for volatiles to
- * be clobbered.
- */
-#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
-#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
-
-/*
- * This is used by assembly code to soft-disable interrupts first and
- * reconcile irq state.
- *
- * NB: This may call C code, so the caller must be prepared for volatiles to
- * be clobbered.
- */
-#define RECONCILE_IRQ_STATE(__rA, __rB)		\
-	lbz	__rA,PACAIRQSOFTMASK(r13);	\
-	lbz	__rB,PACAIRQHAPPENED(r13);	\
-	andi.	__rA,__rA,IRQS_DISABLED;	\
-	li	__rA,IRQS_DISABLED;		\
-	ori	__rB,__rB,PACA_IRQ_HARD_DIS;	\
-	stb	__rB,PACAIRQHAPPENED(r13);	\
-	bne	44f;				\
-	stb	__rA,PACAIRQSOFTMASK(r13);	\
-	TRACE_DISABLE_INTS;			\
-44:
-
-#else
-#define TRACE_ENABLE_INTS
-#define TRACE_DISABLE_INTS
-
-#define RECONCILE_IRQ_STATE(__rA, __rB)		\
-	lbz	__rA,PACAIRQHAPPENED(r13);	\
-	li	__rB,IRQS_DISABLED;		\
-	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	\
-	stb	__rB,PACAIRQSOFTMASK(r13);	\
-	stb	__rA,PACAIRQHAPPENED(r13)
-#endif
 #endif
 
 #endif
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8aacd76bb702..749c5cadc883 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -996,28 +996,6 @@  static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
 	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
 }
 
-/*
- * Please call after prepare_to_enter. This function puts the lazy ee and irq
- * disabled tracking state back to normal mode, without actually enabling
- * interrupts.
- */
-static inline void kvmppc_fix_ee_before_entry(void)
-{
-	trace_hardirqs_on();
-
-#ifdef CONFIG_PPC64
-	/*
-	 * To avoid races, the caller must have gone directly from having
-	 * interrupts fully-enabled to hard-disabled.
-	 */
-	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
-
-	/* Only need to enable IRQs by hard enabling them after this */
-	local_paca->irq_happened = 0;
-	irq_soft_mask_set(IRQS_ENABLED);
-#endif
-}
-
 static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 {
 	ulong ea;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 913944dc3620..40f1f4e207bc 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1157,6 +1157,8 @@  int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	int r = RESUME_HOST;
 	int s;
 
+	trace_hardirqs_on();
+
 	vcpu->stat.sum_exits++;
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -1460,7 +1462,7 @@  int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 			r = s;
 		else {
 			/* interrupts now hard-disabled */
-			kvmppc_fix_ee_before_entry();
+			trace_hardirqs_off();
 		}
 
 		kvmppc_handle_lost_ext(vcpu);
@@ -1855,10 +1857,12 @@  static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	kvmppc_fix_ee_before_entry();
+	trace_hardirqs_on();
 
 	ret = __kvmppc_vcpu_run(vcpu);
 
+	trace_hardirqs_off();
+
 	kvmppc_clear_debug(vcpu);
 
 	/* No need for guest_exit. It's done in handle_exit.
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7d5fe43f85c4..bcccebee6c3c 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -776,10 +776,13 @@  int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 	current->thread.debug = vcpu->arch.dbg_reg;
 
 	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
-	kvmppc_fix_ee_before_entry();
+
+	trace_hardirqs_on();
 
 	ret = __kvmppc_vcpu_run(vcpu);
 
+	trace_hardirqs_off();
+
 	/* No need for guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
@@ -991,6 +994,8 @@  int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	u32 last_inst = KVM_INST_FETCH_FAILED;
 	enum emulation_result emulated = EMULATE_DONE;
 
+	trace_hardirqs_on();
+
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
 
@@ -1357,7 +1362,7 @@  int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 		else {
 			/* interrupts now hard-disabled */
-			kvmppc_fix_ee_before_entry();
+			trace_hardirqs_off();
 			kvmppc_load_guest_fp(vcpu);
 			kvmppc_load_guest_altivec(vcpu);
 		}
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 8262c14fc9e6..b5fe6fb53c66 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -424,15 +424,6 @@  _GLOBAL(kvmppc_resume_host)
 	mtspr	SPRN_EPCR, r3
 	isync
 
-#ifdef CONFIG_64BIT
-	/*
-	 * We enter with interrupts disabled in hardware, but
-	 * we need to call RECONCILE_IRQ_STATE to ensure
-	 * that the software state is kept in sync.
-	 */
-	RECONCILE_IRQ_STATE(r3,r5)
-#endif
-
 	/* Switch to kernel stack and jump to handler. */
 	mr	r3, r4
 	mr	r5, r14 /* intno */