
[v6,2/2] KVM: PPC: Book3S HV: Stop forwarding all HFUs to L1

Message ID 20210806134506.2649735-3-farosas@linux.ibm.com
State New
Series KVM: PPC: Book3S HV: Nested guest state sanitising changes

Commit Message

Fabiano Rosas Aug. 6, 2021, 1:45 p.m. UTC
If the nested hypervisor has no access to a facility because it has
been disabled by the host, it should also not be able to see the
Hypervisor Facility Unavailable interrupt that arises from one of its
guests trying to access the facility.

This patch turns such a HFU taken in L2 into a Hypervisor Emulation
Assistance interrupt and forwards it to L1 for handling. HFUs that
happened because L1 explicitly disabled the facility for L2 are still
forwarded as-is, along with the corresponding Cause bits in the HFSCR.

Signed-off-by: Fabiano Rosas <farosas@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv.c        | 13 +++++++++++++
 arch/powerpc/kvm/book3s_hv_nested.c | 29 +++++++++++++++++++++++------
 2 files changed, 36 insertions(+), 6 deletions(-)
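
Note for reviewers (not part of the commit message): below is a
stand-alone sketch of the decision the patch makes in
save_hv_return_state(). It is illustrative only; the
hfu_becomes_heai() helper, the toy main() and the literal 64 (standing
in for BITS_PER_LONG) are invented for this note, while
HFSCR_INTR_CAUSE and the cause-byte layout follow the kernel
definitions.

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HFSCR_INTR_CAUSE 0xff00000000000000ULL	/* top byte: interrupt cause */

/* True when the HFU should be reported to L1 as a HEAI instead. */
static bool hfu_becomes_heai(uint64_t l0_hfscr, uint64_t l1_hfscr)
{
	uint64_t cause = (l0_hfscr & HFSCR_INTR_CAUSE) >> 56;

	if (cause >= 64)
		return false;	/* malformed cause value, keep the HFU */

	/*
	 * Bit clear: L1 itself disabled the facility for L2, so the
	 * HFU (plus the Cause bits) is forwarded unchanged. Bit set:
	 * L1 thinks the facility exists, so the trap can only mean
	 * the host hid it; L1 must see a HEAI instead.
	 */
	return (l1_hfscr & (1ULL << cause)) != 0;
}

int main(void)
{
	uint64_t l1_hfscr = 1ULL << 0;	/* L1 enabled facility 0 only */

	/* Facility 0 trapped although L1 enabled it: convert to HEAI. */
	printf("cause 0: HEAI? %d\n",
	       hfu_becomes_heai((uint64_t)0 << 56, l1_hfscr));

	/* Facility 5 trapped and L1 disabled it: forward the HFU. */
	printf("cause 5: HEAI? %d\n",
	       hfu_becomes_heai((uint64_t)5 << 56, l1_hfscr));

	return 0;
}

The patch applies exactly this check and additionally rewrites
vcpu->arch.trap so that the existing H_EMUL_ASSIST path fills HEIR
with the instruction loaded earlier in kvmppc_handle_nested_exit().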

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 085fb8ecbf68..9123b493c79e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1837,6 +1837,19 @@  static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		r = RESUME_HOST;
 		break;
 	}
+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+		/*
+		 * We might decide later to turn this interrupt into a
+		 * HEAI. Load the last instruction now, while we can
+		 * still go back into the guest to retry if needed.
+		 */
+		r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
+					 &vcpu->arch.emul_inst);
+		if (r != EMULATE_DONE)
+			r = RESUME_GUEST;
+		else
+			r = RESUME_HOST;
+		break;
 	default:
 		r = RESUME_HOST;
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 1823674d46ef..1904697a3132 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -99,7 +99,7 @@  static void byteswap_hv_regs(struct hv_guest_state *hr)
 	hr->dawrx1 = swab64(hr->dawrx1);
 }
 
-static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
+static void save_hv_return_state(struct kvm_vcpu *vcpu,
 				 struct hv_guest_state *hr)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -118,7 +118,7 @@  static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
 	hr->pidr = vcpu->arch.pid;
 	hr->cfar = vcpu->arch.cfar;
 	hr->ppr = vcpu->arch.ppr;
-	switch (trap) {
+	switch (vcpu->arch.trap) {
 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
 		hr->hdar = vcpu->arch.fault_dar;
 		hr->hdsisr = vcpu->arch.fault_dsisr;
@@ -128,9 +128,26 @@  static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
 		hr->asdr = vcpu->arch.fault_gpa;
 		break;
 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
-		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
-			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
-		break;
+	{
+		u64 cause = vcpu->arch.hfscr >> 56;
+
+		WARN_ON_ONCE(cause >= BITS_PER_LONG);
+
+		if (!(hr->hfscr & (1UL << cause))) {
+			hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
+				     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
+			break;
+		}
+
+		/*
+		 * The host has disabled this facility, so it does not
+		 * exist from L1's perspective. Turn it into a
+		 * HEAI. The instruction was already loaded in
+		 * kvmppc_handle_nested_exit().
+		 */
+		vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
+		fallthrough;
+	}
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
 		hr->heir = vcpu->arch.emul_inst;
 		break;
@@ -388,7 +405,7 @@  long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
 	delta_ic = vcpu->arch.ic - l2_hv.ic;
 	delta_vtb = vc->vtb - l2_hv.vtb;
-	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
+	save_hv_return_state(vcpu, &l2_hv);
 
 	/* restore L1 state */
 	vcpu->arch.nested = NULL;