@@ -34,3 +34,4 @@ header-y += termios.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
+header-y += epapr_hcalls.h
@@ -90,9 +90,19 @@ struct kvm_vcpu_arch_shared {
};
#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
-#define HC_VENDOR_KVM (42 << 16)
+
+#include <asm/epapr_hcalls.h>
+
+/* ePAPR Hypercall Vendor ID */
+#define HC_VENDOR_EPAPR (EV_EPAPR_VENDOR_ID << 16)
+#define HC_VENDOR_KVM (EV_KVM_VENDOR_ID << 16)
+
+/* ePAPR Hypercall Token */
+#define HC_EV_IDLE EV_IDLE
+
+/* ePAPR Hypercall Return Codes */
#define HC_EV_SUCCESS 0
-#define HC_EV_UNIMPLEMENTED 12
+#define HC_EV_UNIMPLEMENTED EV_UNIMPLEMENTED
#define KVM_FEATURE_MAGIC_PAGE 1
@@ -593,6 +593,12 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
unsigned int priority;
if (vcpu->requests) {
+ /* kvm_vcpu_block() sets KVM_REQ_UNHALT, but unlike on
+ * x86 it is not cleared elsewhere. Clear it here for
+ * now, otherwise we never go idle.
+ */
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
smp_mb();
update_timer_ints(vcpu);
@@ -48,8 +48,7 @@ static unsigned int perfmon_refcount;
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- bool ret = !(v->arch.shared->msr & MSR_WE) ||
- !!(v->arch.pending_exceptions) ||
+ bool ret = !!(v->arch.pending_exceptions) ||
v->requests;
#ifdef CONFIG_BOOKE
@@ -100,6 +99,10 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
/* Second return value is in r4 */
break;
+ case HC_VENDOR_EPAPR | HC_EV_IDLE:
+ r = HC_EV_SUCCESS;
+ kvm_vcpu_block(vcpu);
+ break;
default:
r = HC_EV_UNIMPLEMENTED;
break;
@@ -908,6 +911,8 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
pvinfo->hcall[2] = inst_sc;
pvinfo->hcall[3] = inst_nop;
+ pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
+
return 0;
}
@@ -438,6 +438,8 @@ struct kvm_ppc_pvinfo {
__u8 pad[108];
};
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
#define KVMIO 0xAE
/*