@@ -1792,6 +1792,7 @@ typedef struct CPUArchState {
uint64_t xen_vcpu_info_default_gpa;
uint64_t xen_vcpu_time_info_gpa;
uint64_t xen_vcpu_runstate_gpa;
+ uint8_t xen_vcpu_callback_vector;
#endif
#if defined(CONFIG_HVF)
HVFX86LazyFlags hvf_lflags;
@@ -10,3 +10,4 @@ kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
+kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d"
@@ -28,6 +28,7 @@
#include "standard-headers/xen/sched.h"
#include "standard-headers/xen/memory.h"
#include "standard-headers/xen/hvm/hvm_op.h"
+#include "standard-headers/xen/hvm/params.h"
#include "standard-headers/xen/vcpu.h"
#include "standard-headers/xen/event_channel.h"
@@ -194,7 +195,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
fi.submap |= 1 << XENFEAT_writable_page_tables |
1 << XENFEAT_writable_descriptor_tables |
1 << XENFEAT_auto_translated_physmap |
- 1 << XENFEAT_supervisor_mode_kernel;
+ 1 << XENFEAT_supervisor_mode_kernel |
+ 1 << XENFEAT_hvm_callback_vector;
}
err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -221,6 +223,31 @@ static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
}
+static int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
+{
+    uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+    struct kvm_xen_vcpu_attr xva; /* per-vCPU attribute: upcall vector */
+
+    xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
+    xva.u.vector = vector;
+
+    trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);
+
+    /* vCPU-scope attr needs the vCPU ioctl, not the VM-wide HVM_SET_ATTR */
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva);
+}
+
+static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
+{ /* run_on_cpu handler: executes on the target vCPU's own thread */
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_callback_vector = data.host_int; /* cached for reset/migration */
+
+    if (kvm_xen_has_cap(EVTCHN_SEND)) { /* kernel can inject the vector itself */
+        kvm_xen_set_vcpu_callback_vector(cs);
+    }
+}
+
static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
{
X86CPU *cpu = X86_CPU(cs);
@@ -277,12 +304,16 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
env->xen_vcpu_info_default_gpa = INVALID_GPA;
env->xen_vcpu_time_info_gpa = INVALID_GPA;
env->xen_vcpu_runstate_gpa = INVALID_GPA;
+ env->xen_vcpu_callback_vector = 0;
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
INVALID_GPA);
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
INVALID_GPA);
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ kvm_xen_set_vcpu_callback_vector(cs);
+ }
}
@@ -457,17 +488,53 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
return true;
}
+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+                                              X86CPU *cpu, uint64_t arg)
+{
+    struct xen_hvm_evtchn_upcall_vector up;
+    CPUState *target_cs;
+
+    /* No need for 32/64 compat handling */
+    qemu_build_assert(sizeof(up) == 8);
+
+    if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) {
+        return -EFAULT; /* guest passed an unreadable pointer */
+    }
+
+    if (up.vector < 0x10) { /* vectors 0-15 are reserved for exceptions */
+        return -EINVAL;
+    }
+
+    target_cs = qemu_get_cpu(up.vcpu);
+    if (!target_cs) { /* nonexistent vCPU index from the guest */
+        return -EINVAL;
+    }
+
+    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+                     RUN_ON_CPU_HOST_INT(up.vector)); /* apply on target vCPU */
+    return 0;
+}
+
 static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                  int cmd, uint64_t arg)
 {
+    int ret = -ENOSYS; /* default for recognised-but-unimplemented ops */
     switch (cmd) {
+    case HVMOP_set_evtchn_upcall_vector:
+        ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+                                                 exit->u.hcall.params[0]);
+        break;
+
     case HVMOP_pagetable_dying:
-        exit->u.hcall.result = -ENOSYS;
-        return true;
+        ret = -ENOSYS; /* deliberately unimplemented; guest copes */
+        break;

     default:
         return false;
     }
+
+    exit->u.hcall.result = ret; /* single exit path stores the result */
+    return true;
 }
static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
@@ -801,6 +868,17 @@ int kvm_put_xen_state(CPUState *cs)
}
}
+ if (!kvm_xen_has_cap(EVTCHN_SEND)) {
+ return 0;
+ }
+
+ if (env->xen_vcpu_callback_vector) {
+ ret = kvm_xen_set_vcpu_callback_vector(cs);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -1274,6 +1274,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
+ VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
VMSTATE_END_OF_LIST()
}
};