@@ -208,6 +208,11 @@ static bool valid_port(evtchn_port_t port)
    }
}
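+
+/* A vCPU ID is valid if QEMU has a CPU with that index. */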
+static bool valid_vcpu(uint32_t vcpu)
+{
+    return !!qemu_get_cpu(vcpu);
+}
+
int xen_evtchn_status_op(struct evtchn_status *status)
{
    XenEvtchnState *s = xen_evtchn_singleton;
@@ -398,6 +403,20 @@ static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
    }
}
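+
+/*
+ * These four VIRQs are per-vCPU in Xen; all other VIRQs are global and
+ * are initially bound on vCPU 0.
+ */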
+static bool virq_is_global(uint32_t virq)
+{
+    switch (virq) {
+    case VIRQ_TIMER:
+    case VIRQ_DEBUG:
+    case VIRQ_XENOPROF:
+    case VIRQ_XENPMU:
+        return false;
+
+    default:
+        return true;
+    }
+}
+
static void free_port(XenEvtchnState *s, evtchn_port_t port)
{
    s->port_table[port].type = EVTCHNSTAT_closed;
@@ -411,6 +430,28 @@ static void free_port(XenEvtchnState *s, evtchn_port_t port)
    }
}
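+
+/*
+ * Claim the first free (EVTCHNSTAT_closed) port for the given vCPU and
+ * type, returning its number in *port.
+ */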
+static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
+                         uint16_t val, evtchn_port_t *port)
+{
+    evtchn_port_t p = 1;
+
+    for (p = 1; valid_port(p); p++) {
+        if (s->port_table[p].type == EVTCHNSTAT_closed) {
+            s->port_table[p].vcpu = vcpu;
+            s->port_table[p].type = type;
+            s->port_table[p].type_val = val;
+
+            *port = p;
+
+            if (s->nr_ports < p + 1) {
+                s->nr_ports = p + 1;
+            }
+
+            return 0;
+        }
+    }
+    return -ENOSPC;
+}
+
static int close_port(XenEvtchnState *s, evtchn_port_t port)
{
    XenEvtchnPort *p = &s->port_table[port];
@@ -419,6 +460,11 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port)
    case EVTCHNSTAT_closed:
        return -ENOENT;
+    case EVTCHNSTAT_virq:
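+        /* Global VIRQs live on vCPU 0; port 0 drops the binding. */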
+        kvm_xen_set_vcpu_virq(virq_is_global(p->type_val) ? 0 : p->vcpu,
+                              p->type_val, 0);
+        break;
+
    default:
        break;
    }
@@ -470,3 +516,40 @@ int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
    return ret;
}
+
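+/*
+ * EVTCHNOP_bind_virq: allocate a port for the VIRQ and register the
+ * binding with the KVM Xen emulation; the port is freed again if that
+ * registration fails.
+ */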
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int ret;
+
+    if (!s) {
+        return -ENOTSUP;
+    }
+
+    if (virq->virq >= NR_VIRQS) {
+        return -EINVAL;
+    }
+
+    /* Global VIRQ must be allocated on vCPU0 first */
+    if (virq_is_global(virq->virq) && virq->vcpu != 0) {
+        return -EINVAL;
+    }
+
+    if (!valid_vcpu(virq->vcpu)) {
+        return -ENOENT;
+    }
+
+    qemu_mutex_lock(&s->port_lock);
+
+    ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq,
+                        &virq->port);
+    if (!ret) {
+        ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port);
+        if (ret) {
+            free_port(s, virq->port);
+        }
+    }
+
+    qemu_mutex_unlock(&s->port_lock);
+
+    return ret;
+}
@@ -16,6 +16,8 @@ int xen_evtchn_set_callback_param(uint64_t param);
struct evtchn_status;
struct evtchn_close;
struct evtchn_unmask;
+struct evtchn_bind_virq;
int xen_evtchn_status_op(struct evtchn_status *status);
int xen_evtchn_close_op(struct evtchn_close *close);
int xen_evtchn_unmask_op(struct evtchn_unmask *unmask);
+int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq);
@@ -14,5 +14,6 @@
void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id);
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
#endif /* QEMU_SYSEMU_KVM_XEN_H */
@@ -27,6 +27,8 @@
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
+#define XEN_NR_VIRQS 24
+
/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
@@ -1793,6 +1795,7 @@ typedef struct CPUArchState {
    uint64_t xen_vcpu_time_info_gpa;
    uint64_t xen_vcpu_runstate_gpa;
    uint8_t xen_vcpu_callback_vector;
+    uint16_t xen_virq[XEN_NR_VIRQS];
#endif
#if defined(CONFIG_HVF)
    HVFX86LazyFlags hvf_lflags;
@@ -287,6 +287,52 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id)
    qemu_cpu_kick(cs);
}
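+
+/*
+ * Program the KVM Xen timer for this vCPU from the port currently bound
+ * to VIRQ_TIMER.
+ */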
+static int kvm_xen_set_vcpu_timer(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    struct kvm_xen_vcpu_attr va = {
+        .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
+        .u.timer.port = env->xen_virq[VIRQ_TIMER],
+        .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
+        .u.timer.expires_ns = 0,
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &va);
+}
+
+static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data)
+{
+    kvm_xen_set_vcpu_timer(cs);
+}
+
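+/*
+ * Record the port bound to a VIRQ on the given vCPU. For VIRQ_TIMER the
+ * KVM timer attribute is updated as well, via async_run_on_cpu() so that
+ * the update runs on the vCPU's own thread.
+ */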
+int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port)
+{
+    CPUState *cs = qemu_get_cpu(vcpu_id);
+
+    if (!cs) {
+        return -ENOENT;
+    }
+
+    /* cpu.h doesn't include the actual Xen header. */
+    qemu_build_assert(NR_VIRQS == XEN_NR_VIRQS);
+
+    if (virq >= NR_VIRQS) {
+        return -EINVAL;
+    }
+
+    if (port && X86_CPU(cs)->env.xen_virq[virq]) {
+        return -EEXIST;
+    }
+
+    X86_CPU(cs)->env.xen_virq[virq] = port;
+    if (virq == VIRQ_TIMER) {
+        async_run_on_cpu(cs, do_set_vcpu_timer_virq,
+                         RUN_ON_CPU_HOST_INT(port));
+    }
+    return 0;
+}
+
static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
{
    X86CPU *cpu = X86_CPU(cs);
@@ -724,6 +770,21 @@ static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
        err = xen_evtchn_unmask_op(&unmask);
        break;
    }
+    case EVTCHNOP_bind_virq: {
+        struct evtchn_bind_virq virq;
+
+        qemu_build_assert(sizeof(virq) == 12);
+        if (kvm_copy_from_gva(cs, arg, &virq, sizeof(virq))) {
+            err = -EFAULT;
+            break;
+        }
+
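+        /* Copy the allocated port back to the guest on success. */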
+        err = xen_evtchn_bind_virq_op(&virq);
+        if (!err && kvm_copy_to_gva(cs, arg, &virq, sizeof(virq))) {
+            err = -EFAULT;
+        }
+        break;
+    }
    default:
        return false;
    }
@@ -1280,6 +1280,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
        VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
        VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
        VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
+        VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
        VMSTATE_END_OF_LIST()
    }
};