@@ -145,6 +145,8 @@ struct XenEvtchnState {
     uint16_t gsi_pirq[GSI_NUM_PINS];
     /* Bitmap of allocated PIRQs (serialized) */
     uint64_t pirq_inuse[DIV_ROUND_UP(MAX_XEN_PIRQ, 64)];
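+    /* Bitmap of GSIs currently asserted for PIRQ delivery (serialized) */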
+    uint32_t pirq_gsi_set;
     /* Per-PIRQ information (rebuilt on migration) */
     struct pirq_info pirq[MAX_XEN_PIRQ];
@@ -210,6 +212,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
         VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, GSI_NUM_PINS),
         VMSTATE_UINT64_ARRAY(pirq_inuse, XenEvtchnState,
                              DIV_ROUND_UP(MAX_XEN_PIRQ, 64)),
+        VMSTATE_UINT32(pirq_gsi_set, XenEvtchnState),
         VMSTATE_END_OF_LIST()
     }
 };
@@ -1466,6 +1469,53 @@ static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
     return pirq;
 }
 
+bool xen_evtchn_set_gsi(int gsi, int level)
+{
+    XenEvtchnState *s = xen_evtchn_singleton;
+    int pirq;
+
+    assert(qemu_mutex_iothread_locked());
+
+    if (!s || gsi < 0 || gsi >= GSI_NUM_PINS) {
+        return false;
+    }
+
+    /*
+     * Check that it *isn't* the event channel GSI, and thus
+     * that we are not recursing and it's safe to take s->port_lock.
+     *
+     * Locking aside, it's perfectly sane to bail out early for that
+     * special case, as it would make no sense for the event channel
+     * GSI to be routed back to event channels, when the delivery
+     * method is to raise the GSI... that recursion wouldn't *just*
+     * be a locking issue.
+     */
+    if (gsi && gsi == s->callback_gsi) {
+        return false;
+    }
+
+    QEMU_LOCK_GUARD(&s->port_lock);
+
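+    /* Only divert GSIs that the guest has mapped to a PIRQ */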
+    pirq = s->gsi_pirq[gsi];
+    if (!pirq) {
+        return false;
+    }
+
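+    /* Record the level so xen_physdev_eoi_pirq() can re-raise the event */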
+    if (level) {
+        int port = s->pirq[pirq].port;
+
+        s->pirq_gsi_set |= (1U << gsi);
+        if (port) {
+            set_port_pending(s, port);
+        }
+    } else {
+        s->pirq_gsi_set &= ~(1U << gsi);
+    }
+    return true;
+}
+
 int xen_physdev_map_pirq(struct physdev_map_pirq *map)
 {
     XenEvtchnState *s = xen_evtchn_singleton;
@@ -1572,8 +1622,13 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
     if (gsi < 0) {
         return -EINVAL;
     }
 
-    // XX: Reassert a level IRQ if needed */
+    if (s->pirq_gsi_set & (1U << gsi)) {
+        int port = s->pirq[pirq].port;
+        if (port) {
+            set_port_pending(s, port);
+        }
+    }
     return 0;
 }
@@ -24,6 +24,12 @@ void xen_evtchn_set_callback_level(int level);
 int xen_evtchn_set_port(uint16_t port);
 
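+/*
+ * Claim a GSI for PIRQ handling. Returns true if it was consumed as a Xen
+ * PIRQ event channel; false means the caller should deliver it as normal.
+ */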
+bool xen_evtchn_set_gsi(int gsi, int level);
+
 /*
  * These functions mirror the libxenevtchn library API, providing the QEMU
  * backend side of "interdomain" event channels.
@@ -61,6 +61,11 @@
 #include CONFIG_DEVICES
 #include "kvm/kvm_i386.h"
 
+#ifdef CONFIG_XEN_EMU
+#include "hw/xen/xen.h"
+#include "hw/i386/kvm/xen_evtchn.h"
+#endif
+
 /* Physical Address of PVH entry point read from kernel ELF NOTE */
 static size_t pvh_start_addr;
@@ -608,6 +613,17 @@ void gsi_handler(void *opaque, int n, int level)
         }
         /* fall through */
     case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
+#ifdef CONFIG_XEN_EMU
+        /*
+         * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC
+         * routing actually works properly under Xen). And then to
+         * *either* the PIRQ handling or the I/OAPIC depending on
+         * whether the former wants it.
+         */
+        if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) {
+            break;
+        }
+#endif
         qemu_set_irq(s->ioapic_irq[n], level);
         break;
     case IO_APIC_SECONDARY_IRQBASE