@@ -40,6 +40,9 @@ typedef struct XenPCIPassthroughState XenPCIPassthroughState;
#define XEN_PT_DEVICE(obj) \
OBJECT_CHECK(XenPCIPassthroughState, (obj), TYPE_XEN_PT_DEVICE)
+uint32_t igd_read_opregion(XenPCIPassthroughState *s);
+void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
+
/* function type for config reg */
typedef int (*xen_pt_conf_reg_init)
(XenPCIPassthroughState *, XenPTRegInfo *, uint32_t real_offset,
@@ -66,8 +69,9 @@ typedef int (*xen_pt_conf_byte_read)
#define XEN_PT_BAR_ALLF 0xFFFFFFFF
#define XEN_PT_BAR_UNMAPPED (-1)
-#define PCI_CAP_MAX 48
+#define XEN_PCI_CAP_MAX 48
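+/* Config space offset of the IGD OpRegion address register (the ASLS register) */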
+#define XEN_PCI_INTEL_OPREGION 0xfc
typedef enum {
XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */
@@ -552,6 +552,22 @@ static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
return 0;
}
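+/* Emulation handlers for the IGD OpRegion address register at config offset 0xfc */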
+static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry,
+ uint32_t *value, uint32_t valid_mask)
+{
+ *value = igd_read_opregion(s);
+ return 0;
+}
+
+static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint32_t *value,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ igd_write_opregion(s, *value);
+ return 0;
+}
+
/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
/* Vendor ID reg */
@@ -1492,6 +1508,19 @@ static XenPTRegInfo xen_pt_emu_reg_msix[] = {
},
};
+static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
+ /* Intel IGFX OpRegion reg */
+ {
+ .offset = 0x0,
+ .size = 4,
+ .init_val = 0,
+ .u.dw.read = xen_pt_intel_opregion_read,
+ .u.dw.write = xen_pt_intel_opregion_write,
+ },
+ {
+ .size = 0,
+ },
+};
/****************************
* Capabilities
@@ -1729,6 +1758,14 @@ static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
.size_init = xen_pt_msix_size_init,
.emu_regs = xen_pt_emu_reg_msix,
},
+ /* Intel IGD Opregion group */
+ {
+ .grp_id = XEN_PCI_INTEL_OPREGION,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0x4,
+ .size_init = xen_pt_reg_grp_size_init,
+ .emu_regs = xen_pt_emu_reg_igd_opregion,
+ },
{
.grp_size = 0,
},
@@ -1779,7 +1816,7 @@ out:
static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
uint8_t id;
- unsigned max_cap = PCI_CAP_MAX;
+ unsigned max_cap = XEN_PCI_CAP_MAX;
uint8_t pos = PCI_CAPABILITY_LIST;
uint8_t status = 0;
@@ -1858,7 +1895,8 @@ int xen_pt_config_init(XenPCIPassthroughState *s)
uint32_t reg_grp_offset = 0;
XenPTRegGroup *reg_grp_entry = NULL;
- if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
+ if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
+ && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
if (xen_pt_hide_dev_cap(&s->real_device,
xen_pt_emu_reg_grps[i].grp_id)) {
continue;
@@ -1871,6 +1909,15 @@ int xen_pt_config_init(XenPCIPassthroughState *s)
}
}
+ /*
+ * By default we trap accesses up to 0x40 of the cfg space.
+ * If an Intel device is passed through we also need to trap 0xfc,
+ * therefore the size should be 0xff.
+ */
+ if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
+ reg_grp_offset = XEN_PCI_INTEL_OPREGION;
+ }
+
reg_grp_entry = g_new0(XenPTRegGroup, 1);
QLIST_INIT(&reg_grp_entry->reg_tbl_list);
QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
@@ -5,6 +5,11 @@
#include "xen-host-pci-device.h"
#include "hw/xen/xen_backend.h"
+static unsigned long igd_guest_opregion;
+static unsigned long igd_host_opregion;
+
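+/* Low 12 bits of the OpRegion address: its offset within a page */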
+#define XEN_PCI_INTEL_OPREGION_MASK 0xfff
+
typedef struct VGARegion {
int type; /* Memory or port I/O */
uint64_t guest_base_addr;
@@ -81,6 +86,7 @@ int xen_pt_register_vga_regions(XenHostPCIDevice *dev)
int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
{
int i = 0;
+ int ret = 0;
if (!is_igd_vga_passthrough(dev)) {
return 0;
@@ -107,6 +113,17 @@ int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
}
}
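+ /* Undo the OpRegion mapping established by igd_write_opregion() */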
+ if (igd_guest_opregion) {
+ ret = xc_domain_memory_mapping(xen_xc, xen_domid,
+ (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
+ 3, /* XEN_PCI_INTEL_OPREGION_PAGES */
+ DPCI_REMOVE_MAPPING);
+ if (ret) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -188,3 +205,68 @@ int xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev)
cpu_physical_memory_rw(0xc0000, bios, bios_size, 1);
return 0;
}
+
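+/*
+ * Return the OpRegion address previously written by the guest via
+ * igd_write_opregion(), or 0 if no address has been set yet.
+ */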
+uint32_t igd_read_opregion(XenPCIPassthroughState *s)
+{
+ uint32_t val = 0;
+
+ if (!igd_guest_opregion) {
+ return val;
+ }
+
+ val = igd_guest_opregion;
+
+ XEN_PT_LOG(&s->dev, "Read opregion val=%x\n", val);
+ return val;
+}
+
+#define XEN_PCI_INTEL_OPREGION_PAGES 0x3
+#define XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED 0x1
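+/*
+ * The guest writes the OpRegion address it wants to use; combine it with the
+ * host OpRegion's in-page offset, grant the domain access to the host pages
+ * and map them at the guest address.
+ */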
+void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val)
+{
+ int ret;
+
+ if (igd_guest_opregion) {
+ XEN_PT_LOG(&s->dev, "opregion register has already been set, ignoring %x\n",
+ val);
+ return;
+ }
+
+ /* We only handle little-endian here. */
+ xen_host_pci_get_block(&s->real_device, XEN_PCI_INTEL_OPREGION,
+ (uint8_t *)&igd_host_opregion, 4);
+ igd_guest_opregion = (unsigned long)(val & ~XEN_PCI_INTEL_OPREGION_MASK)
+ | (igd_host_opregion & XEN_PCI_INTEL_OPREGION_MASK);
+
+ ret = xc_domain_iomem_permission(xen_xc, xen_domid,
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
+ XEN_PCI_INTEL_OPREGION_PAGES,
+ XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED);
+
+ if (ret) {
+ XEN_PT_ERR(&s->dev, "[%d]: Can't enable access to IGD host opregion:"
+ " 0x%lx.\n", ret,
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT));
+ igd_guest_opregion = 0;
+ return;
+ }
+
+ ret = xc_domain_memory_mapping(xen_xc, xen_domid,
+ (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
+ XEN_PCI_INTEL_OPREGION_PAGES,
+ DPCI_ADD_MAPPING);
+
+ if (ret) {
+ XEN_PT_ERR(&s->dev, "[%d]:Can't map IGD host opregion:0x%lx to"
+ " guest opregion:0x%lx.\n", ret,
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
+ (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
+ igd_guest_opregion = 0;
+ return;
+ }
+
+ XEN_PT_LOG(&s->dev, "Map OpRegion: 0x%lx -> 0x%lx\n",
+ (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
+ (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
+}