@@ -567,10 +567,13 @@ int hvm_domain_initialise(struct domain *d)
rtc_init(d);
hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
- hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+ hvm_init_ioreq_servers(d);
register_portio_handler(d, 0xe9, 1, hvm_print_line);
+ if ( hvm_init_pci_emul(d) )
+ goto fail2;
+
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
goto fail2;
@@ -645,8 +648,8 @@ static void hvm_destroy_ioreq_servers(struct domain *d)
void hvm_domain_relinquish_resources(struct domain *d)
{
- hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
- hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+ hvm_destroy_ioreq_servers(d);
+ hvm_destroy_pci_emul(d);
msixtbl_pt_cleanup(d);
@@ -1104,27 +1107,11 @@ int hvm_vcpu_initialise(struct vcpu *v)
&& (rc = nestedhvm_vcpu_initialise(v)) < 0 )
goto fail3;
- /* Create ioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, 0, NULL);
- if ( rc < 0 )
- goto fail4;
-
- /* Register ioreq event channel. */
- v->arch.hvm_vcpu.xen_port = rc;
-
- if ( v->vcpu_id == 0 )
- {
- /* Create bufioreq event channel. */
- rc = alloc_unbound_xen_event_channel(v, 0, NULL);
- if ( rc < 0 )
- goto fail2;
- v->domain->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] = rc;
- }
+ rc = hvm_ioreq_servers_new_vcpu(v);
+ if ( rc != 0 )
+ goto fail3;
- spin_lock(&v->domain->arch.hvm_domain.ioreq.lock);
- if ( v->domain->arch.hvm_domain.ioreq.va != NULL )
- get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
- spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
+ v->arch.hvm_vcpu.ioreq = &v->domain->arch.hvm_domain.ioreq;
spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
Prepare/Release structure for multiple ioreq servers.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
---
 xen/arch/x86/hvm/hvm.c | 33 ++++++++++-----------------------
 1 files changed, 10 insertions(+), 23 deletions(-)