[3/4] xen: introduce an event channel for buffered io event notifications

Message ID: 1321368671-1134-3-git-send-email-stefano.stabellini@eu.citrix.com
State: New

Commit Message

Stefano Stabellini Nov. 15, 2011, 2:51 p.m. UTC
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
notifications for buffered io events.
After the first notification is received, leave the event channel masked
and set up a timer to process the rest of the batch.
Once we have completed processing the batch, unmask the event channel
and delete the timer.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen-all.c |   38 ++++++++++++++++++++++++++++++++------
 1 files changed, 32 insertions(+), 6 deletions(-)
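
For readers skimming the thread, this is the core of the scheme described
above, condensed from the diff at the bottom of this page and lightly
annotated. It is an illustrative excerpt of the two affected functions, not
a standalone program:

    /* In cpu_get_ioreq(): a pending event on the buffered-io port only arms
     * the timer; the port is left masked, so further guest writes in the same
     * batch do not generate extra wakeups. */
    if (port == state->bufioreq_local_port) {
        qemu_mod_timer(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        return NULL;    /* no synchronous ioreq to serve for this wakeup */
    }

    /* Timer callback: drain the buffered-io ring; keep re-arming while
     * requests remain, then stop the timer and unmask the event channel so
     * the next batch raises a fresh notification. */
    static void handle_buffered_io(void *opaque)
    {
        XenIOState *state = opaque;

        if (handle_buffered_iopage(state)) {
            qemu_mod_timer(state->buffered_io_timer,
                    BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        } else {
            qemu_del_timer(state->buffered_io_timer);
            xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
        }
    }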

Comments

Ian Campbell Nov. 15, 2011, 5:13 p.m. UTC | #1
On Tue, 2011-11-15 at 14:51 +0000, stefano.stabellini@eu.citrix.com
wrote:
> From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> 
> Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
> notifications for buffered io events.
> After the first notification is received, leave the event channel masked
> and set up a timer to process the rest of the batch.
> Once we have completed processing the batch, unmask the event channel
> and delete the timer.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
>  xen-all.c |   38 ++++++++++++++++++++++++++++++++------
>  1 files changed, 32 insertions(+), 6 deletions(-)
> 
> diff --git a/xen-all.c b/xen-all.c
> index b5e28ab..b28d7e7 100644
> --- a/xen-all.c
> +++ b/xen-all.c
> @@ -70,6 +70,8 @@ typedef struct XenIOState {
>      QEMUTimer *buffered_io_timer;
>      /* the evtchn port for polling the notification, */
>      evtchn_port_t *ioreq_local_port;
> +    /* evtchn local port for buffered io */
> +    evtchn_port_t bufioreq_local_port;
>      /* the evtchn fd for polling */
>      XenEvtchn xce_handle;
>      /* which vcpu we are serving */
> @@ -516,6 +518,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
>      evtchn_port_t port;
>  
>      port = xc_evtchn_pending(state->xce_handle);
> +    if (port == state->bufioreq_local_port) {
> +        qemu_mod_timer(state->buffered_io_timer,
> +                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +        return NULL;
> +    }
> +
>      if (port != -1) {
>          for (i = 0; i < smp_cpus; i++) {
>              if (state->ioreq_local_port[i] == port) {
> @@ -664,16 +672,18 @@ static void handle_ioreq(ioreq_t *req)
>      }
>  }
>  
> -static void handle_buffered_iopage(XenIOState *state)
> +static int handle_buffered_iopage(XenIOState *state)
>  {
>      buf_ioreq_t *buf_req = NULL;
>      ioreq_t req;
>      int qw;
>  
>      if (!state->buffered_io_page) {
> -        return;
> +        return 0;
>      }
>  
> +    memset(&req, 0x00, sizeof(req));
> +
>      while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
>          buf_req = &state->buffered_io_page->buf_ioreq[
>              state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
> @@ -698,15 +708,21 @@ static void handle_buffered_iopage(XenIOState *state)
>          xen_mb();
>          state->buffered_io_page->read_pointer += qw ? 2 : 1;
>      }
> +
> +    return req.count;
>  }
>  
>  static void handle_buffered_io(void *opaque)
>  {
>      XenIOState *state = opaque;
>  
> -    handle_buffered_iopage(state);
> -    qemu_mod_timer(state->buffered_io_timer,
> -                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +    if (handle_buffered_iopage(state)) {
> +        qemu_mod_timer(state->buffered_io_timer,
> +                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +    } else {
> +        qemu_del_timer(state->buffered_io_timer);
> +        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
> +    }
>  }
>  
>  static void cpu_handle_ioreq(void *opaque)
> @@ -836,7 +852,6 @@ static void xen_main_loop_prepare(XenIOState *state)
>  
>      state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
>                                                   state);
> -    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
>  
>      if (evtchn_fd != -1) {
>          qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
> @@ -888,6 +903,7 @@ int xen_hvm_init(void)
>  {
>      int i, rc;
>      unsigned long ioreq_pfn;
> +    unsigned long bufioreq_evtchn;
>      XenIOState *state;
>  
>      state = g_malloc0(sizeof (XenIOState));
> @@ -937,6 +953,16 @@ int xen_hvm_init(void)
>          state->ioreq_local_port[i] = rc;
>      }
>  
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
> +            &bufioreq_evtchn);
> +    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
> +            (uint32_t)bufioreq_evtchn);
> +    if (rc == -1) {
> +        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
> +        return -1;
> +    }
> +    state->bufioreq_local_port = rc;

Does this fall back gracefully on hypervisors which don't have this new
HVM param? It doesn't look like it, but perhaps I'm missing something.

> +
>      /* Init RAM management */
>      xen_map_cache_init();
>      xen_ram_init(ram_size);
Stefano Stabellini Nov. 15, 2011, 5:20 p.m. UTC | #2
On Tue, 15 Nov 2011, Ian Campbell wrote:
> > +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
> > +            &bufioreq_evtchn);
> > +    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
> > +            (uint32_t)bufioreq_evtchn);
> > +    if (rc == -1) {
> > +        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
> > +        return -1;
> > +    }
> > +    state->bufioreq_local_port = rc;
> 
> Does this fall back gracefully on hypervisors which don't have this new
> HVM param? It doesn't look like it, but perhaps I'm missing something.

No, it does not.
However, upstream QEMU doesn't work very well with Xen 4.1 anyway; the
first Xen release that is going to support it will be Xen 4.2, which
should have this feature.
Ian Campbell Nov. 15, 2011, 5:24 p.m. UTC | #3
On Tue, 2011-11-15 at 17:20 +0000, Stefano Stabellini wrote:
> On Tue, 15 Nov 2011, Ian Campbell wrote:
> > > +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
> > > +            &bufioreq_evtchn);
> > > +    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
> > > +            (uint32_t)bufioreq_evtchn);
> > > +    if (rc == -1) {
> > > +        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
> > > +        return -1;
> > > +    }
> > > +    state->bufioreq_local_port = rc;
> > 
> > Does this fall back gracefully on hypervisors which don't have this new
> > HVM param? It doesn't look like it, but perhaps I'm missing something.
> 
> No, it does not.
> However, upstream QEMU doesn't work very well with Xen 4.1 anyway; the
> first Xen release that is going to support it will be Xen 4.2, which
> should have this feature.

In which case I think you need to handle the resultant error from
xc_get_hvm_param() gracefully with a suitable error message which says
something along those lines.

Ian.
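
A minimal sketch of the graceful failure Ian is asking for, written against
the xen_hvm_init() hunk in the patch below. The exact message wording is
hypothetical, and the author may instead prefer to fall back to the
pre-patch polling behaviour rather than exit:

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
            &bufioreq_evtchn);
    if (rc < 0) {
        /* Older hypervisors (e.g. Xen 4.1) do not implement this param. */
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN (errno %d); "
                "buffered io event notifications require Xen 4.2 or newer\n",
                errno);
        return -1;
    }

    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
            (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "buffered io bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;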

Patch

diff --git a/xen-all.c b/xen-all.c
index b5e28ab..b28d7e7 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -70,6 +70,8 @@ typedef struct XenIOState {
     QEMUTimer *buffered_io_timer;
     /* the evtchn port for polling the notification, */
     evtchn_port_t *ioreq_local_port;
+    /* evtchn local port for buffered io */
+    evtchn_port_t bufioreq_local_port;
     /* the evtchn fd for polling */
     XenEvtchn xce_handle;
     /* which vcpu we are serving */
@@ -516,6 +518,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     evtchn_port_t port;
 
     port = xc_evtchn_pending(state->xce_handle);
+    if (port == state->bufioreq_local_port) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+        return NULL;
+    }
+
     if (port != -1) {
         for (i = 0; i < smp_cpus; i++) {
             if (state->ioreq_local_port[i] == port) {
@@ -664,16 +672,18 @@ static void handle_ioreq(ioreq_t *req)
     }
 }
 
-static void handle_buffered_iopage(XenIOState *state)
+static int handle_buffered_iopage(XenIOState *state)
 {
     buf_ioreq_t *buf_req = NULL;
     ioreq_t req;
     int qw;
 
     if (!state->buffered_io_page) {
-        return;
+        return 0;
     }
 
+    memset(&req, 0x00, sizeof(req));
+
     while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
         buf_req = &state->buffered_io_page->buf_ioreq[
             state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
@@ -698,15 +708,21 @@ static void handle_buffered_iopage(XenIOState *state)
         xen_mb();
         state->buffered_io_page->read_pointer += qw ? 2 : 1;
     }
+
+    return req.count;
 }
 
 static void handle_buffered_io(void *opaque)
 {
     XenIOState *state = opaque;
 
-    handle_buffered_iopage(state);
-    qemu_mod_timer(state->buffered_io_timer,
-                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    if (handle_buffered_iopage(state)) {
+        qemu_mod_timer(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
+    } else {
+        qemu_del_timer(state->buffered_io_timer);
+        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    }
 }
 
 static void cpu_handle_ioreq(void *opaque)
@@ -836,7 +852,6 @@ static void xen_main_loop_prepare(XenIOState *state)
 
     state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                  state);
-    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
 
     if (evtchn_fd != -1) {
         qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
@@ -888,6 +903,7 @@ int xen_hvm_init(void)
 {
     int i, rc;
     unsigned long ioreq_pfn;
+    unsigned long bufioreq_evtchn;
     XenIOState *state;
 
     state = g_malloc0(sizeof (XenIOState));
@@ -937,6 +953,16 @@ int xen_hvm_init(void)
         state->ioreq_local_port[i] = rc;
     }
 
+    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
+            &bufioreq_evtchn);
+    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+            (uint32_t)bufioreq_evtchn);
+    if (rc == -1) {
+        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
+        return -1;
+    }
+    state->bufioreq_local_port = rc;
+
     /* Init RAM management */
     xen_map_cache_init();
     xen_ram_init(ram_size);