diff mbox

[RFC,v2,05/10] virtio-net rsc: Create timer to drain the packets from the cache pool

Message ID 1454264009-24094-6-git-send-email-wexu@redhat.com
State New
Headers show

Commit Message

Wei Xu Jan. 31, 2016, 6:13 p.m. UTC
From: Wei Xu <wei@wei-thinkpad.nay.redhat.com>

The timer will only be triggered if the packets pool is not empty,
and it'll drain off all the cached packets, this is to reduce the
delay to upper layer protocol stack.

Signed-off-by: Wei Xu <wexu@redhat.com>
---
 hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

Comments

Jason Wang Feb. 1, 2016, 6:28 a.m. UTC | #1
On 02/01/2016 02:13 AM, wexu@redhat.com wrote:
> From: Wei Xu <wei@wei-thinkpad.nay.redhat.com>
>
> The timer will only be triggered if the packets pool is not empty,
> and it'll drain off all the cached packets, this is to reduce the
> delay to upper layer protocol stack.
>
> Signed-off-by: Wei Xu <wexu@redhat.com>
> ---
>  hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 38 insertions(+)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 4f77fbe..93df0d5 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -48,12 +48,17 @@
>  
>  #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
>  
> +/* Purge coalesced packets timer interval */
> +#define RSC_TIMER_INTERVAL  500000

Any hints for choosing this as default value? Do we need a property for
user to change this?

> +
>  /* Global statistics */
>  static uint32_t rsc_chain_no_mem;
>  
>  /* Switcher to enable/disable rsc */
>  static bool virtio_net_rsc_bypass;
>  
> +static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
> +
>  /* Coalesce callback for ipv4/6 */
>  typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg *seg,
>                                       const uint8_t *buf, size_t size);
> @@ -1625,6 +1630,35 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
>      return 0;
>  }
>  
> +static void virtio_net_rsc_purge(void *opq)
> +{
> +    int ret = 0;
> +    NetRscChain *chain = (NetRscChain *)opq;
> +    NetRscSeg *seg, *rn;
> +
> +    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
> +        if (!qemu_can_send_packet(seg->nc)) {
> +            /* Should quit or continue? not sure if one or some
> +            * of the queues fail would happen, try continue here */

This looks wrong, qemu_can_send_packet() is used for nc's peer not nc
itself.

> +            continue;
> +        }
> +
> +        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
> +        QTAILQ_REMOVE(&chain->buffers, seg, next);
> +        g_free(seg->buf);
> +        g_free(seg);
> +
> +        if (ret == 0) {
> +            /* Try next queue */

Try next seg?

> +            continue;
> +        }

Why need above?

> +    }
> +
> +    if (!QTAILQ_EMPTY(&chain->buffers)) {
> +        timer_mod(chain->drain_timer,
> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);

Need stop/start the timer during vm stop/start to save cpu.

> +    }
> +}
>  
>  static void virtio_net_rsc_cleanup(VirtIONet *n)
>  {
> @@ -1810,6 +1844,8 @@ static size_t virtio_net_rsc_callback(NetRscChain *chain, NetClientState *nc,
>          if (!virtio_net_rsc_cache_buf(chain, nc, buf, size)) {
>              return 0;
>          } else {
> +            timer_mod(chain->drain_timer,
> +                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
>              return size;
>          }
>      }
> @@ -1877,6 +1913,8 @@ static NetRscChain *virtio_net_rsc_lookup_chain(NetClientState *nc,
>      }
>  
>      chain->proto = proto;
> +    chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> +                                      virtio_net_rsc_purge, chain);
>      chain->do_receive = virtio_net_rsc_receive4;
>  
>      QTAILQ_INIT(&chain->buffers);
Wei Xu Feb. 1, 2016, 8:39 a.m. UTC | #2
On 02/01/2016 02:28 PM, Jason Wang wrote:
>
> On 02/01/2016 02:13 AM, wexu@redhat.com wrote:
>> From: Wei Xu <wei@wei-thinkpad.nay.redhat.com>
>>
>> The timer will only be triggered if the packets pool is not empty,
>> and it'll drain off all the cached packets, this is to reduce the
>> delay to upper layer protocol stack.
>>
>> Signed-off-by: Wei Xu <wexu@redhat.com>
>> ---
>>   hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 38 insertions(+)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 4f77fbe..93df0d5 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -48,12 +48,17 @@
>>   
>>   #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
>>   
>> +/* Purge coalesced packets timer interval */
>> +#define RSC_TIMER_INTERVAL  500000
> Any hints for choosing this as default value? Do we need a property for
> user to change this?
This is still under estimation; 300ms-500ms is a good value to adapt to
the test, and this should be configurable.
>> +
>>   /* Global statistics */
>>   static uint32_t rsc_chain_no_mem;
>>   
>>   /* Switcher to enable/disable rsc */
>>   static bool virtio_net_rsc_bypass;
>>   
>> +static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
>> +
>>   /* Coalesce callback for ipv4/6 */
>>   typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg *seg,
>>                                        const uint8_t *buf, size_t size);
>> @@ -1625,6 +1630,35 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
>>       return 0;
>>   }
>>   
>> +static void virtio_net_rsc_purge(void *opq)
>> +{
>> +    int ret = 0;
>> +    NetRscChain *chain = (NetRscChain *)opq;
>> +    NetRscSeg *seg, *rn;
>> +
>> +    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
>> +        if (!qemu_can_send_packet(seg->nc)) {
>> +            /* Should quit or continue? not sure if one or some
>> +            * of the queues fail would happen, try continue here */
> This looks wrong, qemu_can_send_packet() is used for nc's peer not nc
> itself.
OK.
>
>> +            continue;
>> +        }
>> +
>> +        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
>> +        QTAILQ_REMOVE(&chain->buffers, seg, next);
>> +        g_free(seg->buf);
>> +        g_free(seg);
>> +
>> +        if (ret == 0) {
>> +            /* Try next queue */
> Try next seg?
Yes, it's seg.
>
>> +            continue;
>> +        }
> Why need above?
Yes, it's optional, but it may help if there is extra code after
this; will remove this.
>
>> +    }
>> +
>> +    if (!QTAILQ_EMPTY(&chain->buffers)) {
>> +        timer_mod(chain->drain_timer,
>> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
> Need stop/start the timer during vm stop/start to save cpu.
Thanks, do you know where I should add the code?
>
>> +    }
>> +}
>>   
>>   static void virtio_net_rsc_cleanup(VirtIONet *n)
>>   {
>> @@ -1810,6 +1844,8 @@ static size_t virtio_net_rsc_callback(NetRscChain *chain, NetClientState *nc,
>>           if (!virtio_net_rsc_cache_buf(chain, nc, buf, size)) {
>>               return 0;
>>           } else {
>> +            timer_mod(chain->drain_timer,
>> +                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
>>               return size;
>>           }
>>       }
>> @@ -1877,6 +1913,8 @@ static NetRscChain *virtio_net_rsc_lookup_chain(NetClientState *nc,
>>       }
>>   
>>       chain->proto = proto;
>> +    chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>> +                                      virtio_net_rsc_purge, chain);
>>       chain->do_receive = virtio_net_rsc_receive4;
>>   
>>       QTAILQ_INIT(&chain->buffers);
>
Jason Wang Feb. 1, 2016, 9:31 a.m. UTC | #3
On 02/01/2016 04:39 PM, Wei Xu wrote:
> On 02/01/2016 02:28 PM, Jason Wang wrote:
>>
>> On 02/01/2016 02:13 AM, wexu@redhat.com wrote:
>>> From: Wei Xu <wei@wei-thinkpad.nay.redhat.com>
>>>
>>> The timer will only be triggered if the packets pool is not empty,
>>> and it'll drain off all the cached packets, this is to reduce the
>>> delay to upper layer protocol stack.
>>>
>>> Signed-off-by: Wei Xu <wexu@redhat.com>
>>> ---
>>>   hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
>>>   1 file changed, 38 insertions(+)
>>>
>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>> index 4f77fbe..93df0d5 100644
>>> --- a/hw/net/virtio-net.c
>>> +++ b/hw/net/virtio-net.c
>>> @@ -48,12 +48,17 @@
>>>     #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
>>>   +/* Purge coalesced packets timer interval */
>>> +#define RSC_TIMER_INTERVAL  500000
>> Any hints for choosing this as default value? Do we need a property for
>> user to change this?
> This is still under estimation, 300ms -500ms is a good value to adapt
> the test, this should be configurable.
>>> +
>>>   /* Global statistics */
>>>   static uint32_t rsc_chain_no_mem;
>>>     /* Switcher to enable/disable rsc */
>>>   static bool virtio_net_rsc_bypass;
>>>   +static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
>>> +
>>>   /* Coalesce callback for ipv4/6 */
>>>   typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg
>>> *seg,
>>>                                        const uint8_t *buf, size_t
>>> size);
>>> @@ -1625,6 +1630,35 @@ static int
>>> virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
>>>       return 0;
>>>   }
>>>   +static void virtio_net_rsc_purge(void *opq)
>>> +{
>>> +    int ret = 0;
>>> +    NetRscChain *chain = (NetRscChain *)opq;
>>> +    NetRscSeg *seg, *rn;
>>> +
>>> +    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
>>> +        if (!qemu_can_send_packet(seg->nc)) {
>>> +            /* Should quit or continue? not sure if one or some
>>> +            * of the queues fail would happen, try continue here */
>> This looks wrong, qemu_can_send_packet() is used for nc's peer not nc
>> itself.
> OK.
>>
>>> +            continue;
>>> +        }
>>> +
>>> +        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
>>> +        QTAILQ_REMOVE(&chain->buffers, seg, next);
>>> +        g_free(seg->buf);
>>> +        g_free(seg);
>>> +
>>> +        if (ret == 0) {
>>> +            /* Try next queue */
>> Try next seg?
> Yes, it's seg.
>>
>>> +            continue;
>>> +        }
>> Why need above?
> Yes, it's optional, but it may help if there is extra code after
> this; will remove this.
>>
>>> +    }
>>> +
>>> +    if (!QTAILQ_EMPTY(&chain->buffers)) {
>>> +        timer_mod(chain->drain_timer,
>>> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
>>> rsc_timeout);
>> Need stop/start the timer during vm stop/start to save cpu.
> Thanks, do you know where I should add the code?

You may want to look at the vmstate change handler in virtio-net.c.
Wei Xu Feb. 1, 2016, 1:31 p.m. UTC | #4
On 02/01/2016 05:31 PM, Jason Wang wrote:
>
> On 02/01/2016 04:39 PM, Wei Xu wrote:
>> On 02/01/2016 02:28 PM, Jason Wang wrote:
>>> On 02/01/2016 02:13 AM, wexu@redhat.com wrote:
>>>> From: Wei Xu <wei@wei-thinkpad.nay.redhat.com>
>>>>
>>>> The timer will only be triggered if the packets pool is not empty,
>>>> and it'll drain off all the cached packets, this is to reduce the
>>>> delay to upper layer protocol stack.
>>>>
>>>> Signed-off-by: Wei Xu <wexu@redhat.com>
>>>> ---
>>>>    hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
>>>>    1 file changed, 38 insertions(+)
>>>>
>>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>>> index 4f77fbe..93df0d5 100644
>>>> --- a/hw/net/virtio-net.c
>>>> +++ b/hw/net/virtio-net.c
>>>> @@ -48,12 +48,17 @@
>>>>      #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
>>>>    +/* Purge coalesced packets timer interval */
>>>> +#define RSC_TIMER_INTERVAL  500000
>>> Any hints for choosing this as default value? Do we need a property for
>>> user to change this?
>> This is still under estimation, 300ms -500ms is a good value to adapt
>> the test, this should be configurable.
>>>> +
>>>>    /* Global statistics */
>>>>    static uint32_t rsc_chain_no_mem;
>>>>      /* Switcher to enable/disable rsc */
>>>>    static bool virtio_net_rsc_bypass;
>>>>    +static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
>>>> +
>>>>    /* Coalesce callback for ipv4/6 */
>>>>    typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg
>>>> *seg,
>>>>                                         const uint8_t *buf, size_t
>>>> size);
>>>> @@ -1625,6 +1630,35 @@ static int
>>>> virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
>>>>        return 0;
>>>>    }
>>>>    +static void virtio_net_rsc_purge(void *opq)
>>>> +{
>>>> +    int ret = 0;
>>>> +    NetRscChain *chain = (NetRscChain *)opq;
>>>> +    NetRscSeg *seg, *rn;
>>>> +
>>>> +    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
>>>> +        if (!qemu_can_send_packet(seg->nc)) {
>>>> +            /* Should quit or continue? not sure if one or some
>>>> +            * of the queues fail would happen, try continue here */
>>> This looks wrong, qemu_can_send_packet() is used for nc's peer not nc
>>> itself.
>> OK.
>>>> +            continue;
>>>> +        }
>>>> +
>>>> +        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
>>>> +        QTAILQ_REMOVE(&chain->buffers, seg, next);
>>>> +        g_free(seg->buf);
>>>> +        g_free(seg);
>>>> +
>>>> +        if (ret == 0) {
>>>> +            /* Try next queue */
>>> Try next seg?
>> Yes, it's seg.
>>>> +            continue;
>>>> +        }
>>> Why need above?
>> Yes, it's optional, but it may help if there is extra code after
>> this; will remove this.
>>>> +    }
>>>> +
>>>> +    if (!QTAILQ_EMPTY(&chain->buffers)) {
>>>> +        timer_mod(chain->drain_timer,
>>>> +                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
>>>> rsc_timeout);
>>> Need stop/start the timer during vm stop/start to save cpu.
>> Thanks, do you know where I should add the code?
> You may want to look at the vmstate change handler in virtio-net.c.
great, thanks a lot.
>
diff mbox

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 4f77fbe..93df0d5 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -48,12 +48,17 @@ 
 
 #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
 
+/* Purge coalesced packets timer interval */
+#define RSC_TIMER_INTERVAL  500000
+
 /* Global statistics */
 static uint32_t rsc_chain_no_mem;
 
 /* Switcher to enable/disable rsc */
 static bool virtio_net_rsc_bypass;
 
+static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
+
 /* Coalesce callback for ipv4/6 */
 typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg *seg,
                                      const uint8_t *buf, size_t size);
@@ -1625,6 +1630,35 @@  static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }
 
+static void virtio_net_rsc_purge(void *opq)
+{
+    int ret = 0;
+    NetRscChain *chain = (NetRscChain *)opq;
+    NetRscSeg *seg, *rn;
+
+    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
+        if (!qemu_can_send_packet(seg->nc)) {
+            /* Should quit or continue? not sure if one or some
+            * of the queues fail would happen, try continue here */
+            continue;
+        }
+
+        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
+        QTAILQ_REMOVE(&chain->buffers, seg, next);
+        g_free(seg->buf);
+        g_free(seg);
+
+        if (ret == 0) {
+            /* Try next queue */
+            continue;
+        }
+    }
+
+    if (!QTAILQ_EMPTY(&chain->buffers)) {
+        timer_mod(chain->drain_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
+    }
+}
 
 static void virtio_net_rsc_cleanup(VirtIONet *n)
 {
@@ -1810,6 +1844,8 @@  static size_t virtio_net_rsc_callback(NetRscChain *chain, NetClientState *nc,
         if (!virtio_net_rsc_cache_buf(chain, nc, buf, size)) {
             return 0;
         } else {
+            timer_mod(chain->drain_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
             return size;
         }
     }
@@ -1877,6 +1913,8 @@  static NetRscChain *virtio_net_rsc_lookup_chain(NetClientState *nc,
     }
 
     chain->proto = proto;
+    chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                      virtio_net_rsc_purge, chain);
     chain->do_receive = virtio_net_rsc_receive4;
 
     QTAILQ_INIT(&chain->buffers);