From patchwork Mon Nov 5 07:45:02 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: jiangyiwen X-Patchwork-Id: 992893 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (mailfrom) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 42pPpv2p9bz9sB7 for ; Mon, 5 Nov 2018 18:45:11 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1729265AbeKERDa (ORCPT ); Mon, 5 Nov 2018 12:03:30 -0500 Received: from szxga07-in.huawei.com ([45.249.212.35]:43142 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1727054AbeKERDa (ORCPT ); Mon, 5 Nov 2018 12:03:30 -0500 Received: from DGGEMS401-HUB.china.huawei.com (unknown [172.30.72.59]) by Forcepoint Email with ESMTP id 86017C8D776CB; Mon, 5 Nov 2018 15:45:06 +0800 (CST) Received: from [127.0.0.1] (10.177.16.168) by DGGEMS401-HUB.china.huawei.com (10.3.19.201) with Microsoft SMTP Server id 14.3.408.0; Mon, 5 Nov 2018 15:45:03 +0800 To: , Jason Wang CC: , , From: jiangyiwen Subject: [PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest Message-ID: <5BDFF4FE.1080500@huawei.com> Date: Mon, 5 Nov 2018 15:45:02 +0800 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Thunderbird/38.5.1 MIME-Version: 1.0 X-Originating-IP: [10.177.16.168] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org During driver probing, if the virtio device has the VIRTIO_VSOCK_F_MRG_RXBUF feature, it will fill the mergeable rx 
buffer to support the host sending mergeable rx buffers. It will fill one page each time, to cope with both small and big packets. Signed-off-by: Yiwen Jiang --- include/linux/virtio_vsock.h | 3 ++ net/vmw_vsock/virtio_transport.c | 72 +++++++++++++++++++++++++++++----------- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e26..bf84418 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -14,6 +14,9 @@ #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) +/* Virtio-vsock feature */ +#define VIRTIO_VSOCK_F_MRG_RXBUF 0 /* Host can merge receive buffers. */ + enum { VSOCK_VQ_RX = 0, /* for host to guest data */ VSOCK_VQ_TX = 1, /* for guest to host data */ diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 5d3cce9..2040a9e 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -64,6 +64,7 @@ struct virtio_vsock { struct virtio_vsock_event event_list[8]; u32 guest_cid; + bool mergeable; }; static struct virtio_vsock *virtio_vsock_get(void) @@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock, return 0; } +static int fill_mergeable_rx_buff(struct virtqueue *vq) +{ + void *page = NULL; + struct scatterlist sg; + int err; + + page = (void *)get_zeroed_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + sg_init_one(&sg, page, PAGE_SIZE); + + err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL); + if (err < 0) + free_page((unsigned long) page); + + return err; +} + static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) { int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; @@ -267,27 +287,33 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) vq = vsock->vqs[VSOCK_VQ_RX]; do { - pkt = kzalloc(sizeof(*pkt), GFP_KERNEL); - if (!pkt) - break; + if (vsock->mergeable) { + ret = fill_mergeable_rx_buff(vq); 
+ if (ret) + break; + } else { + pkt = kzalloc(sizeof(*pkt), GFP_KERNEL); + if (!pkt) + break; - pkt->buf = kmalloc(buf_len, GFP_KERNEL); - if (!pkt->buf) { - virtio_transport_free_pkt(pkt); - break; - } + pkt->buf = kmalloc(buf_len, GFP_KERNEL); + if (!pkt->buf) { + virtio_transport_free_pkt(pkt); + break; + } - pkt->len = buf_len; + pkt->len = buf_len; - sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr)); - sgs[0] = &hdr; + sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr)); + sgs[0] = &hdr; - sg_init_one(&buf, pkt->buf, buf_len); - sgs[1] = &buf; - ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL); - if (ret) { - virtio_transport_free_pkt(pkt); - break; + sg_init_one(&buf, pkt->buf, buf_len); + sgs[1] = &buf; + ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL); + if (ret) { + virtio_transport_free_pkt(pkt); + break; + } } vsock->rx_buf_nr++; } while (vq->num_free); @@ -588,6 +614,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev) if (ret < 0) goto out_vqs; + if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_MRG_RXBUF)) + vsock->mergeable = true; + vsock->rx_buf_nr = 0; vsock->rx_buf_max_nr = 0; atomic_set(&vsock->queued_replies, 0); @@ -640,8 +669,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev) vdev->config->reset(vdev); mutex_lock(&vsock->rx_lock); - while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) - virtio_transport_free_pkt(pkt); + while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) { + if (vsock->mergeable) + free_page((unsigned long)(void *)pkt); + else + virtio_transport_free_pkt(pkt); + } mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->tx_lock); @@ -683,6 +716,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev) }; static unsigned int features[] = { + VIRTIO_VSOCK_F_MRG_RXBUF, }; static struct virtio_driver virtio_vsock_driver = {