Message ID | 20210602034750.23377-9-lulu@redhat.com |
---|---|
State | New |
Headers | show |
Series | vhost-vdpa: add support for configure interrupt | expand |
在 2021/6/2 上午11:47, Cindy Lu 写道: > inorder s/inorder/In order/ > to support configure interrupt, we need to decouple > virtqueue from vector use and vector release function > this patch introduce vector_release_one and vector_use_one > to support one vector. > > Signed-off-by: Cindy Lu <lulu@redhat.com> I think we need to reorder the patches to let such decoupling comes first in this series. > --- > hw/virtio/virtio-pci.c | 122 ++++++++++++++++++++--------------------- > 1 file changed, 61 insertions(+), 61 deletions(-) > > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c > index 6a4ef413a4..f863c89de6 100644 > --- a/hw/virtio/virtio-pci.c > +++ b/hw/virtio/virtio-pci.c > @@ -666,7 +666,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev, > } > > static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, > - unsigned int queue_no, > unsigned int vector) > { > VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; > @@ -710,85 +709,86 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, > ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); > assert(ret == 0); > } > - > -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) > +static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, > + EventNotifier **n, unsigned int *vector) > { > PCIDevice *dev = &proxy->pci_dev; > VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); > - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > - unsigned int vector; > - int ret, queue_no; > VirtQueue *vq; > - EventNotifier *n; > - for (queue_no = 0; queue_no < nvqs; queue_no++) { > + > + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { > + return -1; > + } else { > if (!virtio_queue_get_num(vdev, queue_no)) { > - break; > - } > - vector = virtio_queue_vector(vdev, queue_no); > - if (vector >= msix_nr_vectors_allocated(dev)) { > - continue; > - } > - ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); > - if (ret < 0) { > - goto undo; > - } > - 
/* If guest supports masking, set up irqfd now. > - * Otherwise, delay until unmasked in the frontend. > - */ > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > - vq = virtio_get_queue(vdev, queue_no); > - n = virtio_queue_get_guest_notifier(vq); > - ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); > - if (ret < 0) { > - kvm_virtio_pci_vq_vector_release(proxy, vector); > - goto undo; > - } > + return -1; > } > + *vector = virtio_queue_vector(vdev, queue_no); > + vq = virtio_get_queue(vdev, queue_no); > + *n = virtio_queue_get_guest_notifier(vq); > + } > + if (*vector >= msix_nr_vectors_allocated(dev)) { > + return -1; > } > return 0; > +} > > +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no) > +{ To ease the reviewer, let's separate this patch into two. 1) factoring out the core logic 2) decouple the vq Thanks > + unsigned int vector; > + int ret; > + EventNotifier *n; > + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); > + if (ret < 0) { > + return ret; > + } > + ret = kvm_virtio_pci_vq_vector_use(proxy, vector); > + if (ret < 0) { > + goto undo; > + } > + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); > + if (ret < 0) { > + goto undo; > + } > + return 0; > undo: > - while (--queue_no >= 0) { > - vector = virtio_queue_vector(vdev, queue_no); > - if (vector >= msix_nr_vectors_allocated(dev)) { > - continue; > - } > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > - vq = virtio_get_queue(vdev, queue_no); > - n = virtio_queue_get_guest_notifier(vq); > - kvm_virtio_pci_irqfd_release(proxy, n, vector); > - } > - kvm_virtio_pci_vq_vector_release(proxy, vector); > + kvm_virtio_pci_irqfd_release(proxy, n, vector); > + return ret; > +} > +static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) > +{ > + int queue_no; > + int ret = 0; > + for (queue_no = 0; queue_no < nvqs; queue_no++) { > + ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); > } > return ret; > } > > -static 
void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) > + > +static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, > + int queue_no) > { > - PCIDevice *dev = &proxy->pci_dev; > VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); > unsigned int vector; > - int queue_no; > - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > - VirtQueue *vq; > EventNotifier *n; > + int ret; > + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); > + if (ret < 0) { > + return; > + } > + > + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > + kvm_virtio_pci_irqfd_release(proxy, n, vector); > + } > + kvm_virtio_pci_vq_vector_release(proxy, vector); > +} > +static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) > +{ > + int queue_no; > + > for (queue_no = 0; queue_no < nvqs; queue_no++) { > - if (!virtio_queue_get_num(vdev, queue_no)) { > - break; > - } > - vector = virtio_queue_vector(vdev, queue_no); > - if (vector >= msix_nr_vectors_allocated(dev)) { > - continue; > - } > - /* If guest supports masking, clean up irqfd now. > - * Otherwise, it was cleaned when masked in the frontend. > - */ > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > - vq = virtio_get_queue(vdev, queue_no); > - n = virtio_queue_get_guest_notifier(vq); > - kvm_virtio_pci_irqfd_release(proxy, n, vector); > - } > - kvm_virtio_pci_vq_vector_release(proxy, vector); > + kvm_virtio_pci_vector_release_one(proxy, queue_no); > } > } >
On Thu, Jun 3, 2021 at 2:39 PM Jason Wang <jasowang@redhat.com> wrote: > > > 在 2021/6/2 上午11:47, Cindy Lu 写道: > > inorder > > > s/inorder/In order/ > > > > to support configure interrupt, we need to decouple > > virtqueue from vector use and vector release function > > this patch introduce vector_release_one and vector_use_one > > to support one vector. > > > > Signed-off-by: Cindy Lu <lulu@redhat.com> > > > I think we need to reorder the patches to let such decoupling comes > first in this series. > > > > --- > > hw/virtio/virtio-pci.c | 122 ++++++++++++++++++++--------------------- > > 1 file changed, 61 insertions(+), 61 deletions(-) > > > > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c > > index 6a4ef413a4..f863c89de6 100644 > > --- a/hw/virtio/virtio-pci.c > > +++ b/hw/virtio/virtio-pci.c > > @@ -666,7 +666,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev, > > } > > > > static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, > > - unsigned int queue_no, > > unsigned int vector) > > { > > VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; > > @@ -710,85 +709,86 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, > > ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); > > assert(ret == 0); > > } > > - > > -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) > > +static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, > > + EventNotifier **n, unsigned int *vector) > > { > > PCIDevice *dev = &proxy->pci_dev; > > VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); > > - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > > - unsigned int vector; > > - int ret, queue_no; > > VirtQueue *vq; > > - EventNotifier *n; > > - for (queue_no = 0; queue_no < nvqs; queue_no++) { > > + > > + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { > > + return -1; > > + } else { > > if (!virtio_queue_get_num(vdev, queue_no)) { > > - break; > > - } > > - vector = 
virtio_queue_vector(vdev, queue_no); > > - if (vector >= msix_nr_vectors_allocated(dev)) { > > - continue; > > - } > > - ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); > > - if (ret < 0) { > > - goto undo; > > - } > > - /* If guest supports masking, set up irqfd now. > > - * Otherwise, delay until unmasked in the frontend. > > - */ > > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > > - vq = virtio_get_queue(vdev, queue_no); > > - n = virtio_queue_get_guest_notifier(vq); > > - ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); > > - if (ret < 0) { > > - kvm_virtio_pci_vq_vector_release(proxy, vector); > > - goto undo; > > - } > > + return -1; > > } > > + *vector = virtio_queue_vector(vdev, queue_no); > > + vq = virtio_get_queue(vdev, queue_no); > > + *n = virtio_queue_get_guest_notifier(vq); > > + } > > + if (*vector >= msix_nr_vectors_allocated(dev)) { > > + return -1; > > } > > return 0; > > +} > > > > +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no) > > +{ > > > To ease the reviewer, let's separate this patch into two. 
> > 1) factoring out the core logic > 2) decouple the vq > > Thanks > Thanks Jason, I will change this part > > > + unsigned int vector; > > + int ret; > > + EventNotifier *n; > > + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); > > + if (ret < 0) { > > + return ret; > > + } > > + ret = kvm_virtio_pci_vq_vector_use(proxy, vector); > > + if (ret < 0) { > > + goto undo; > > + } > > + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); > > + if (ret < 0) { > > + goto undo; > > + } > > + return 0; > > undo: > > - while (--queue_no >= 0) { > > - vector = virtio_queue_vector(vdev, queue_no); > > - if (vector >= msix_nr_vectors_allocated(dev)) { > > - continue; > > - } > > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > > - vq = virtio_get_queue(vdev, queue_no); > > - n = virtio_queue_get_guest_notifier(vq); > > - kvm_virtio_pci_irqfd_release(proxy, n, vector); > > - } > > - kvm_virtio_pci_vq_vector_release(proxy, vector); > > + kvm_virtio_pci_irqfd_release(proxy, n, vector); > > + return ret; > > +} > > +static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) > > +{ > > + int queue_no; > > + int ret = 0; > > + for (queue_no = 0; queue_no < nvqs; queue_no++) { > > + ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); > > } > > return ret; > > } > > > > -static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) > > + > > +static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, > > + int queue_no) > > { > > - PCIDevice *dev = &proxy->pci_dev; > > VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); > > unsigned int vector; > > - int queue_no; > > - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > > - VirtQueue *vq; > > EventNotifier *n; > > + int ret; > > + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > > + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); > > + if (ret < 0) { > > + return; > > + } > > + > > + if (vdev->use_guest_notifier_mask && 
k->guest_notifier_mask) { > > + kvm_virtio_pci_irqfd_release(proxy, n, vector); > > + } > > + kvm_virtio_pci_vq_vector_release(proxy, vector); > > +} > > +static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) > > +{ > > + int queue_no; > > + > > for (queue_no = 0; queue_no < nvqs; queue_no++) { > > - if (!virtio_queue_get_num(vdev, queue_no)) { > > - break; > > - } > > - vector = virtio_queue_vector(vdev, queue_no); > > - if (vector >= msix_nr_vectors_allocated(dev)) { > > - continue; > > - } > > - /* If guest supports masking, clean up irqfd now. > > - * Otherwise, it was cleaned when masked in the frontend. > > - */ > > - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { > > - vq = virtio_get_queue(vdev, queue_no); > > - n = virtio_queue_get_guest_notifier(vq); > > - kvm_virtio_pci_irqfd_release(proxy, n, vector); > > - } > > - kvm_virtio_pci_vq_vector_release(proxy, vector); > > + kvm_virtio_pci_vector_release_one(proxy, queue_no); > > } > > } > > >
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 6a4ef413a4..f863c89de6 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -666,7 +666,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev, } static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, - unsigned int queue_no, unsigned int vector) { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; @@ -710,85 +709,86 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); assert(ret == 0); } - -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) +static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, + EventNotifier **n, unsigned int *vector) { PCIDevice *dev = &proxy->pci_dev; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - unsigned int vector; - int ret, queue_no; VirtQueue *vq; - EventNotifier *n; - for (queue_no = 0; queue_no < nvqs; queue_no++) { + + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { + return -1; + } else { if (!virtio_queue_get_num(vdev, queue_no)) { - break; - } - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; - } - ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); - if (ret < 0) { - goto undo; - } - /* If guest supports masking, set up irqfd now. - * Otherwise, delay until unmasked in the frontend. 
- */ - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - vq = virtio_get_queue(vdev, queue_no); - n = virtio_queue_get_guest_notifier(vq); - ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); - if (ret < 0) { - kvm_virtio_pci_vq_vector_release(proxy, vector); - goto undo; - } + return -1; } + *vector = virtio_queue_vector(vdev, queue_no); + vq = virtio_get_queue(vdev, queue_no); + *n = virtio_queue_get_guest_notifier(vq); + } + if (*vector >= msix_nr_vectors_allocated(dev)) { + return -1; } return 0; +} +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no) +{ + unsigned int vector; + int ret; + EventNotifier *n; + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); + if (ret < 0) { + return ret; + } + ret = kvm_virtio_pci_vq_vector_use(proxy, vector); + if (ret < 0) { + goto undo; + } + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); + if (ret < 0) { + goto undo; + } + return 0; undo: - while (--queue_no >= 0) { - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; - } - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - vq = virtio_get_queue(vdev, queue_no); - n = virtio_queue_get_guest_notifier(vq); - kvm_virtio_pci_irqfd_release(proxy, n, vector); - } - kvm_virtio_pci_vq_vector_release(proxy, vector); + kvm_virtio_pci_irqfd_release(proxy, n, vector); + return ret; +} +static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) +{ + int queue_no; + int ret = 0; + for (queue_no = 0; queue_no < nvqs; queue_no++) { + ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); } return ret; } -static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) + +static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy, + int queue_no) { - PCIDevice *dev = &proxy->pci_dev; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); unsigned int vector; - int queue_no; - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - 
VirtQueue *vq; EventNotifier *n; + int ret; + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); + if (ret < 0) { + return; + } + + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + kvm_virtio_pci_irqfd_release(proxy, n, vector); + } + kvm_virtio_pci_vq_vector_release(proxy, vector); +} +static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) +{ + int queue_no; + for (queue_no = 0; queue_no < nvqs; queue_no++) { - if (!virtio_queue_get_num(vdev, queue_no)) { - break; - } - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; - } - /* If guest supports masking, clean up irqfd now. - * Otherwise, it was cleaned when masked in the frontend. - */ - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - vq = virtio_get_queue(vdev, queue_no); - n = virtio_queue_get_guest_notifier(vq); - kvm_virtio_pci_irqfd_release(proxy, n, vector); - } - kvm_virtio_pci_vq_vector_release(proxy, vector); + kvm_virtio_pci_vector_release_one(proxy, queue_no); } }
In order to support configure interrupt, we need to decouple virtqueue from the vector use and vector release functions. This patch introduces vector_release_one and vector_use_one to support one vector. Signed-off-by: Cindy Lu <lulu@redhat.com> --- hw/virtio/virtio-pci.c | 122 ++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 61 deletions(-)