Message ID | 1448264471-25066-1-git-send-email-jasowang@redhat.com |
---|---|
State | New |
Headers | show |
On Mon, Nov 23, 2015 at 03:41:11PM +0800, Jason Wang wrote: > Currently, all virtio devices bypass IOMMU completely. This is because > address_space_memory is assumed and used during DMA emulation. This > patch converts the virtio core API to use DMA API. This idea is > > - introducing a new transport specific helper to query the dma address > space. (only pci version is implemented). > - query and use this address space during virtio device guest memory > accessing > > With this virtiodevices will not bypass IOMMU anymore. Little tested with > intel_iommu=on with virtio guest DMA series posted in > https://lkml.org/lkml/2015/10/28/64. > > TODO: > - Feature bit for this This probably implies it should only be implemented if device supports the modern mode. Not sure what's the best way to handle transitional devices. > - Implement this for all transports Tested with vhost, and it does not seem to work. So more TODO: - make this work with vhost-user and vhost-net. Also, it seems prudent to add - make the new behaviour conditional on a new property > Signed-off-by: Jason Wang <jasowang@redhat.com> > --- > hw/block/virtio-blk.c | 2 +- > hw/char/virtio-serial-bus.c | 2 +- > hw/scsi/virtio-scsi.c | 2 +- > hw/virtio/virtio-pci.c | 9 +++++++++ > hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- > include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- > include/hw/virtio/virtio-bus.h | 1 + > include/hw/virtio/virtio.h | 2 +- > 8 files changed, 67 insertions(+), 29 deletions(-) > > diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c > index e70fccf..5499480 100644 > --- a/hw/block/virtio-blk.c > +++ b/hw/block/virtio-blk.c > @@ -846,7 +846,7 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, > req->next = s->rq; > s->rq = req; > > - virtqueue_map(&req->elem); > + virtqueue_map(vdev, &req->elem); > } > > return 0; > diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c > index 497b0af..39e9ed2 100644 > 
--- a/hw/char/virtio-serial-bus.c > +++ b/hw/char/virtio-serial-bus.c > @@ -705,7 +705,7 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id, > > qemu_get_buffer(f, (unsigned char *)&port->elem, > sizeof(port->elem)); > - virtqueue_map(&port->elem); > + virtqueue_map(&s->parent_obj, &port->elem); > > /* > * Port was throttled on source machine. Let's > diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c > index 7655401..0734d27 100644 > --- a/hw/scsi/virtio-scsi.c > +++ b/hw/scsi/virtio-scsi.c > @@ -206,7 +206,7 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq) > req = virtio_scsi_init_req(s, vs->cmd_vqs[n]); > qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem)); > > - virtqueue_map(&req->elem); > + virtqueue_map(&vs->parent_obj, &req->elem); > > if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, > sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) { > diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c > index dd48562..876505d 100644 > --- a/hw/virtio/virtio-pci.c > +++ b/hw/virtio/virtio-pci.c > @@ -1211,6 +1211,14 @@ static int virtio_pci_query_nvectors(DeviceState *d) > return proxy->nvectors; > } > > +static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) > +{ > + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); > + PCIDevice *dev = &proxy->pci_dev; > + > + return pci_get_address_space(dev); > +} > + > static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, > struct virtio_pci_cap *cap) > { > @@ -2476,6 +2484,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) > k->device_plugged = virtio_pci_device_plugged; > k->device_unplugged = virtio_pci_device_unplugged; > k->query_nvectors = virtio_pci_query_nvectors; > + k->get_dma_as = virtio_pci_get_dma_as; > } > > static const TypeInfo virtio_pci_bus_info = { > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c > index 1edef59..e09430d 100644 > --- a/hw/virtio/virtio.c > +++ b/hw/virtio/virtio.c > @@ -21,6 
+21,7 @@ > #include "hw/virtio/virtio-bus.h" > #include "migration/migration.h" > #include "hw/virtio/virtio-access.h" > +#include "sysemu/dma.h" > > /* > * The alignment to use between consumer and producer parts of vring. > @@ -247,6 +248,7 @@ int virtio_queue_empty(VirtQueue *vq) > static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, > unsigned int len) > { > + AddressSpace *dma_as = virtio_get_dma_as(vq->vdev); > unsigned int offset; > int i; > > @@ -254,17 +256,17 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, > for (i = 0; i < elem->in_num; i++) { > size_t size = MIN(len - offset, elem->in_sg[i].iov_len); > > - cpu_physical_memory_unmap(elem->in_sg[i].iov_base, > - elem->in_sg[i].iov_len, > - 1, size); > + dma_memory_unmap(dma_as, elem->in_sg[i].iov_base, elem->in_sg[i].iov_len, > + DMA_DIRECTION_FROM_DEVICE, size); > > offset += size; > } > > for (i = 0; i < elem->out_num; i++) > - cpu_physical_memory_unmap(elem->out_sg[i].iov_base, > - elem->out_sg[i].iov_len, > - 0, elem->out_sg[i].iov_len); > + dma_memory_unmap(dma_as, elem->out_sg[i].iov_base, > + elem->out_sg[i].iov_len, > + DMA_DIRECTION_TO_DEVICE, > + elem->out_sg[i].iov_len); > } > > void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, > @@ -448,9 +450,9 @@ int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, > return in_bytes <= in_total && out_bytes <= out_total; > } > > -static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, > - unsigned int *num_sg, unsigned int max_size, > - int is_write) > +static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, > + hwaddr *addr, unsigned int *num_sg, > + unsigned int max_size, int is_write) > { > unsigned int i; > hwaddr len; > @@ -469,7 +471,10 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, > > for (i = 0; i < *num_sg; i++) { > len = sg[i].iov_len; > - sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); > + sg[i].iov_base 
= dma_memory_map(virtio_get_dma_as(vdev), > + addr[i], &len, is_write ? > + DMA_DIRECTION_FROM_DEVICE : > + DMA_DIRECTION_TO_DEVICE); > if (!sg[i].iov_base) { > error_report("virtio: error trying to map MMIO memory"); > exit(1); > @@ -491,13 +496,14 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, > } > } > > -void virtqueue_map(VirtQueueElement *elem) > +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem) > { > - virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num, > + virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, > MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)), > 1); > - virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num, > - MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)), > + virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, > + MIN(ARRAY_SIZE(elem->out_sg), > + ARRAY_SIZE(elem->out_addr)), > 0); > } > > @@ -562,7 +568,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem) > } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max); > > /* Now map what we have collected */ > - virtqueue_map(elem); > + virtqueue_map(vdev, elem); > > elem->index = head; > > diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h > index 8aec843..cca7d2a 100644 > --- a/include/hw/virtio/virtio-access.h > +++ b/include/hw/virtio/virtio-access.h > @@ -15,8 +15,20 @@ > #ifndef _QEMU_VIRTIO_ACCESS_H > #define _QEMU_VIRTIO_ACCESS_H > #include "hw/virtio/virtio.h" > +#include "hw/virtio/virtio-bus.h" > #include "exec/address-spaces.h" > > +static inline AddressSpace *virtio_get_dma_as(VirtIODevice *vdev) > +{ > + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); > + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); > + > + if (k->get_dma_as) { > + return k->get_dma_as(qbus->parent); > + } > + return &address_space_memory; > +} > + > static inline bool virtio_access_is_big_endian(VirtIODevice *vdev) > { > if 
(virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { > @@ -47,45 +59,55 @@ static inline bool virtio_legacy_is_cross_endian(VirtIODevice *vdev) > > static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa) > { > + AddressSpace *dma_as = virtio_get_dma_as(vdev); > + > if (virtio_access_is_big_endian(vdev)) { > - return lduw_be_phys(&address_space_memory, pa); > + return lduw_be_phys(dma_as, pa); > } > - return lduw_le_phys(&address_space_memory, pa); > + return lduw_le_phys(dma_as, pa); > } > > static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa) > { > + AddressSpace *dma_as = virtio_get_dma_as(vdev); > + > if (virtio_access_is_big_endian(vdev)) { > - return ldl_be_phys(&address_space_memory, pa); > + return ldl_be_phys(dma_as, pa); > } > - return ldl_le_phys(&address_space_memory, pa); > + return ldl_le_phys(dma_as, pa); > } > > static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa) > { > + AddressSpace *dma_as = virtio_get_dma_as(vdev); > + > if (virtio_access_is_big_endian(vdev)) { > - return ldq_be_phys(&address_space_memory, pa); > + return ldq_be_phys(dma_as, pa); > } > - return ldq_le_phys(&address_space_memory, pa); > + return ldq_le_phys(dma_as, pa); > } > > static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa, > uint16_t value) > { > + AddressSpace *dma_as = virtio_get_dma_as(vdev); > + > if (virtio_access_is_big_endian(vdev)) { > - stw_be_phys(&address_space_memory, pa, value); > + stw_be_phys(dma_as, pa, value); > } else { > - stw_le_phys(&address_space_memory, pa, value); > + stw_le_phys(dma_as, pa, value); > } > } > > static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa, > uint32_t value) > { > + AddressSpace *dma_as = virtio_get_dma_as(vdev); > + > if (virtio_access_is_big_endian(vdev)) { > - stl_be_phys(&address_space_memory, pa, value); > + stl_be_phys(dma_as, pa, value); > } else { > - stl_le_phys(&address_space_memory, pa, value); > + stl_le_phys(dma_as, pa, value); > } > } > > 
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h > index 6c3d4cb..638735e 100644 > --- a/include/hw/virtio/virtio-bus.h > +++ b/include/hw/virtio/virtio-bus.h > @@ -71,6 +71,7 @@ typedef struct VirtioBusClass { > * Note that changing this will break migration for this transport. > */ > bool has_variable_vring_alignment; > + AddressSpace *(*get_dma_as)(DeviceState *d); > } VirtioBusClass; > > struct VirtioBusState { > diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h > index 205fadf..7a8ef13 100644 > --- a/include/hw/virtio/virtio.h > +++ b/include/hw/virtio/virtio.h > @@ -151,7 +151,7 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, > void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, > unsigned int len, unsigned int idx); > > -void virtqueue_map(VirtQueueElement *elem); > +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem); > int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem); > int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, > unsigned int out_bytes); > -- > 2.5.0
On Mon, 23 Nov 2015 15:41:11 +0800 Jason Wang <jasowang@redhat.com> wrote: > Currently, all virtio devices bypass IOMMU completely. This is because > address_space_memory is assumed and used during DMA emulation. This > patch converts the virtio core API to use DMA API. This idea is > > - introducing a new transport specific helper to query the dma address > space. (only pci version is implemented). > - query and use this address space during virtio device guest memory > accessing > > With this virtiodevices will not bypass IOMMU anymore. Little tested with > intel_iommu=on with virtio guest DMA series posted in > https://lkml.org/lkml/2015/10/28/64. > > TODO: > - Feature bit for this I'm still not convinced about that feature bit stuff. It just feels wrong to use a mechanism that conveys negotiable device features to configure what is basically a platform/hypervisor feature. I'd rather see this out of the virtio layer and into the pci layer. > - Implement this for all transports Is it OK to just keep a fallback to today's implementation for transports for which the iommu concept doesn't make sense and that will always have an identity mapping? > > Signed-off-by: Jason Wang <jasowang@redhat.com> > --- > hw/block/virtio-blk.c | 2 +- > hw/char/virtio-serial-bus.c | 2 +- > hw/scsi/virtio-scsi.c | 2 +- > hw/virtio/virtio-pci.c | 9 +++++++++ > hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- > include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- > include/hw/virtio/virtio-bus.h | 1 + > include/hw/virtio/virtio.h | 2 +- > 8 files changed, 67 insertions(+), 29 deletions(-) FWIW, this doesn't seem to break for the s390-ccw-virtio machine (only snifftested).
On Mon, Nov 23, 2015 at 10:36:45AM +0100, Cornelia Huck wrote: > On Mon, 23 Nov 2015 15:41:11 +0800 > Jason Wang <jasowang@redhat.com> wrote: > > > Currently, all virtio devices bypass IOMMU completely. This is because > > address_space_memory is assumed and used during DMA emulation. This > > patch converts the virtio core API to use DMA API. This idea is > > > > - introducing a new transport specific helper to query the dma address > > space. (only pci version is implemented). > > - query and use this address space during virtio device guest memory > > accessing > > > > With this virtiodevices will not bypass IOMMU anymore. Little tested with > > intel_iommu=on with virtio guest DMA series posted in > > https://lkml.org/lkml/2015/10/28/64. > > > > TODO: > > - Feature bit for this > > I'm still not convinced about that feature bit stuff. It just feels > wrong to use a mechanism that conveys negotiable device features to > configure what is basically a platform/hypervisor feature. I'd rather > see this out of the virtio layer and into the pci layer. I agree it's not something that needs to be negotiated. But given that we are changing device and driver behaviour in a drastic way, it seems prudent to have a way for guest and host to discover that, even if it's just in case. Whether it's a feature bit or something else pci-specific, depends on whether this makes sense for any other transport. > > - Implement this for all transports > > Is it OK to just keep a fallback to today's implementation for > transports for which the iommu concept doesn't make sense and that will > always have an identity mapping? Is there a reason why iommu does not make any sense for ccw or mmio? 
> > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > --- > > hw/block/virtio-blk.c | 2 +- > > hw/char/virtio-serial-bus.c | 2 +- > > hw/scsi/virtio-scsi.c | 2 +- > > hw/virtio/virtio-pci.c | 9 +++++++++ > > hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- > > include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- > > include/hw/virtio/virtio-bus.h | 1 + > > include/hw/virtio/virtio.h | 2 +- > > 8 files changed, 67 insertions(+), 29 deletions(-) > > FWIW, this doesn't seem to break for the s390-ccw-virtio machine (only > snifftested).
On Mon, 23 Nov 2015 11:52:50 +0200 "Michael S. Tsirkin" <mst@redhat.com> wrote: > On Mon, Nov 23, 2015 at 10:36:45AM +0100, Cornelia Huck wrote: > > On Mon, 23 Nov 2015 15:41:11 +0800 > > Jason Wang <jasowang@redhat.com> wrote: > > > > > Currently, all virtio devices bypass IOMMU completely. This is because > > > address_space_memory is assumed and used during DMA emulation. This > > > patch converts the virtio core API to use DMA API. This idea is > > > > > > - introducing a new transport specific helper to query the dma address > > > space. (only pci version is implemented). > > > - query and use this address space during virtio device guest memory > > > accessing > > > > > > With this virtiodevices will not bypass IOMMU anymore. Little tested with > > > intel_iommu=on with virtio guest DMA series posted in > > > https://lkml.org/lkml/2015/10/28/64. > > > > > > TODO: > > > - Feature bit for this > > > > I'm still not convinced about that feature bit stuff. It just feels > > wrong to use a mechanism that conveys negotiable device features to > > configure what is basically a platform/hypervisor feature. I'd rather > > see this out of the virtio layer and into the pci layer. > > I agree it's not something that needs to be negotiated. > > But given that we are changing device and driver behaviour in a drastic > way, it seems prudent to have a way for guest and host to discover that, > even if it's just in case. > > Whether it's a feature bit or something else pci-specific, > depends on whether this makes sense for any other transport. > > > > - Implement this for all transports > > > > Is it OK to just keep a fallback to today's implementation for > > transports for which the iommu concept doesn't make sense and that will > > always have an identity mapping? > > Is there a reason why iommu does not make any sense for ccw or mmio? 
Channel I/O uses a different concept when performing data transfers: The addresses referenced by the channel program can be anywhere in the guest's main memory (today's qemu only supports addresses under 2G, as IDAWs are not yet implemented). Basically, the OS will refer to addresses anywhere in its address space and the channel subsystem is subsequently free to read from/write to that memory. This also applies to queues: The OS will tell the channel subsystem/device which memory areas can be accessed (the proprietary qdio transport is the same as virtio in that regard). The first time we needed an iommu on s390 was when pci was introduced. If I understand correctly what an iommu does, that concept does not match. For dma, we can fall back to an identity mapping, but I don't see how we can fit in an iommu. The mmio transport is a completely different beast; I'm afraid I don't know enough about it to say how it interacts with iommus.
On 23 November 2015 at 14:34, Cornelia Huck <cornelia.huck@de.ibm.com> wrote: > The mmio transport is a completely different beast; I'm afraid I don't > know enough about it to say how it interacts with iommus. Conceptually, it's just a device that does DMA, I think. You could in theory put it behind an IOMMU, but nobody does (and I don't know whether the kernel code could cope with that). thanks -- PMM
On 11/23/2015 04:06 PM, Michael S. Tsirkin wrote: > On Mon, Nov 23, 2015 at 03:41:11PM +0800, Jason Wang wrote: >> Currently, all virtio devices bypass IOMMU completely. This is because >> address_space_memory is assumed and used during DMA emulation. This >> patch converts the virtio core API to use DMA API. This idea is >> >> - introducing a new transport specific helper to query the dma address >> space. (only pci version is implemented). >> - query and use this address space during virtio device guest memory >> accessing >> >> With this virtiodevices will not bypass IOMMU anymore. Little tested with >> intel_iommu=on with virtio guest DMA series posted in >> https://lkml.org/lkml/2015/10/28/64. >> >> TODO: >> - Feature bit for this > This probably implies it should only be implemented > if device supports the modern mode. > Not sure what's the best way to handle transitional > devices. Don't see the issue here. Transitional devices could decide based on the feature bit? , for the name, how about something like VIRTIO_F_HOST_IOMMU ? > >> - Implement this for all transports > Tested with vhost, and it does not seem to work. > So more TODO: > - make this work with vhost-user and vhost-net. > > Also, it seems prudent to add > - make the new behaviour conditional on a new property Right. 
> >> Signed-off-by: Jason Wang <jasowang@redhat.com> >> --- >> hw/block/virtio-blk.c | 2 +- >> hw/char/virtio-serial-bus.c | 2 +- >> hw/scsi/virtio-scsi.c | 2 +- >> hw/virtio/virtio-pci.c | 9 +++++++++ >> hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- >> include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- >> include/hw/virtio/virtio-bus.h | 1 + >> include/hw/virtio/virtio.h | 2 +- >> 8 files changed, 67 insertions(+), 29 deletions(-) >> >> diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c >> index e70fccf..5499480 100644 >> --- a/hw/block/virtio-blk.c >> +++ b/hw/block/virtio-blk.c >> @@ -846,7 +846,7 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, >> req->next = s->rq; >> s->rq = req; >> >> - virtqueue_map(&req->elem); >> + virtqueue_map(vdev, &req->elem); >> } >> >> return 0; >> diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c >> index 497b0af..39e9ed2 100644 >> --- a/hw/char/virtio-serial-bus.c >> +++ b/hw/char/virtio-serial-bus.c >> @@ -705,7 +705,7 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id, >> >> qemu_get_buffer(f, (unsigned char *)&port->elem, >> sizeof(port->elem)); >> - virtqueue_map(&port->elem); >> + virtqueue_map(&s->parent_obj, &port->elem); >> >> /* >> * Port was throttled on source machine. 
Let's >> diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c >> index 7655401..0734d27 100644 >> --- a/hw/scsi/virtio-scsi.c >> +++ b/hw/scsi/virtio-scsi.c >> @@ -206,7 +206,7 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq) >> req = virtio_scsi_init_req(s, vs->cmd_vqs[n]); >> qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem)); >> >> - virtqueue_map(&req->elem); >> + virtqueue_map(&vs->parent_obj, &req->elem); >> >> if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, >> sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) { >> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c >> index dd48562..876505d 100644 >> --- a/hw/virtio/virtio-pci.c >> +++ b/hw/virtio/virtio-pci.c >> @@ -1211,6 +1211,14 @@ static int virtio_pci_query_nvectors(DeviceState *d) >> return proxy->nvectors; >> } >> >> +static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) >> +{ >> + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); >> + PCIDevice *dev = &proxy->pci_dev; >> + >> + return pci_get_address_space(dev); >> +} >> + >> static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, >> struct virtio_pci_cap *cap) >> { >> @@ -2476,6 +2484,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) >> k->device_plugged = virtio_pci_device_plugged; >> k->device_unplugged = virtio_pci_device_unplugged; >> k->query_nvectors = virtio_pci_query_nvectors; >> + k->get_dma_as = virtio_pci_get_dma_as; >> } >> >> static const TypeInfo virtio_pci_bus_info = { >> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c >> index 1edef59..e09430d 100644 >> --- a/hw/virtio/virtio.c >> +++ b/hw/virtio/virtio.c >> @@ -21,6 +21,7 @@ >> #include "hw/virtio/virtio-bus.h" >> #include "migration/migration.h" >> #include "hw/virtio/virtio-access.h" >> +#include "sysemu/dma.h" >> >> /* >> * The alignment to use between consumer and producer parts of vring. 
>> @@ -247,6 +248,7 @@ int virtio_queue_empty(VirtQueue *vq) >> static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, >> unsigned int len) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vq->vdev); >> unsigned int offset; >> int i; >> >> @@ -254,17 +256,17 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, >> for (i = 0; i < elem->in_num; i++) { >> size_t size = MIN(len - offset, elem->in_sg[i].iov_len); >> >> - cpu_physical_memory_unmap(elem->in_sg[i].iov_base, >> - elem->in_sg[i].iov_len, >> - 1, size); >> + dma_memory_unmap(dma_as, elem->in_sg[i].iov_base, elem->in_sg[i].iov_len, >> + DMA_DIRECTION_FROM_DEVICE, size); >> >> offset += size; >> } >> >> for (i = 0; i < elem->out_num; i++) >> - cpu_physical_memory_unmap(elem->out_sg[i].iov_base, >> - elem->out_sg[i].iov_len, >> - 0, elem->out_sg[i].iov_len); >> + dma_memory_unmap(dma_as, elem->out_sg[i].iov_base, >> + elem->out_sg[i].iov_len, >> + DMA_DIRECTION_TO_DEVICE, >> + elem->out_sg[i].iov_len); >> } >> >> void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, >> @@ -448,9 +450,9 @@ int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, >> return in_bytes <= in_total && out_bytes <= out_total; >> } >> >> -static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, >> - unsigned int *num_sg, unsigned int max_size, >> - int is_write) >> +static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, >> + hwaddr *addr, unsigned int *num_sg, >> + unsigned int max_size, int is_write) >> { >> unsigned int i; >> hwaddr len; >> @@ -469,7 +471,10 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, >> >> for (i = 0; i < *num_sg; i++) { >> len = sg[i].iov_len; >> - sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); >> + sg[i].iov_base = dma_memory_map(virtio_get_dma_as(vdev), >> + addr[i], &len, is_write ? 
>> + DMA_DIRECTION_FROM_DEVICE : >> + DMA_DIRECTION_TO_DEVICE); >> if (!sg[i].iov_base) { >> error_report("virtio: error trying to map MMIO memory"); >> exit(1); >> @@ -491,13 +496,14 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, >> } >> } >> >> -void virtqueue_map(VirtQueueElement *elem) >> +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem) >> { >> - virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num, >> + virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, >> MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)), >> 1); >> - virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num, >> - MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)), >> + virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, >> + MIN(ARRAY_SIZE(elem->out_sg), >> + ARRAY_SIZE(elem->out_addr)), >> 0); >> } >> >> @@ -562,7 +568,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem) >> } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max); >> >> /* Now map what we have collected */ >> - virtqueue_map(elem); >> + virtqueue_map(vdev, elem); >> >> elem->index = head; >> >> diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h >> index 8aec843..cca7d2a 100644 >> --- a/include/hw/virtio/virtio-access.h >> +++ b/include/hw/virtio/virtio-access.h >> @@ -15,8 +15,20 @@ >> #ifndef _QEMU_VIRTIO_ACCESS_H >> #define _QEMU_VIRTIO_ACCESS_H >> #include "hw/virtio/virtio.h" >> +#include "hw/virtio/virtio-bus.h" >> #include "exec/address-spaces.h" >> >> +static inline AddressSpace *virtio_get_dma_as(VirtIODevice *vdev) >> +{ >> + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); >> + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); >> + >> + if (k->get_dma_as) { >> + return k->get_dma_as(qbus->parent); >> + } >> + return &address_space_memory; >> +} >> + >> static inline bool virtio_access_is_big_endian(VirtIODevice *vdev) >> { >> if (virtio_vdev_has_feature(vdev, 
VIRTIO_F_VERSION_1)) { >> @@ -47,45 +59,55 @@ static inline bool virtio_legacy_is_cross_endian(VirtIODevice *vdev) >> >> static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vdev); >> + >> if (virtio_access_is_big_endian(vdev)) { >> - return lduw_be_phys(&address_space_memory, pa); >> + return lduw_be_phys(dma_as, pa); >> } >> - return lduw_le_phys(&address_space_memory, pa); >> + return lduw_le_phys(dma_as, pa); >> } >> >> static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vdev); >> + >> if (virtio_access_is_big_endian(vdev)) { >> - return ldl_be_phys(&address_space_memory, pa); >> + return ldl_be_phys(dma_as, pa); >> } >> - return ldl_le_phys(&address_space_memory, pa); >> + return ldl_le_phys(dma_as, pa); >> } >> >> static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vdev); >> + >> if (virtio_access_is_big_endian(vdev)) { >> - return ldq_be_phys(&address_space_memory, pa); >> + return ldq_be_phys(dma_as, pa); >> } >> - return ldq_le_phys(&address_space_memory, pa); >> + return ldq_le_phys(dma_as, pa); >> } >> >> static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa, >> uint16_t value) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vdev); >> + >> if (virtio_access_is_big_endian(vdev)) { >> - stw_be_phys(&address_space_memory, pa, value); >> + stw_be_phys(dma_as, pa, value); >> } else { >> - stw_le_phys(&address_space_memory, pa, value); >> + stw_le_phys(dma_as, pa, value); >> } >> } >> >> static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa, >> uint32_t value) >> { >> + AddressSpace *dma_as = virtio_get_dma_as(vdev); >> + >> if (virtio_access_is_big_endian(vdev)) { >> - stl_be_phys(&address_space_memory, pa, value); >> + stl_be_phys(dma_as, pa, value); >> } else { >> - stl_le_phys(&address_space_memory, pa, value); >> + 
stl_le_phys(dma_as, pa, value); >> } >> } >> >> diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h >> index 6c3d4cb..638735e 100644 >> --- a/include/hw/virtio/virtio-bus.h >> +++ b/include/hw/virtio/virtio-bus.h >> @@ -71,6 +71,7 @@ typedef struct VirtioBusClass { >> * Note that changing this will break migration for this transport. >> */ >> bool has_variable_vring_alignment; >> + AddressSpace *(*get_dma_as)(DeviceState *d); >> } VirtioBusClass; >> >> struct VirtioBusState { >> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h >> index 205fadf..7a8ef13 100644 >> --- a/include/hw/virtio/virtio.h >> +++ b/include/hw/virtio/virtio.h >> @@ -151,7 +151,7 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, >> void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, >> unsigned int len, unsigned int idx); >> >> -void virtqueue_map(VirtQueueElement *elem); >> +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem); >> int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem); >> int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, >> unsigned int out_bytes); >> -- >> 2.5.0
On 11/23/2015 05:36 PM, Cornelia Huck wrote: > On Mon, 23 Nov 2015 15:41:11 +0800 > Jason Wang <jasowang@redhat.com> wrote: > >> Currently, all virtio devices bypass IOMMU completely. This is because >> address_space_memory is assumed and used during DMA emulation. This >> patch converts the virtio core API to use DMA API. This idea is >> >> - introducing a new transport specific helper to query the dma address >> space. (only pci version is implemented). >> - query and use this address space during virtio device guest memory >> accessing >> >> With this virtiodevices will not bypass IOMMU anymore. Little tested with >> intel_iommu=on with virtio guest DMA series posted in >> https://lkml.org/lkml/2015/10/28/64. >> >> TODO: >> - Feature bit for this > I'm still not convinced about that feature bit stuff. It just feels > wrong to use a mechanism that conveys negotiable device features to > configure what is basically a platform/hypervisor feature. I'd rather > see this out of the virtio layer and into the pci layer. > >> - Implement this for all transports > Is it OK to just keep a fallback to today's implementation for > transports for which the iommu concept doesn't make sense and that will > always have an identity mapping? Yes it is. This patch keeps this fallback (e.g using address_space_memory) for the transport without an iommu implementation. > >> Signed-off-by: Jason Wang <jasowang@redhat.com> >> --- >> hw/block/virtio-blk.c | 2 +- >> hw/char/virtio-serial-bus.c | 2 +- >> hw/scsi/virtio-scsi.c | 2 +- >> hw/virtio/virtio-pci.c | 9 +++++++++ >> hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- >> include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- >> include/hw/virtio/virtio-bus.h | 1 + >> include/hw/virtio/virtio.h | 2 +- >> 8 files changed, 67 insertions(+), 29 deletions(-) > FWIW, this doesn't seem to break for the s390-ccw-virtio machine (only > snifftested). > Good to know this. Thanks for the testing.
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index e70fccf..5499480 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -846,7 +846,7 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, req->next = s->rq; s->rq = req; - virtqueue_map(&req->elem); + virtqueue_map(vdev, &req->elem); } return 0; diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index 497b0af..39e9ed2 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -705,7 +705,7 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id, qemu_get_buffer(f, (unsigned char *)&port->elem, sizeof(port->elem)); - virtqueue_map(&port->elem); + virtqueue_map(&s->parent_obj, &port->elem); /* * Port was throttled on source machine. Let's diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 7655401..0734d27 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -206,7 +206,7 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq) req = virtio_scsi_init_req(s, vs->cmd_vqs[n]); qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem)); - virtqueue_map(&req->elem); + virtqueue_map(&vs->parent_obj, &req->elem); if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) { diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index dd48562..876505d 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -1211,6 +1211,14 @@ static int virtio_pci_query_nvectors(DeviceState *d) return proxy->nvectors; } +static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + PCIDevice *dev = &proxy->pci_dev; + + return pci_get_address_space(dev); +} + static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, struct virtio_pci_cap *cap) { @@ -2476,6 +2484,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) k->device_plugged = virtio_pci_device_plugged; 
k->device_unplugged = virtio_pci_device_unplugged; k->query_nvectors = virtio_pci_query_nvectors; + k->get_dma_as = virtio_pci_get_dma_as; } static const TypeInfo virtio_pci_bus_info = { diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 1edef59..e09430d 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -21,6 +21,7 @@ #include "hw/virtio/virtio-bus.h" #include "migration/migration.h" #include "hw/virtio/virtio-access.h" +#include "sysemu/dma.h" /* * The alignment to use between consumer and producer parts of vring. @@ -247,6 +248,7 @@ int virtio_queue_empty(VirtQueue *vq) static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { + AddressSpace *dma_as = virtio_get_dma_as(vq->vdev); unsigned int offset; int i; @@ -254,17 +256,17 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, for (i = 0; i < elem->in_num; i++) { size_t size = MIN(len - offset, elem->in_sg[i].iov_len); - cpu_physical_memory_unmap(elem->in_sg[i].iov_base, - elem->in_sg[i].iov_len, - 1, size); + dma_memory_unmap(dma_as, elem->in_sg[i].iov_base, elem->in_sg[i].iov_len, + DMA_DIRECTION_FROM_DEVICE, size); offset += size; } for (i = 0; i < elem->out_num; i++) - cpu_physical_memory_unmap(elem->out_sg[i].iov_base, - elem->out_sg[i].iov_len, - 0, elem->out_sg[i].iov_len); + dma_memory_unmap(dma_as, elem->out_sg[i].iov_base, + elem->out_sg[i].iov_len, + DMA_DIRECTION_TO_DEVICE, + elem->out_sg[i].iov_len); } void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, @@ -448,9 +450,9 @@ int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, return in_bytes <= in_total && out_bytes <= out_total; } -static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, - unsigned int *num_sg, unsigned int max_size, - int is_write) +static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, + hwaddr *addr, unsigned int *num_sg, + unsigned int max_size, int is_write) { unsigned int i; hwaddr len; @@ -469,7 
+471,10 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, for (i = 0; i < *num_sg; i++) { len = sg[i].iov_len; - sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); + sg[i].iov_base = dma_memory_map(virtio_get_dma_as(vdev), + addr[i], &len, is_write ? + DMA_DIRECTION_FROM_DEVICE : + DMA_DIRECTION_TO_DEVICE); if (!sg[i].iov_base) { error_report("virtio: error trying to map MMIO memory"); exit(1); @@ -491,13 +496,14 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, } } -void virtqueue_map(VirtQueueElement *elem) +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem) { - virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num, + virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)), 1); - virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num, - MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)), + virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, + MIN(ARRAY_SIZE(elem->out_sg), + ARRAY_SIZE(elem->out_addr)), 0); } @@ -562,7 +568,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem) } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max); /* Now map what we have collected */ - virtqueue_map(elem); + virtqueue_map(vdev, elem); elem->index = head; diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h index 8aec843..cca7d2a 100644 --- a/include/hw/virtio/virtio-access.h +++ b/include/hw/virtio/virtio-access.h @@ -15,8 +15,20 @@ #ifndef _QEMU_VIRTIO_ACCESS_H #define _QEMU_VIRTIO_ACCESS_H #include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" #include "exec/address-spaces.h" +static inline AddressSpace *virtio_get_dma_as(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (k->get_dma_as) { + return k->get_dma_as(qbus->parent); + } + return &address_space_memory; +} 
+ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev) { if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { @@ -47,45 +59,55 @@ static inline bool virtio_legacy_is_cross_endian(VirtIODevice *vdev) static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = virtio_get_dma_as(vdev); + if (virtio_access_is_big_endian(vdev)) { - return lduw_be_phys(&address_space_memory, pa); + return lduw_be_phys(dma_as, pa); } - return lduw_le_phys(&address_space_memory, pa); + return lduw_le_phys(dma_as, pa); } static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = virtio_get_dma_as(vdev); + if (virtio_access_is_big_endian(vdev)) { - return ldl_be_phys(&address_space_memory, pa); + return ldl_be_phys(dma_as, pa); } - return ldl_le_phys(&address_space_memory, pa); + return ldl_le_phys(dma_as, pa); } static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa) { + AddressSpace *dma_as = virtio_get_dma_as(vdev); + if (virtio_access_is_big_endian(vdev)) { - return ldq_be_phys(&address_space_memory, pa); + return ldq_be_phys(dma_as, pa); } - return ldq_le_phys(&address_space_memory, pa); + return ldq_le_phys(dma_as, pa); } static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa, uint16_t value) { + AddressSpace *dma_as = virtio_get_dma_as(vdev); + if (virtio_access_is_big_endian(vdev)) { - stw_be_phys(&address_space_memory, pa, value); + stw_be_phys(dma_as, pa, value); } else { - stw_le_phys(&address_space_memory, pa, value); + stw_le_phys(dma_as, pa, value); } } static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa, uint32_t value) { + AddressSpace *dma_as = virtio_get_dma_as(vdev); + if (virtio_access_is_big_endian(vdev)) { - stl_be_phys(&address_space_memory, pa, value); + stl_be_phys(dma_as, pa, value); } else { - stl_le_phys(&address_space_memory, pa, value); + stl_le_phys(dma_as, pa, value); } } diff --git a/include/hw/virtio/virtio-bus.h 
b/include/hw/virtio/virtio-bus.h index 6c3d4cb..638735e 100644 --- a/include/hw/virtio/virtio-bus.h +++ b/include/hw/virtio/virtio-bus.h @@ -71,6 +71,7 @@ typedef struct VirtioBusClass { * Note that changing this will break migration for this transport. */ bool has_variable_vring_alignment; + AddressSpace *(*get_dma_as)(DeviceState *d); } VirtioBusClass; struct VirtioBusState { diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 205fadf..7a8ef13 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -151,7 +151,7 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem, void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len, unsigned int idx); -void virtqueue_map(VirtQueueElement *elem); +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem); int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem); int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, unsigned int out_bytes);
Currently, all virtio devices bypass IOMMU completely. This is because address_space_memory is assumed and used during DMA emulation. This patch converts the virtio core API to use DMA API. The idea is: - introducing a new transport specific helper to query the dma address space. (only pci version is implemented). - query and use this address space during virtio device guest memory accessing With this virtio devices will not bypass IOMMU anymore. Lightly tested with intel_iommu=on with virtio guest DMA series posted in https://lkml.org/lkml/2015/10/28/64. TODO: - Feature bit for this - Implement this for all transports Signed-off-by: Jason Wang <jasowang@redhat.com> --- hw/block/virtio-blk.c | 2 +- hw/char/virtio-serial-bus.c | 2 +- hw/scsi/virtio-scsi.c | 2 +- hw/virtio/virtio-pci.c | 9 +++++++++ hw/virtio/virtio.c | 36 +++++++++++++++++++-------------- include/hw/virtio/virtio-access.h | 42 +++++++++++++++++++++++++++++---------- include/hw/virtio/virtio-bus.h | 1 + include/hw/virtio/virtio.h | 2 +- 8 files changed, 67 insertions(+), 29 deletions(-)