@@ -99,6 +99,7 @@ OF_FFS_FILES = \
$(SLOFCMNDIR)/fs/graphics.fs \
$(SLOFCMNDIR)/fs/generic-disk.fs \
$(SLOFCMNDIR)/fs/dma-function.fs \
+ $(SLOFCMNDIR)/fs/dma-instance-function.fs \
$(SLOFCMNDIR)/fs/pci-device.fs \
$(SLOFCMNDIR)/fs/pci-bridge.fs \
$(SLOFCMNDIR)/fs/pci-properties.fs \
@@ -29,6 +29,7 @@
#define VIRTIO_F_RING_INDIRECT_DESC BIT(28)
#define VIRTIO_F_RING_EVENT_IDX BIT(29)
#define VIRTIO_F_VERSION_1 BIT(32)
+#define VIRTIO_F_IOMMU_PLATFORM BIT(33)
#define VIRTIO_TIMEOUT 5000 /* 5 sec timeout */
@@ -84,6 +85,8 @@ struct vqs {
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
+ void **desc_gpas; /* to get gpa from desc->addr (which is ioba) */
+ uint64_t bus_desc;
};
struct virtio_device {
@@ -109,6 +112,8 @@ extern struct vring_used *virtio_get_vring_used(struct virtio_device *dev, int q
extern void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
uint64_t addr, uint32_t len,
uint16_t flags, uint16_t next);
+extern void virtio_free_desc(struct vqs *vq, int id, uint64_t features);
+void *virtio_desc_addr(struct virtio_device *vdev, int queue, int id);
extern struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id);
extern void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id);
@@ -129,6 +129,10 @@ static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *r
// do something better
mb();
}
+
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+
if (i == 0) {
return -1;
}
@@ -195,6 +195,10 @@ virtioblk_transfer(struct virtio_device *dev, char *buf, uint64_t blocknum,
break;
}
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+ virtio_free_desc(vq, id + 2, dev->features);
+
if (status == 0)
return cnt;
@@ -255,6 +255,9 @@ static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
idx = virtio_modern16_to_cpu(vdev, vq_tx->avail->idx);
id = (idx * 2) % vq_tx->size;
+ virtio_free_desc(vq_tx, id, vdev->features);
+ virtio_free_desc(vq_tx, id + 1, vdev->features);
+
/* Set up virtqueue descriptor for header */
virtio_fill_desc(vq_tx, id, vdev->features, (uint64_t)nethdr,
net_hdr_size, VRING_DESC_F_NEXT, id + 1);
@@ -317,7 +320,7 @@ static int virtionet_receive(struct virtio_net *vnet, char *buf, int maxlen)
#endif
/* Copy data to destination buffer */
- memcpy(buf, (void *)virtio_modern64_to_cpu(vdev, vq_rx->desc[id].addr), len);
+ memcpy(buf, virtio_desc_addr(vdev, VQ_RX, id), len);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
@@ -81,6 +81,11 @@ int virtioscsi_send(struct virtio_device *dev,
break;
}
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+ if (!(buf == NULL || buf_len == 0))
+ virtio_free_desc(vq, id + 2, dev->features);
+
return 0;
}
@@ -108,7 +108,7 @@ void virtio_serial_shutdown(struct virtio_device *dev)
int virtio_serial_putchar(struct virtio_device *dev, char c)
{
- int id;
+ int id, ret;
uint32_t vq_size, time;
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
@@ -135,17 +135,22 @@ int virtio_serial_putchar(struct virtio_device *dev, char c)
virtio_queue_notify(dev, TX_Q);
/* Wait for host to consume the descriptor */
+ ret = 1;
time = SLOF_GetTimer() + VIRTIO_TIMEOUT;
while (*current_used_idx == last_used_idx) {
// do something better
mb();
if (time < SLOF_GetTimer()) {
printf("virtio_serial_putchar failed! \n");
- return 0;
+ ret = 0;
+ break;
}
}
- return 1;
+free_exit:
+ virtio_free_desc(vq, id, dev->features);
+
+ return ret;
}
char virtio_serial_getchar(struct virtio_device *dev)
@@ -165,7 +170,7 @@ char virtio_serial_getchar(struct virtio_device *dev)
% vq_rx->size;
/* Copy data to destination buffer */
- memcpy(buf, (void *)virtio_modern64_to_cpu(dev, vq_rx->desc[id - 1].addr), RX_ELEM_SIZE);
+ memcpy(buf, virtio_desc_addr(dev, RX_Q, id - 1), RX_ELEM_SIZE);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
@@ -273,6 +273,17 @@ void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
next %= vq->size;
if (features & VIRTIO_F_VERSION_1) {
+ if (features & VIRTIO_F_IOMMU_PLATFORM) {
+ void *gpa = (void *) addr;
+
+ if (!vq->desc_gpas) {
+ fprintf(stderr, "IOMMU setup has not been done!\n");
+ return;
+ }
+
+ addr = SLOF_dma_map_in(gpa, len, 0);
+ vq->desc_gpas[id] = gpa;
+ }
desc->addr = cpu_to_le64(addr);
desc->len = cpu_to_le32(len);
desc->flags = cpu_to_le16(flags);
@@ -285,6 +296,32 @@ void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
}
}
+void virtio_free_desc(struct vqs *vq, int id, uint64_t features)
+{
+ struct vring_desc *desc;
+
+ id %= vq->size;
+ desc = &vq->desc[id];
+
+ if (features & VIRTIO_F_VERSION_1) {
+ if (features & VIRTIO_F_IOMMU_PLATFORM) {
+ SLOF_dma_map_out(le64_to_cpu(desc->addr),
+ 0, le32_to_cpu(desc->len));
+ vq->desc_gpas[id] = NULL;
+ }
+ }
+}
+
+void *virtio_desc_addr(struct virtio_device *vdev, int queue, int id)
+{
+ struct vqs *vq = &vdev->vq[queue];
+
+ if (vq->desc_gpas)
+ return vq->desc_gpas[id];
+
+ return (void *) virtio_modern64_to_cpu(vdev, vq->desc[id].addr);
+}
+
/**
* Reset virtio device
*/
@@ -326,6 +363,19 @@ static void virtio_set_qaddr(struct virtio_device *dev, int queue, unsigned long
uint64_t q_used;
uint32_t q_size = virtio_get_qsize(dev, queue);
+ if (dev->features & VIRTIO_F_IOMMU_PLATFORM) {
+ unsigned long cb;
+
+ cb = q_size * sizeof(struct vring_desc);
+ cb += sizeof(struct vring_avail) + sizeof(uint16_t) * q_size;
+ cb = VQ_ALIGN(cb);
+ cb += sizeof(struct vring_used) + sizeof(uint16_t) * q_size;
+ cb = VQ_ALIGN(cb);
+ q_desc = SLOF_dma_map_in((void *)q_desc, cb, 0);
+
+ dev->vq[queue].bus_desc = q_desc;
+ }
+
virtio_pci_write64(dev->common.addr + offset_of(struct virtio_dev_common, q_desc), q_desc);
q_avail = q_desc + q_size * sizeof(struct vring_desc);
virtio_pci_write64(dev->common.addr + offset_of(struct virtio_dev_common, q_avail), q_avail);
@@ -373,14 +423,41 @@ struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id)
vq->avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
vq->avail->idx = 0;
+ if (dev->features & VIRTIO_F_IOMMU_PLATFORM)
+ vq->desc_gpas = SLOF_alloc_mem_aligned(
+ vq->size * sizeof(vq->desc_gpas[0]), 4096);
return vq;
}
void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id)
{
- if (vq->desc)
+ if (vq->desc_gpas) {
+ int i;
+
+ for (i = 0; i < vq->size; ++i)
+ virtio_free_desc(vq, i, dev->features);
+
+ memset(vq->desc_gpas, 0, vq->size * sizeof(vq->desc_gpas[0]));
+ SLOF_free_mem(vq->desc_gpas,
+ vq->size * sizeof(vq->desc_gpas[0]));
+ }
+ if (vq->desc) {
+ if (dev->features & VIRTIO_F_IOMMU_PLATFORM) {
+ unsigned long cb;
+ uint32_t q_size = virtio_get_qsize(dev, vq->id);
+
+ cb = q_size * sizeof(struct vring_desc);
+ cb += sizeof(struct vring_avail) + sizeof(uint16_t) * q_size;
+ cb = VQ_ALIGN(cb);
+ cb += sizeof(struct vring_used) + sizeof(uint16_t) * q_size;
+ cb = VQ_ALIGN(cb);
+
+ SLOF_dma_map_out(vq->bus_desc, 0, cb);
+ }
+
SLOF_free_mem(vq->desc, virtio_vring_size(vq->size));
+ }
memset(vq, 0, sizeof(*vq));
}
@@ -474,6 +551,9 @@ int virtio_negotiate_guest_features(struct virtio_device *dev, uint64_t features
return -1;
}
+ if (host_features & VIRTIO_F_IOMMU_PLATFORM)
+ features |= VIRTIO_F_IOMMU_PLATFORM;
+
virtio_set_guest_features(dev, features);
host_features = virtio_get_host_features(dev);
if ((host_features & features) != features) {
@@ -491,6 +571,7 @@ int virtio_negotiate_guest_features(struct virtio_device *dev, uint64_t features
return -1;
dev->features = features;
+ printf("Negotiated %llx\n", dev->features);
return 0;
}
@@ -143,6 +143,9 @@ check-for-nvramrc
8a0 cp
+\ For DMA functions used by client/package instances.
+#include "dma-instance-function.fs"
+
\ The client interface.
#include "client.fs"
\ ELF binary file format.
new file mode 100644
@@ -0,0 +1,28 @@
+\ ****************************************************************************/
+\ * Copyright (c) 2011 IBM Corporation
+\ * All rights reserved.
+\ * This program and the accompanying materials
+\ * are made available under the terms of the BSD License
+\ * which accompanies this distribution, and is available at
+\ * http://www.opensource.org/licenses/bsd-license.php
+\ *
+\ * Contributors:
+\ * IBM Corporation - initial implementation
+\ ****************************************************************************/
+
+\ DMA memory allocation functions
+: dma-alloc ( size -- virt )
+ s" dma-alloc" $call-parent
+;
+
+: dma-free ( virt size -- )
+ s" dma-free" $call-parent
+;
+
+: dma-map-in ( virt size cacheable? -- devaddr )
+ s" dma-map-in" $call-parent
+;
+
+: dma-map-out ( virt devaddr size -- )
+ s" dma-map-out" $call-parent
+;

When QEMU is started with iommu_platform=on, the guest driver must
accept it or the device will fail.

This enables IOMMU support for the virtio-net, -scsi, -block, -serial
and -9p devices. -serial and -9p are only compile-tested, though.

For virtio-net we map all RX buffers once, map TX buffers when xmit()
is called, and unmap older pages when we are about to reuse the VQ
descriptor. As all other devices are synchronous, we unmap IOMMU pages
right after completion of a transaction.

This depends on QEMU's:
https://patchwork.ozlabs.org/patch/1194067/

This should apply on top of:
https://patchwork.ozlabs.org/patch/1194075/

Copied-bits-and-pieces-from: Michael Roth <mdroth@linux.vnet.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
Changes:
v2:
* added Mike's fs/dma-instance-function.fs
* total rework
---
 board-qemu/slof/Makefile         |  1 +
 lib/libvirtio/virtio.h           |  5 ++
 lib/libvirtio/virtio-9p.c        |  4 ++
 lib/libvirtio/virtio-blk.c       |  4 ++
 lib/libvirtio/virtio-net.c       |  5 +-
 lib/libvirtio/virtio-scsi.c      |  5 ++
 lib/libvirtio/virtio-serial.c    | 13 +++--
 lib/libvirtio/virtio.c           | 83 +++++++++++++++++++++++++++++++-
 board-qemu/slof/OF.fs            |  3 ++
 slof/fs/dma-instance-function.fs | 28 +++++++++++
 10 files changed, 145 insertions(+), 6 deletions(-)
 create mode 100644 slof/fs/dma-instance-function.fs
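
For reviewers unfamiliar with the new helpers, here is a minimal sketch
(illustrative only, not part of the patch) of the fill/notify/free
sequence a synchronous driver follows once VIRTIO_F_IOMMU_PLATFORM has
been negotiated. example_sync_transfer() and its parameters are made up
for illustration; the descriptor flags are the standard virtio names,
and the helper calls come from the hunks above.

/*
 * Illustrative sketch only -- not part of the patch.
 * virtio_fill_desc() maps the guest buffer via SLOF_dma_map_in() and
 * remembers the guest physical address in vq->desc_gpas[];
 * virtio_free_desc() unmaps that IOMMU page once the host is done.
 */
static int example_sync_transfer(struct virtio_device *dev, struct vqs *vq,
				 int id, void *req, uint32_t req_len,
				 void *buf, uint32_t buf_len)
{
	/* Request header, device-readable, chained to the data buffer */
	virtio_fill_desc(vq, id, dev->features, (uint64_t)req, req_len,
			 VRING_DESC_F_NEXT, id + 1);
	/* Data buffer, device-writable */
	virtio_fill_desc(vq, id + 1, dev->features, (uint64_t)buf, buf_len,
			 VRING_DESC_F_WRITE, 0);

	virtio_queue_notify(dev, 0);

	/* ... publish id on the avail ring and poll the used ring here,
	 *     exactly as the real drivers do ... */

	/* Synchronous device: unmap the IOMMU pages right after completion */
	virtio_free_desc(vq, id, dev->features);
	virtio_free_desc(vq, id + 1, dev->features);

	return 0;
}

The sketch unmaps immediately because the request is synchronous;
virtio-net is the exception described above, where RX mappings persist
and virtio_free_desc() is only called when a descriptor slot is reused.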