@@ -29,6 +29,7 @@
#define VIRTIO_F_RING_INDIRECT_DESC BIT(28)
#define VIRTIO_F_RING_EVENT_IDX BIT(29)
#define VIRTIO_F_VERSION_1 BIT(32)
+#define VIRTIO_F_IOMMU_PLATFORM BIT(33)
#define VIRTIO_TIMEOUT 5000 /* 5 sec timeout */
@@ -83,6 +84,8 @@ struct vqs {
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
+ void **desc_gpas; /* to get gpa from desc->addr (which is ioba) */
+ uint64_t bus_desc;
};
struct virtio_device {
@@ -108,6 +111,8 @@ extern struct vring_used *virtio_get_vring_used(struct virtio_device *dev, int q
extern void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
uint64_t addr, uint32_t len,
uint16_t flags, uint16_t next);
+extern void virtio_free_desc(struct vqs *vq, int id, uint64_t features);
+void *virtio_desc_addr(struct virtio_device *vdev, int queue, int id);
extern struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id);
extern void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id);
@@ -129,6 +129,10 @@ static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *r
// do something better
mb();
}
+
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+
if (i == 0) {
return -1;
}
@@ -195,6 +195,10 @@ virtioblk_transfer(struct virtio_device *dev, char *buf, uint64_t blocknum,
break;
}
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+ virtio_free_desc(vq, id + 2, dev->features);
+
if (status == 0)
return cnt;
@@ -255,6 +255,9 @@ static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
idx = virtio_modern16_to_cpu(vdev, vq_tx->avail->idx);
id = (idx * 2) % vq_tx->size;
+ virtio_free_desc(vq_tx, id, vdev->features);
+ virtio_free_desc(vq_tx, id + 1, vdev->features);
+
/* Set up virtqueue descriptor for header */
virtio_fill_desc(vq_tx, id, vdev->features, (uint64_t)nethdr,
net_hdr_size, VRING_DESC_F_NEXT, id + 1);
@@ -317,7 +320,7 @@ static int virtionet_receive(struct virtio_net *vnet, char *buf, int maxlen)
#endif
/* Copy data to destination buffer */
- memcpy(buf, (void *)virtio_modern64_to_cpu(vdev, vq_rx->desc[id].addr), len);
+ memcpy(buf, virtio_desc_addr(vdev, VQ_RX, id), len);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
@@ -81,6 +81,11 @@ int virtioscsi_send(struct virtio_device *dev,
break;
}
+ virtio_free_desc(vq, id, dev->features);
+ virtio_free_desc(vq, id + 1, dev->features);
+ if (!(buf == NULL || buf_len == 0))
+ virtio_free_desc(vq, id + 2, dev->features);
+
return 0;
}
@@ -108,7 +108,7 @@ void virtio_serial_shutdown(struct virtio_device *dev)
int virtio_serial_putchar(struct virtio_device *dev, char c)
{
- int id;
+ int id, ret;
uint32_t time;
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
@@ -133,17 +133,21 @@ int virtio_serial_putchar(struct virtio_device *dev, char c)
virtio_queue_notify(dev, TX_Q);
/* Wait for host to consume the descriptor */
+ ret = 1;
time = SLOF_GetTimer() + VIRTIO_TIMEOUT;
while (*current_used_idx == last_used_idx) {
// do something better
mb();
if (time < SLOF_GetTimer()) {
printf("virtio_serial_putchar failed! \n");
- return 0;
+ ret = 0;
+ break;
}
}
- return 1;
+ virtio_free_desc(vq, id, dev->features);
+
+ return ret;
}
char virtio_serial_getchar(struct virtio_device *dev)
@@ -163,7 +167,7 @@ char virtio_serial_getchar(struct virtio_device *dev)
% vq_rx->size;
/* Copy data to destination buffer */
- memcpy(buf, (void *)virtio_modern64_to_cpu(dev, vq_rx->desc[id - 1].addr), RX_ELEM_SIZE);
+ memcpy(buf, virtio_desc_addr(dev, RX_Q, id - 1), RX_ELEM_SIZE);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
@@ -273,6 +273,17 @@ void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
next %= vq->size;
if (features & VIRTIO_F_VERSION_1) {
+ if (features & VIRTIO_F_IOMMU_PLATFORM) {
+ void *gpa = (void *) addr;
+
+ if (!vq->desc_gpas) {
+ fprintf(stderr, "IOMMU setup has not been done!\n");
+ return;
+ }
+
+ addr = SLOF_dma_map_in(gpa, len, 0);
+ vq->desc_gpas[id] = gpa;
+ }
desc->addr = cpu_to_le64(addr);
desc->len = cpu_to_le32(len);
desc->flags = cpu_to_le16(flags);
@@ -285,6 +296,34 @@ void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
}
}
+void virtio_free_desc(struct vqs *vq, int id, uint64_t features)
+{
+ struct vring_desc *desc;
+
+ id %= vq->size;
+ desc = &vq->desc[id];
+
+ if (!(features & VIRTIO_F_VERSION_1) ||
+ !(features & VIRTIO_F_IOMMU_PLATFORM))
+ return;
+
+ if (!vq->desc_gpas[id])
+ return;
+
+ SLOF_dma_map_out(le64_to_cpu(desc->addr), 0, le32_to_cpu(desc->len));
+ vq->desc_gpas[id] = NULL;
+}
+
+void *virtio_desc_addr(struct virtio_device *vdev, int queue, int id)
+{
+ struct vqs *vq = &vdev->vq[queue];
+
+ if (vq->desc_gpas)
+ return vq->desc_gpas[id];
+
+ return (void *) virtio_modern64_to_cpu(vdev, vq->desc[id].addr);
+}
+
/**
* Reset virtio device
*/
@@ -326,6 +365,21 @@ static void virtio_set_qaddr(struct virtio_device *dev, int queue, unsigned long
uint64_t q_used;
uint32_t q_size = virtio_get_qsize(dev, queue);
+ if (dev->features & VIRTIO_F_IOMMU_PLATFORM) {
+ unsigned long cb;
+
+ cb = q_size * sizeof(struct vring_desc);
+ cb += sizeof(struct vring_avail) +
+ sizeof(uint16_t) * q_size;
+ cb = VQ_ALIGN(cb);
+ cb += sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * q_size;
+ cb = VQ_ALIGN(cb);
+ q_desc = SLOF_dma_map_in((void *)q_desc, cb, 0);
+
+ dev->vq[queue].bus_desc = q_desc;
+ }
+
virtio_pci_write64(dev->common.addr + offset_of(struct virtio_dev_common, q_desc), q_desc);
q_avail = q_desc + q_size * sizeof(struct vring_desc);
virtio_pci_write64(dev->common.addr + offset_of(struct virtio_dev_common, q_avail), q_avail);
@@ -372,14 +426,42 @@ struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id)
vq->avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
vq->avail->idx = 0;
+ if (dev->features & VIRTIO_F_IOMMU_PLATFORM)
+ vq->desc_gpas = SLOF_alloc_mem_aligned(
+ vq->size * sizeof(vq->desc_gpas[0]), 4096);
return vq;
}
/**
 * Tear down a virtqueue: drop any outstanding IOMMU mappings, free the
 * desc_gpas shadow table, unmap the ring itself (if it was mapped through
 * the IOMMU at setup time) and free the ring memory.
 *
 * @param dev  virtio device owning the queue
 * @param vq   virtqueue to destroy (zeroed on return)
 * @param id   queue index, used to re-read the queue size from the device
 */
void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id)
{
	if (vq->desc_gpas) {
		int i;

		/* Unmap every descriptor that still holds a DMA mapping */
		for (i = 0; i < vq->size; ++i)
			virtio_free_desc(vq, i, dev->features);

		SLOF_free_mem(vq->desc_gpas,
			vq->size * sizeof(vq->desc_gpas[0]));
	}
	if (vq->desc) {
		if (dev->features & VIRTIO_F_IOMMU_PLATFORM) {
			unsigned long cb;
			uint32_t q_size = virtio_get_qsize(dev, id);

			/*
			 * Recompute the ring footprint (desc + avail + used,
			 * each section aligned) exactly as it was computed
			 * when the ring was mapped in at queue-address setup,
			 * so the unmap length matches the mapped length.
			 */
			cb = q_size * sizeof(struct vring_desc);
			cb += sizeof(struct vring_avail) +
				sizeof(uint16_t) * q_size;
			cb = VQ_ALIGN(cb);
			cb += sizeof(struct vring_used) +
				sizeof(struct vring_used_elem) * q_size;
			cb = VQ_ALIGN(cb);

			/* bus_desc is the ioba saved when the ring was mapped */
			SLOF_dma_map_out(vq->bus_desc, 0, cb);
		}

		SLOF_free_mem(vq->desc, virtio_vring_size(vq->size));
	}
	/* Leave the vqs structure fully reset for potential reuse */
	memset(vq, 0, sizeof(*vq));
}
@@ -473,6 +555,9 @@ int virtio_negotiate_guest_features(struct virtio_device *dev, uint64_t features
return -1;
}
+ if (host_features & VIRTIO_F_IOMMU_PLATFORM)
+ features |= VIRTIO_F_IOMMU_PLATFORM;
+
virtio_set_guest_features(dev, features);
host_features = virtio_get_host_features(dev);
if ((host_features & features) != features) {