@@ -17,32 +17,32 @@
static inline uint16_t virtio_cpu_to_modern16(struct virtio_device *dev, uint16_t val)
{
- return dev->is_modern ? cpu_to_le16(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? cpu_to_le16(val) : val;
}
static inline uint32_t virtio_cpu_to_modern32(struct virtio_device *dev, uint32_t val)
{
- return dev->is_modern ? cpu_to_le32(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? cpu_to_le32(val) : val;
}
static inline uint64_t virtio_cpu_to_modern64(struct virtio_device *dev, uint64_t val)
{
- return dev->is_modern ? cpu_to_le64(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? cpu_to_le64(val) : val;
}
static inline uint16_t virtio_modern16_to_cpu(struct virtio_device *dev, uint16_t val)
{
- return dev->is_modern ? le16_to_cpu(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? le16_to_cpu(val) : val;
}
static inline uint32_t virtio_modern32_to_cpu(struct virtio_device *dev, uint32_t val)
{
- return dev->is_modern ? le32_to_cpu(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? le32_to_cpu(val) : val;
}
static inline uint64_t virtio_modern64_to_cpu(struct virtio_device *dev, uint64_t val)
{
- return dev->is_modern ? le64_to_cpu(val) : val;
+ return (dev->features & VIRTIO_F_VERSION_1) ? le64_to_cpu(val) : val;
}
#endif /* _LIBVIRTIO_INTERNAL_H */
@@ -27,8 +27,6 @@ enum {
struct virtio_net {
net_driver_t driver;
struct virtio_device vdev;
- struct vqs vq_rx;
- struct vqs vq_tx;
};
/* VIRTIO_NET Feature bits */
@@ -14,7 +14,6 @@
#define _LIBVIRTIO_H
#include <stdint.h>
-#include <stdbool.h>
/* Device status bits */
#define VIRTIO_STAT_ACKNOWLEDGE 1
@@ -78,8 +77,16 @@ struct virtio_cap {
uint8_t cap_id;
};
+struct vqs {
+ uint32_t size;
+ void *buf_mem;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
struct virtio_device {
- uint32_t is_modern; /* Indicates whether to use virtio 1.0 */
+ uint64_t features;
struct virtio_cap legacy;
struct virtio_cap common;
struct virtio_cap notify;
@@ -87,15 +94,7 @@ struct virtio_device {
struct virtio_cap device;
struct virtio_cap pci;
uint32_t notify_off_mul;
-};
-
-struct vqs {
- uint64_t id; /* Queue ID */
- uint32_t size;
- void *buf_mem;
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
+ struct vqs vq[3];
};
/* Parts of the virtqueue are aligned on a 4096 byte page boundary */
@@ -106,10 +105,10 @@ extern unsigned int virtio_get_qsize(struct virtio_device *dev, int queue);
extern struct vring_desc *virtio_get_vring_desc(struct virtio_device *dev, int queue);
extern struct vring_avail *virtio_get_vring_avail(struct virtio_device *dev, int queue);
extern struct vring_used *virtio_get_vring_used(struct virtio_device *dev, int queue);
-extern void virtio_fill_desc(struct vring_desc *desc, bool is_modern,
+extern void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
uint64_t addr, uint32_t len,
uint16_t flags, uint16_t next);
-extern int virtio_queue_init_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id);
+extern struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id);
extern void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id);
extern struct virtio_device *virtio_setup_vd(void);
@@ -89,25 +89,19 @@ static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *r
uint32_t *rx_size)
{
struct virtio_device *dev = opaque;
- struct vring_desc *desc;
int id, i;
uint32_t vq_size;
- struct vring_desc *vq_desc;
- struct vring_avail *vq_avail;
- struct vring_used *vq_used;
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
+ struct vqs *vq = &dev->vq[0];
/* Virt IO queues. */
vq_size = virtio_get_qsize(dev, 0);
- vq_desc = virtio_get_vring_desc(dev, 0);
- vq_avail = virtio_get_vring_avail(dev, 0);
- vq_used = virtio_get_vring_used(dev, 0);
- last_used_idx = vq_used->idx;
- current_used_idx = &vq_used->idx;
+ last_used_idx = vq->used->idx;
+ current_used_idx = &vq->used->idx;
- avail_idx = virtio_modern16_to_cpu(dev, vq_avail->idx);
+ avail_idx = virtio_modern16_to_cpu(dev, vq->avail->idx);
/* Determine descriptor index */
id = (avail_idx * 3) % vq_size;
@@ -115,19 +109,18 @@ static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *r
/* TX in first queue item. */
dprint_buffer("TX", tx, tx_size);
- desc = &vq_desc[id];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)tx, tx_size,
- VRING_DESC_F_NEXT, (id + 1) % vq_size);
+ virtio_fill_desc(vq, id, dev->features, (uint64_t)tx, tx_size,
+ VRING_DESC_F_NEXT, id + 1);
/* RX in the second queue item. */
- desc = &vq_desc[(id + 1) % vq_size];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)rx, *rx_size,
+ virtio_fill_desc(vq, id + 1, dev->features, (uint64_t)rx,
+ *rx_size,
VRING_DESC_F_WRITE, 0);
/* Tell HV that the queue is ready */
- vq_avail->ring[avail_idx % vq_size] = virtio_cpu_to_modern16 (dev, id);
+ vq->avail->ring[avail_idx % vq_size] = virtio_cpu_to_modern16 (dev, id);
mb();
- vq_avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
+ vq->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
virtio_queue_notify(dev, 0);
/* Receive the response. */
@@ -161,7 +154,7 @@ static int virtio_9p_transact(void *opaque, uint8_t *tx, int tx_size, uint8_t *r
int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf,
int buf_size)
{
- struct vqs vq;
+ struct vqs *vq;
int status = VIRTIO_STAT_ACKNOWLEDGE;
/* Check for double open */
@@ -182,7 +175,7 @@ int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf,
virtio_set_status(dev, status);
/* Device specific setup - we do not support special features */
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
if (virtio_negotiate_guest_features(dev, VIRTIO_F_VERSION_1))
goto dev_error;
virtio_get_status(dev, &status);
@@ -190,12 +183,10 @@ int virtio_9p_init(struct virtio_device *dev, void *tx_buf, void *rx_buf,
virtio_set_guest_features(dev, 0);
}
- if (virtio_queue_init_vq(dev, &vq, 0))
+ vq = virtio_queue_init_vq(dev, 0);
+ if (!vq)
goto dev_error;
- vq.avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
- vq.avail->idx = 0;
-
/* Tell HV that setup succeeded */
status |= VIRTIO_STAT_DRIVER_OK;
virtio_set_status(dev, status);
@@ -11,6 +11,7 @@
*****************************************************************************/
#include <stdio.h>
+#include <stdbool.h>
#include <cpu.h>
#include <helpers.h>
#include <byteorder.h>
@@ -28,7 +29,7 @@
int
virtioblk_init(struct virtio_device *dev)
{
- struct vqs vq;
+ struct vqs *vq;
int blk_size = DEFAULT_SECTOR_SIZE;
uint64_t features;
int status = VIRTIO_STAT_ACKNOWLEDGE;
@@ -43,7 +44,7 @@ virtioblk_init(struct virtio_device *dev)
status |= VIRTIO_STAT_DRIVER;
virtio_set_status(dev, status);
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
/* Negotiate features and sets FEATURES_OK if successful */
if (virtio_negotiate_guest_features(dev, DRIVER_FEATURE_SUPPORT))
goto dev_error;
@@ -54,12 +55,10 @@ virtioblk_init(struct virtio_device *dev)
virtio_set_guest_features(dev, VIRTIO_BLK_F_BLK_SIZE);
}
- if (virtio_queue_init_vq(dev, &vq, 0))
+ vq = virtio_queue_init_vq(dev, 0);
+ if (!vq)
goto dev_error;
- vq.avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
- vq.avail->idx = 0;
-
/* Tell HV that setup succeeded */
status |= VIRTIO_STAT_DRIVER_OK;
virtio_set_status(dev, status);
@@ -121,15 +120,12 @@ int
virtioblk_transfer(struct virtio_device *dev, char *buf, uint64_t blocknum,
long cnt, unsigned int type)
{
- struct vring_desc *desc;
int id;
static struct virtio_blk_req blkhdr;
//struct virtio_blk_config *blkconf;
uint64_t capacity;
- uint32_t vq_size, time;
- struct vring_desc *vq_desc; /* Descriptor vring */
- struct vring_avail *vq_avail; /* "Available" vring */
- struct vring_used *vq_used; /* "Used" vring */
+ uint32_t time;
+ struct vqs *vq = &dev->vq[0];
volatile uint8_t status = -1;
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
@@ -155,43 +151,37 @@ virtioblk_transfer(struct virtio_device *dev, char *buf, uint64_t blocknum,
return 0;
}
- vq_size = virtio_get_qsize(dev, 0);
- vq_desc = virtio_get_vring_desc(dev, 0);
- vq_avail = virtio_get_vring_avail(dev, 0);
- vq_used = virtio_get_vring_used(dev, 0);
+ avail_idx = virtio_modern16_to_cpu(dev, vq->avail->idx);
- avail_idx = virtio_modern16_to_cpu(dev, vq_avail->idx);
-
- last_used_idx = vq_used->idx;
- current_used_idx = &vq_used->idx;
+ last_used_idx = vq->used->idx;
+ current_used_idx = &vq->used->idx;
/* Set up header */
- fill_blk_hdr(&blkhdr, dev->is_modern, type | VIRTIO_BLK_T_BARRIER,
+ fill_blk_hdr(&blkhdr, dev->features, type | VIRTIO_BLK_T_BARRIER,
1, blocknum * blk_size / DEFAULT_SECTOR_SIZE);
/* Determine descriptor index */
- id = (avail_idx * 3) % vq_size;
+ id = (avail_idx * 3) % vq->size;
/* Set up virtqueue descriptor for header */
- desc = &vq_desc[id];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)&blkhdr,
+ virtio_fill_desc(vq, id, dev->features, (uint64_t)&blkhdr,
sizeof(struct virtio_blk_req),
- VRING_DESC_F_NEXT, (id + 1) % vq_size);
+ VRING_DESC_F_NEXT, id + 1);
/* Set up virtqueue descriptor for data */
- desc = &vq_desc[(id + 1) % vq_size];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)buf, cnt * blk_size,
+ virtio_fill_desc(vq, id + 1, dev->features, (uint64_t)buf,
+ cnt * blk_size,
VRING_DESC_F_NEXT | ((type & 1) ? 0 : VRING_DESC_F_WRITE),
- (id + 2) % vq_size);
+ id + 2);
/* Set up virtqueue descriptor for status */
- desc = &vq_desc[(id + 2) % vq_size];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)&status, 1,
+ virtio_fill_desc(vq, id + 2, dev->features,
+ (uint64_t)&status, 1,
VRING_DESC_F_WRITE, 0);
- vq_avail->ring[avail_idx % vq_size] = virtio_cpu_to_modern16 (dev, id);
+ vq->avail->ring[avail_idx % vq->size] = virtio_cpu_to_modern16 (dev, id);
mb();
- vq_avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
+ vq->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
/* Tell HV that the queue is ready */
virtio_queue_notify(dev, 0);
@@ -89,8 +89,8 @@ static int virtionet_init_pci(struct virtio_net *vnet, struct virtio_device *dev
* second the transmit queue, and the forth is the control queue for
* networking options.
* We are only interested in the receive and transmit queue here. */
- if (virtio_queue_init_vq(vdev, &vnet->vq_rx, VQ_RX) ||
- virtio_queue_init_vq(vdev, &vnet->vq_tx, VQ_TX)) {
+ if (!virtio_queue_init_vq(vdev, VQ_RX) ||
+ !virtio_queue_init_vq(vdev, VQ_TX)) {
virtio_set_status(vdev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER
|VIRTIO_STAT_FAILED);
return -1;
@@ -113,8 +113,7 @@ static int virtionet_init(struct virtio_net *vnet)
int status = VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER;
struct virtio_device *vdev = &vnet->vdev;
net_driver_t *driver = &vnet->driver;
- struct vqs *vq_tx = &vnet->vq_tx;
- struct vqs *vq_rx = &vnet->vq_rx;
+ struct vqs *vq_tx = &vdev->vq[VQ_TX], *vq_rx = &vdev->vq[VQ_RX];
dprintf("virtionet_init(%02x:%02x:%02x:%02x:%02x:%02x)\n",
driver->mac_addr[0], driver->mac_addr[1],
@@ -128,7 +127,7 @@ static int virtionet_init(struct virtio_net *vnet)
virtio_set_status(vdev, status);
/* Device specific setup */
- if (vdev->is_modern) {
+ if (vdev->features & VIRTIO_F_VERSION_1) {
if (virtio_negotiate_guest_features(vdev, DRIVER_FEATURE_SUPPORT))
goto dev_error;
net_hdr_size = sizeof(struct virtio_net_hdr_v1);
@@ -152,11 +151,11 @@ static int virtionet_init(struct virtio_net *vnet)
+ i * (BUFFER_ENTRY_SIZE+net_hdr_size);
uint32_t id = i*2;
/* Descriptor for net_hdr: */
- virtio_fill_desc(&vq_rx->desc[id], vdev->is_modern, addr, net_hdr_size,
+ virtio_fill_desc(vq_rx, id, vdev->features, addr, net_hdr_size,
VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);
/* Descriptor for data: */
- virtio_fill_desc(&vq_rx->desc[id+1], vdev->is_modern, addr + net_hdr_size,
+ virtio_fill_desc(vq_rx, id + 1, vdev->features, addr + net_hdr_size,
BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);
vq_rx->avail->ring[i] = virtio_cpu_to_modern16(vdev, id);
@@ -200,8 +199,8 @@ static int virtionet_term(struct virtio_net *vnet)
{
struct virtio_device *vdev = &vnet->vdev;
net_driver_t *driver = &vnet->driver;
- struct vqs *vq_rx = &vnet->vq_rx;
- struct vqs *vq_tx = &vnet->vq_tx;
+ struct vqs *vq_tx = &vnet->vdev.vq[VQ_TX];
+ struct vqs *vq_rx = &vnet->vdev.vq[VQ_RX];
dprintf("virtionet_term()\n");
@@ -237,7 +236,7 @@ static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
static struct virtio_net_hdr nethdr_legacy;
void *nethdr = &nethdr_legacy;
struct virtio_device *vdev = &vnet->vdev;
- struct vqs *vq_tx = &vnet->vq_tx;
+ struct vqs *vq_tx = &vdev->vq[VQ_TX];
if (len > BUFFER_ENTRY_SIZE) {
printf("virtionet: Packet too big!\n");
@@ -246,7 +245,7 @@ static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
dprintf("\nvirtionet_xmit(packet at %p, %d bytes)\n", buf, len);
- if (vdev->is_modern)
+ if (vdev->features & VIRTIO_F_VERSION_1)
nethdr = &nethdr_v1;
memset(nethdr, 0, net_hdr_size);
@@ -256,11 +255,11 @@ static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
id = (idx * 2) % vq_tx->size;
/* Set up virtqueue descriptor for header */
- virtio_fill_desc(&vq_tx->desc[id], vdev->is_modern, (uint64_t)nethdr,
+ virtio_fill_desc(vq_tx, id, vdev->features, (uint64_t)nethdr,
net_hdr_size, VRING_DESC_F_NEXT, id + 1);
/* Set up virtqueue descriptor for data */
- virtio_fill_desc(&vq_tx->desc[id+1], vdev->is_modern, (uint64_t)buf, len, 0, 0);
+ virtio_fill_desc(vq_tx, id + 1, vdev->features, (uint64_t)buf, len, 0, 0);
vq_tx->avail->ring[idx % vq_tx->size] = virtio_cpu_to_modern16(vdev, id);
sync();
@@ -283,7 +282,7 @@ static int virtionet_receive(struct virtio_net *vnet, char *buf, int maxlen)
uint32_t id, idx;
uint16_t avail_idx;
struct virtio_device *vdev = &vnet->vdev;
- struct vqs *vq_rx = &vnet->vq_rx;
+ struct vqs *vq_rx = &vnet->vdev.vq[VQ_RX];
idx = virtio_modern16_to_cpu(vdev, vq_rx->used->idx);
@@ -23,63 +23,54 @@ int virtioscsi_send(struct virtio_device *dev,
struct virtio_scsi_resp_cmd *resp,
int is_read, void *buf, uint64_t buf_len)
{
- struct vring_desc *vq_desc; /* Descriptor vring */
- struct vring_avail *vq_avail; /* "Available" vring */
- struct vring_used *vq_used; /* "Used" vring */
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
int id;
- uint32_t vq_size, time;
+ uint32_t time;
+ struct vqs *vq = &dev->vq[VIRTIO_SCSI_REQUEST_VQ];
- int vq = VIRTIO_SCSI_REQUEST_VQ;
+ avail_idx = virtio_modern16_to_cpu(dev, vq->avail->idx);
- vq_size = virtio_get_qsize(dev, vq);
- vq_desc = virtio_get_vring_desc(dev, vq);
- vq_avail = virtio_get_vring_avail(dev, vq);
- vq_used = virtio_get_vring_used(dev, vq);
-
- avail_idx = virtio_modern16_to_cpu(dev, vq_avail->idx);
-
- last_used_idx = vq_used->idx;
- current_used_idx = &vq_used->idx;
+ last_used_idx = vq->used->idx;
+ current_used_idx = &vq->used->idx;
/* Determine descriptor index */
- id = (avail_idx * 3) % vq_size;
- virtio_fill_desc(&vq_desc[id], dev->is_modern, (uint64_t)req, sizeof(*req), VRING_DESC_F_NEXT,
- (id + 1) % vq_size);
+ id = (avail_idx * 3) % vq->size;
+ virtio_fill_desc(vq, id, dev->features, (uint64_t)req, sizeof(*req), VRING_DESC_F_NEXT,
+ id + 1);
if (buf == NULL || buf_len == 0) {
/* Set up descriptor for response information */
- virtio_fill_desc(&vq_desc[(id + 1) % vq_size], dev->is_modern,
+ virtio_fill_desc(vq, id + 1, dev->features,
(uint64_t)resp, sizeof(*resp),
VRING_DESC_F_WRITE, 0);
} else if (is_read) {
/* Set up descriptor for response information */
- virtio_fill_desc(&vq_desc[(id + 1) % vq_size], dev->is_modern,
+ virtio_fill_desc(vq, id + 1, dev->features,
(uint64_t)resp, sizeof(*resp),
VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
- (id + 2) % vq_size);
+ id + 2);
/* Set up virtqueue descriptor for data from device */
- virtio_fill_desc(&vq_desc[(id + 2) % vq_size], dev->is_modern,
+ virtio_fill_desc(vq, id + 2, dev->features,
(uint64_t)buf, buf_len, VRING_DESC_F_WRITE, 0);
} else {
/* Set up virtqueue descriptor for data to device */
- virtio_fill_desc(&vq_desc[(id + 1) % vq_size], dev->is_modern,
+ virtio_fill_desc(vq, id + 1, dev->features,
(uint64_t)buf, buf_len, VRING_DESC_F_NEXT,
- (id + 2) % vq_size);
+ id + 2);
/* Set up descriptor for response information */
- virtio_fill_desc(&vq_desc[(id + 2) % vq_size], dev->is_modern,
+ virtio_fill_desc(vq, id + 2, dev->features,
(uint64_t)resp, sizeof(*resp),
VRING_DESC_F_WRITE, 0);
}
- vq_avail->ring[avail_idx % vq_size] = virtio_cpu_to_modern16(dev, id);
+ vq->avail->ring[avail_idx % vq->size] = virtio_cpu_to_modern16(dev, id);
mb();
- vq_avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
+ vq->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
/* Tell HV that the vq is ready */
- virtio_queue_notify(dev, vq);
+ virtio_queue_notify(dev, VIRTIO_SCSI_REQUEST_VQ);
/* Wait for host to consume the descriptor */
time = SLOF_GetTimer() + VIRTIO_TIMEOUT;
@@ -99,9 +90,8 @@ int virtioscsi_send(struct virtio_device *dev,
*/
int virtioscsi_init(struct virtio_device *dev)
{
- struct vqs vq_ctrl, vq_event, vq_request;
+ struct vqs *vq_ctrl, *vq_event, *vq_request;
int status = VIRTIO_STAT_ACKNOWLEDGE;
- uint16_t flags;
/* Reset device */
// XXX That will clear the virtq base. We need to move
@@ -117,7 +107,7 @@ int virtioscsi_init(struct virtio_device *dev)
virtio_set_status(dev, status);
/* Device specific setup - we do not support special features right now */
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
if (virtio_negotiate_guest_features(dev, VIRTIO_F_VERSION_1))
goto dev_error;
virtio_get_status(dev, &status);
@@ -125,21 +115,12 @@ int virtioscsi_init(struct virtio_device *dev)
virtio_set_guest_features(dev, 0);
}
- if (virtio_queue_init_vq(dev, &vq_ctrl, VIRTIO_SCSI_CONTROL_VQ) ||
- virtio_queue_init_vq(dev, &vq_event, VIRTIO_SCSI_EVENT_VQ) ||
- virtio_queue_init_vq(dev, &vq_request, VIRTIO_SCSI_REQUEST_VQ))
+ vq_ctrl = virtio_queue_init_vq(dev, VIRTIO_SCSI_CONTROL_VQ);
+ vq_event = virtio_queue_init_vq(dev, VIRTIO_SCSI_EVENT_VQ);
+ vq_request = virtio_queue_init_vq(dev, VIRTIO_SCSI_REQUEST_VQ);
+ if (!vq_ctrl || !vq_event || !vq_request)
goto dev_error;
- flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
- vq_ctrl.avail->flags = flags;
- vq_ctrl.avail->idx = 0;
-
- vq_event.avail->flags = flags;
- vq_event.avail->idx = 0;
-
- vq_request.avail->flags = flags;
- vq_request.avail->idx = 0;
-
/* Tell HV that setup succeeded */
status |= VIRTIO_STAT_DRIVER_OK;
virtio_set_status(dev, status);
@@ -30,13 +30,11 @@
#define RX_Q 0
#define TX_Q 1
-static struct vqs vq_rx;
-static struct vqs vq_tx;
static uint16_t last_rx_idx; /* Last index in RX "used" ring */
int virtio_serial_init(struct virtio_device *dev)
{
- struct vring_avail *vq_avail;
+ struct vqs *vq_rx, *vq_tx;
int status = VIRTIO_STAT_ACKNOWLEDGE;
int i;
@@ -50,7 +48,7 @@ int virtio_serial_init(struct virtio_device *dev)
status |= VIRTIO_STAT_DRIVER;
virtio_set_status(dev, status);
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
/* Negotiate features and sets FEATURES_OK if successful */
if (virtio_negotiate_guest_features(dev, DRIVER_FEATURE_SUPPORT))
goto dev_error;
@@ -58,36 +56,34 @@ int virtio_serial_init(struct virtio_device *dev)
virtio_get_status(dev, &status);
}
- if (virtio_queue_init_vq(dev, &vq_rx, RX_Q))
+ vq_rx = virtio_queue_init_vq(dev, RX_Q);
+ if (!vq_rx)
goto dev_error;
/* Allocate memory for multiple receive buffers */
- vq_rx.buf_mem = SLOF_alloc_mem(RX_ELEM_SIZE * RX_NUM_ELEMS);
- if (!vq_rx.buf_mem) {
+ vq_rx->buf_mem = SLOF_alloc_mem(RX_ELEM_SIZE * RX_NUM_ELEMS);
+ if (!vq_rx->buf_mem) {
printf("virtio-serial: Failed to allocate buffers!\n");
goto dev_error;
}
/* Prepare receive buffer queue */
for (i = 0; i < RX_NUM_ELEMS; i++) {
- uint64_t addr = (uint64_t)vq_rx.buf_mem + i * RX_ELEM_SIZE;
+ uint64_t addr = (uint64_t)vq_rx->buf_mem + i * RX_ELEM_SIZE;
/* Descriptor for data: */
- virtio_fill_desc(&vq_rx.desc[i], dev->is_modern, addr, 1, VRING_DESC_F_WRITE, 0);
- vq_rx.avail->ring[i] = virtio_cpu_to_modern16(dev, i);
+ virtio_fill_desc(vq_rx, i, dev->features, addr, 1, VRING_DESC_F_WRITE, 0);
+ vq_rx->avail->ring[i] = virtio_cpu_to_modern16(dev, i);
}
- vq_rx.avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
- vq_rx.avail->idx = virtio_cpu_to_modern16(dev, RX_NUM_ELEMS);
+ vq_rx->avail->idx = virtio_cpu_to_modern16(dev, RX_NUM_ELEMS);
sync();
- last_rx_idx = virtio_modern16_to_cpu(dev, vq_rx.used->idx);
+ last_rx_idx = virtio_modern16_to_cpu(dev, vq_rx->used->idx);
- if (virtio_queue_init_vq(dev, &vq_tx, TX_Q))
+ vq_tx = virtio_queue_init_vq(dev, TX_Q);
+ if (!vq_tx)
goto dev_error;
- vq_avail = virtio_get_vring_avail(dev, TX_Q);
- vq_avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
- vq_avail->idx = 0;
/* Tell HV that setup succeeded */
status |= VIRTIO_STAT_DRIVER_OK;
@@ -112,35 +108,26 @@ void virtio_serial_shutdown(struct virtio_device *dev)
int virtio_serial_putchar(struct virtio_device *dev, char c)
{
- struct vring_desc *desc;
int id;
- uint32_t vq_size, time;
- struct vring_desc *vq_desc;
- struct vring_avail *vq_avail;
- struct vring_used *vq_used;
+ uint32_t time;
volatile uint16_t *current_used_idx;
uint16_t last_used_idx, avail_idx;
+ struct vqs *vq = &dev->vq[TX_Q];
- vq_size = virtio_get_qsize(dev, TX_Q);
- vq_desc = virtio_get_vring_desc(dev, TX_Q);
- vq_avail = virtio_get_vring_avail(dev, TX_Q);
- vq_used = virtio_get_vring_used(dev, TX_Q);
+ avail_idx = virtio_modern16_to_cpu(dev, vq->avail->idx);
- avail_idx = virtio_modern16_to_cpu(dev, vq_avail->idx);
-
- last_used_idx = vq_used->idx;
- current_used_idx = &vq_used->idx;
+ last_used_idx = vq->used->idx;
+ current_used_idx = &vq->used->idx;
/* Determine descriptor index */
- id = avail_idx % vq_size;
+ id = avail_idx % vq->size;
/* Set up virtqueue descriptor for header */
- desc = &vq_desc[id];
- virtio_fill_desc(desc, dev->is_modern, (uint64_t)&c, 1, 0, 0);
+ virtio_fill_desc(vq, id, dev->features, (uint64_t)&c, 1, 0, 0);
- vq_avail->ring[avail_idx % vq_size] = virtio_cpu_to_modern16 (dev, id);
+ vq->avail->ring[avail_idx % vq->size] = virtio_cpu_to_modern16 (dev, id);
mb();
- vq_avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
+ vq->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
/* Tell HV that the queue is ready */
virtio_queue_notify(dev, TX_Q);
@@ -159,33 +146,32 @@ int virtio_serial_putchar(struct virtio_device *dev, char c)
return 1;
}
-static uint16_t last_rx_idx; /* Last index in RX "used" ring */
-
char virtio_serial_getchar(struct virtio_device *dev)
{
int id, idx;
char buf[RX_NUM_ELEMS] = {0};
uint16_t avail_idx;
+ struct vqs *vq_rx = &dev->vq[RX_Q];
- idx = virtio_modern16_to_cpu(dev, vq_rx.used->idx);
+ idx = virtio_modern16_to_cpu(dev, vq_rx->used->idx);
if (last_rx_idx == idx) {
/* Nothing received yet */
return 0;
}
- id = (virtio_modern32_to_cpu(dev, vq_rx.used->ring[last_rx_idx % vq_rx.size].id) + 1)
- % vq_rx.size;
+ id = (virtio_modern32_to_cpu(dev, vq_rx->used->ring[last_rx_idx % vq_rx->size].id) + 1)
+ % vq_rx->size;
/* Copy data to destination buffer */
- memcpy(buf, (void *)virtio_modern64_to_cpu(dev, vq_rx.desc[id - 1].addr), RX_ELEM_SIZE);
+ memcpy(buf, (void *)virtio_modern64_to_cpu(dev, vq_rx->desc[id - 1].addr), RX_ELEM_SIZE);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
- avail_idx = virtio_modern16_to_cpu(dev, vq_rx.avail->idx);
- vq_rx.avail->ring[avail_idx % vq_rx.size] = virtio_cpu_to_modern16(dev, id - 1);
+ avail_idx = virtio_modern16_to_cpu(dev, vq_rx->avail->idx);
+ vq_rx->avail->ring[avail_idx % vq_rx->size] = virtio_cpu_to_modern16(dev, id - 1);
sync();
- vq_rx.avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
+ vq_rx->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
sync();
/* Tell HV that RX queue entry is ready */
@@ -196,7 +182,9 @@ char virtio_serial_getchar(struct virtio_device *dev)
int virtio_serial_haschar(struct virtio_device *dev)
{
- if (last_rx_idx == virtio_modern16_to_cpu(dev, vq_rx.used->idx))
+ struct vqs *vq_rx = &dev->vq[RX_Q];
+
+ if (last_rx_idx == virtio_modern16_to_cpu(dev, vq_rx->used->idx))
return 0;
else
return 1;
@@ -11,7 +11,6 @@
*****************************************************************************/
#include <stdio.h>
-#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
@@ -20,6 +19,7 @@
#include <byteorder.h>
#include "virtio.h"
#include "helpers.h"
+#include "virtio-internal.h"
/* PCI virtio header offsets */
#define VIRTIOHDR_DEVICE_FEATURES 0
@@ -98,15 +98,6 @@ static void virtio_pci_write64(void *addr, uint64_t val)
ci_write_32(addr + 4, cpu_to_le32(hi));
}
-static uint64_t virtio_pci_read64(void *addr)
-{
- uint64_t hi, lo;
-
- lo = le32_to_cpu(ci_read_32(addr));
- hi = le32_to_cpu(ci_read_32(addr + 4));
- return (hi << 32) | lo;
-}
-
static void virtio_cap_set_base_addr(struct virtio_cap *cap, uint32_t offset)
{
uint64_t addr;
@@ -183,9 +174,9 @@ struct virtio_device *virtio_setup_vd(void)
if (dev->common.cap_id && dev->notify.cap_id &&
dev->isr.cap_id && dev->device.cap_id) {
- dev->is_modern = 1;
+ dev->features = VIRTIO_F_VERSION_1;
} else {
- dev->is_modern = 0;
+ dev->features = 0;
dev->legacy.cap_id = 0;
dev->legacy.bar = 0;
virtio_cap_set_base_addr(&dev->legacy, 0);
@@ -215,7 +206,7 @@ unsigned int virtio_get_qsize(struct virtio_device *dev, int queue)
{
unsigned int size = 0;
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
void *addr = dev->common.addr + offset_of(struct virtio_dev_common, q_select);
ci_write_16(addr, cpu_to_le16(queue));
eieio();
@@ -241,24 +232,7 @@ unsigned int virtio_get_qsize(struct virtio_device *dev, int queue)
*/
struct vring_desc *virtio_get_vring_desc(struct virtio_device *dev, int queue)
{
- struct vring_desc *desc = 0;
-
- if (dev->is_modern) {
- void *q_sel = dev->common.addr + offset_of(struct virtio_dev_common, q_select);
- void *q_desc = dev->common.addr + offset_of(struct virtio_dev_common, q_desc);
-
- ci_write_16(q_sel, cpu_to_le16(queue));
- eieio();
- desc = (void *)(virtio_pci_read64(q_desc));
- } else {
- ci_write_16(dev->legacy.addr+VIRTIOHDR_QUEUE_SELECT,
- cpu_to_le16(queue));
- eieio();
- desc = (void*)(4096L *
- le32_to_cpu(ci_read_32(dev->legacy.addr+VIRTIOHDR_QUEUE_ADDRESS)));
- }
-
- return desc;
+ return dev->vq[queue].desc;
}
@@ -270,18 +244,7 @@ struct vring_desc *virtio_get_vring_desc(struct virtio_device *dev, int queue)
*/
struct vring_avail *virtio_get_vring_avail(struct virtio_device *dev, int queue)
{
- if (dev->is_modern) {
- void *q_sel = dev->common.addr + offset_of(struct virtio_dev_common, q_select);
- void *q_avail = dev->common.addr + offset_of(struct virtio_dev_common, q_avail);
-
- ci_write_16(q_sel, cpu_to_le16(queue));
- eieio();
- return (void *)(virtio_pci_read64(q_avail));
- }
- else {
- return (void*)((uint64_t)virtio_get_vring_desc(dev, queue) +
- virtio_get_qsize(dev, queue) * sizeof(struct vring_desc));
- }
+ return dev->vq[queue].avail;
}
@@ -293,28 +256,23 @@ struct vring_avail *virtio_get_vring_avail(struct virtio_device *dev, int queue)
*/
struct vring_used *virtio_get_vring_used(struct virtio_device *dev, int queue)
{
- if (dev->is_modern) {
- void *q_sel = dev->common.addr + offset_of(struct virtio_dev_common, q_select);
- void *q_used = dev->common.addr + offset_of(struct virtio_dev_common, q_used);
-
- ci_write_16(q_sel, cpu_to_le16(queue));
- eieio();
- return (void *)(virtio_pci_read64(q_used));
- } else {
- return (void*)VQ_ALIGN((uint64_t)virtio_get_vring_avail(dev, queue)
- + virtio_get_qsize(dev, queue)
- * sizeof(struct vring_avail));
- }
+ return dev->vq[queue].used;
}
/**
* Fill the virtio ring descriptor depending on the legacy mode or virtio 1.0
*/
-void virtio_fill_desc(struct vring_desc *desc, bool is_modern,
+void virtio_fill_desc(struct vqs *vq, int id, uint64_t features,
uint64_t addr, uint32_t len,
uint16_t flags, uint16_t next)
{
- if (is_modern) {
+ struct vring_desc *desc;
+
+ id %= vq->size;
+ desc = &vq->desc[id];
+ next %= vq->size;
+
+ if (features & VIRTIO_F_VERSION_1) {
desc->addr = cpu_to_le64(addr);
desc->len = cpu_to_le32(len);
desc->flags = cpu_to_le16(flags);
@@ -341,7 +299,7 @@ void virtio_reset_device(struct virtio_device *dev)
*/
void virtio_queue_notify(struct virtio_device *dev, int queue)
{
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
void *q_sel = dev->common.addr + offset_of(struct virtio_dev_common, q_select);
void *q_ntfy = dev->common.addr + offset_of(struct virtio_dev_common, q_notify_off);
void *addr;
@@ -362,7 +320,7 @@ void virtio_queue_notify(struct virtio_device *dev, int queue)
*/
static void virtio_set_qaddr(struct virtio_device *dev, int queue, unsigned long qaddr)
{
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
uint64_t q_desc = qaddr;
uint64_t q_avail;
uint64_t q_used;
@@ -385,20 +343,37 @@ static void virtio_set_qaddr(struct virtio_device *dev, int queue, unsigned long
}
}
-int virtio_queue_init_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id)
+struct vqs *virtio_queue_init_vq(struct virtio_device *dev, unsigned int id)
{
+ struct vqs *vq;
+
+ if (id >= sizeof(dev->vq)/sizeof(dev->vq[0])) {
+ printf("Queue index is too big!\n");
+ return NULL;
+ }
+ vq = &dev->vq[id];
+
+ memset(vq, 0, sizeof(*vq));
+
vq->size = virtio_get_qsize(dev, id);
vq->desc = SLOF_alloc_mem_aligned(virtio_vring_size(vq->size), 4096);
if (!vq->desc) {
printf("memory allocation failed!\n");
- return -1;
+ return NULL;
}
+
+ vq->avail = (void *) vq->desc + vq->size * sizeof(struct vring_desc);
+ vq->used = (void *) VQ_ALIGN((unsigned long) vq->avail +
+ sizeof(struct vring_avail) +
+ sizeof(uint16_t) * vq->size);
+
memset(vq->desc, 0, virtio_vring_size(vq->size));
virtio_set_qaddr(dev, id, (unsigned long)vq->desc);
- vq->avail = virtio_get_vring_avail(dev, id);
- vq->used = virtio_get_vring_used(dev, id);
- vq->id = id;
- return 0;
+
+ vq->avail->flags = virtio_cpu_to_modern16(dev, VRING_AVAIL_F_NO_INTERRUPT);
+ vq->avail->idx = 0;
+
+ return vq;
}
void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned int id)
@@ -413,7 +388,7 @@ void virtio_queue_term_vq(struct virtio_device *dev, struct vqs *vq, unsigned in
*/
void virtio_set_status(struct virtio_device *dev, int status)
{
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
ci_write_8(dev->common.addr +
offset_of(struct virtio_dev_common, dev_status), status);
} else {
@@ -426,7 +401,7 @@ void virtio_set_status(struct virtio_device *dev, int status)
*/
void virtio_get_status(struct virtio_device *dev, int *status)
{
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
*status = ci_read_8(dev->common.addr +
offset_of(struct virtio_dev_common, dev_status));
} else {
@@ -440,7 +415,7 @@ void virtio_get_status(struct virtio_device *dev, int *status)
void virtio_set_guest_features(struct virtio_device *dev, uint64_t features)
{
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
uint32_t f1 = (features >> 32) & 0xFFFFFFFF;
uint32_t f0 = features & 0xFFFFFFFF;
void *addr = dev->common.addr;
@@ -466,7 +441,7 @@ uint64_t virtio_get_host_features(struct virtio_device *dev)
{
uint64_t features = 0;
- if (dev->is_modern) {
+ if (dev->features & VIRTIO_F_VERSION_1) {
uint32_t f0 = 0, f1 = 0;
void *addr = dev->common.addr;
@@ -514,6 +489,8 @@ int virtio_negotiate_guest_features(struct virtio_device *dev, uint64_t features
if ((status & VIRTIO_STAT_FEATURES_OK) != VIRTIO_STAT_FEATURES_OK)
return -1;
+ dev->features = features;
+
return 0;
}
@@ -526,7 +503,7 @@ uint64_t virtio_get_config(struct virtio_device *dev, int offset, int size)
uint32_t hi, lo;
void *confbase;
- if (dev->is_modern)
+ if (dev->features & VIRTIO_F_VERSION_1)
confbase = dev->device.addr;
else
confbase = dev->legacy.addr+VIRTIOHDR_DEVICE_CONFIG;
@@ -537,12 +514,12 @@ uint64_t virtio_get_config(struct virtio_device *dev, int offset, int size)
break;
case 2:
val = ci_read_16(confbase+offset);
- if (dev->is_modern)
+ if (dev->features & VIRTIO_F_VERSION_1)
val = le16_to_cpu(val);
break;
case 4:
val = ci_read_32(confbase+offset);
- if (dev->is_modern)
+ if (dev->features & VIRTIO_F_VERSION_1)
val = le32_to_cpu(val);
break;
case 8:
@@ -551,7 +528,7 @@ uint64_t virtio_get_config(struct virtio_device *dev, int offset, int size)
*/
lo = ci_read_32(confbase+offset);
hi = ci_read_32(confbase+offset+4);
- if (dev->is_modern)
+ if (dev->features & VIRTIO_F_VERSION_1)
val = (uint64_t)le32_to_cpu(hi) << 32 | le32_to_cpu(lo);
else
val = (uint64_t)hi << 32 | lo;
@@ -571,7 +548,7 @@ int __virtio_read_config(struct virtio_device *dev, void *dst,
unsigned char *buf = dst;
int i;
- if (dev->is_modern)
+ if (dev->features & VIRTIO_F_VERSION_1)
confbase = dev->device.addr;
else
confbase = dev->legacy.addr+VIRTIOHDR_DEVICE_CONFIG;
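
A minimal usage sketch, for reference only (not part of the patch): it shows how a driver would use the reworked API, where virtio_queue_init_vq() returns a pointer into dev->vq[] (and already sets avail->flags/idx) and virtio_fill_desc() takes the vqs pointer, a descriptor index and the negotiated feature bits instead of a raw vring_desc pointer and the old is_modern flag. The queue number (0), the function names and the header list are assumptions taken from the in-tree drivers.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <cpu.h>		/* mb(), as used by the in-tree drivers (assumption) */
#include "virtio.h"
#include "virtio-internal.h"	/* virtio_cpu_to_modern16() and friends */

static struct vqs *example_vq;	/* hypothetical driver state */

static int example_init(struct virtio_device *dev)
{
	/* Queue bookkeeping now lives in dev->vq[]; no local struct vqs. */
	example_vq = virtio_queue_init_vq(dev, 0);
	return example_vq ? 0 : -1;
}

static void example_post_buffer(struct virtio_device *dev, void *buf, uint32_t len)
{
	struct vqs *vq = example_vq;
	uint16_t avail_idx = virtio_modern16_to_cpu(dev, vq->avail->idx);
	int id = avail_idx % vq->size;

	/* virtio_fill_desc() now wraps id/next modulo the queue size itself. */
	virtio_fill_desc(vq, id, dev->features, (uint64_t)buf, len,
			 VRING_DESC_F_WRITE, 0);

	vq->avail->ring[avail_idx % vq->size] = virtio_cpu_to_modern16(dev, id);
	mb();
	vq->avail->idx = virtio_cpu_to_modern16(dev, avail_idx + 1);
	virtio_queue_notify(dev, 0);
}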