@@ -502,10 +502,14 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
req->next = s->rq;
s->rq = req;
- virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
- req->elem.in_num, 1);
- virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
- req->elem.out_num, 0);
+ if (virtqueue_map_sg(s->vq, req->elem.in_sg, req->elem.in_addr,
+ req->elem.in_num, 1)) {
+ return -EINVAL;
+ }
+ if (virtqueue_map_sg(s->vq, req->elem.out_sg, req->elem.out_addr,
+ req->elem.out_num, 0)) {
+ return -EINVAL;
+ }
}
return 0;
@@ -679,10 +679,15 @@ static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_buffer(f, (unsigned char *)&port->elem,
sizeof(port->elem));
- virtqueue_map_sg(port->elem.in_sg, port->elem.in_addr,
- port->elem.in_num, 1);
- virtqueue_map_sg(port->elem.out_sg, port->elem.out_addr,
- port->elem.out_num, 1);
+ if (virtqueue_map_sg(port->ivq, port->elem.in_sg,
+ port->elem.in_addr,
+ port->elem.in_num, 1)) {
+ return -EINVAL;
+ }
+ if (virtqueue_map_sg(port->ovq, port->elem.out_sg, port->elem.out_addr,
+ port->elem.out_num, 1)) {
+ return -EINVAL;
+ }
/*
* Port was throttled on source machine. Let's
@@ -16,6 +16,7 @@
#include "trace.h"
#include "virtio.h"
#include "sysemu.h"
+#include "qemu-error.h"
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
@@ -253,15 +254,15 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vq->vring.num) {
- fprintf(stderr, "Guest moved used index from %u to %u",
- idx, vring_avail_idx(vq));
- exit(1);
+ virtio_error(vq->vdev, "Guest moved used index from %u to %u",
+ idx, vring_avail_idx(vq));
+ return 0;
}
return num_heads;
}
-static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
+static int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
unsigned int head;
@@ -271,14 +272,14 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
/* If their number is silly, that's a fatal mistake. */
if (head >= vq->vring.num) {
- fprintf(stderr, "Guest says index %u is available", head);
- exit(1);
+ virtio_error(vq->vdev, "Guest says index %u is available", head);
+ return -1;
}
return head;
}
-static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
+static unsigned virtqueue_next_desc(VirtQueue *vq, target_phys_addr_t desc_pa,
unsigned int i, unsigned int max)
{
unsigned int next;
@@ -293,8 +294,8 @@ static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
wmb();
if (next >= max) {
- fprintf(stderr, "Desc next is %u", next);
- exit(1);
+ virtio_error(vq->vdev, "Desc next is %u", next);
+ return max;
}
return next;
@@ -316,18 +317,21 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
max = vq->vring.num;
num_bufs = total_bufs;
i = virtqueue_get_head(vq, idx++);
+ if (i < 0) {
+ return 0;
+ }
desc_pa = vq->vring.desc;
if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
- fprintf(stderr, "Invalid size for indirect buffer table\n");
- exit(1);
+ virtio_error(vq->vdev, "Invalid size for indirect buffer table\n");
+ return 0;
}
/* If we've got too many, that implies a descriptor loop. */
if (num_bufs >= max) {
- fprintf(stderr, "Looped descriptor");
- exit(1);
+ virtio_error(vq->vdev, "Looped descriptor");
+ return 0;
}
/* loop over the indirect descriptor table */
@@ -340,8 +344,8 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
do {
/* If we've got too many, that implies a descriptor loop. */
if (++num_bufs > max) {
- fprintf(stderr, "Looped descriptor");
- exit(1);
+ virtio_error(vq->vdev, "Looped descriptor");
+ return 0;
}
if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
@@ -353,7 +357,7 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
(out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
return 1;
}
- } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+ } while ((i = virtqueue_next_desc(vq, desc_pa, i, max)) != max);
if (!indirect)
total_bufs = num_bufs;
@@ -364,8 +368,8 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
return 0;
}
-void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
- size_t num_sg, int is_write)
+int virtqueue_map_sg(VirtQueue *vq, struct iovec *sg,
+ target_phys_addr_t *addr, size_t num_sg, int is_write)
{
unsigned int i;
target_phys_addr_t len;
@@ -374,15 +378,16 @@ void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
len = sg[i].iov_len;
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
- fprintf(stderr, "virtio: trying to map MMIO memory\n");
- exit(1);
+ return -1;
}
}
+ return 0;
}
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
- unsigned int i, head, max;
+ int i, head;
+ unsigned max;
target_phys_addr_t desc_pa = vq->vring.desc;
if (!virtqueue_num_heads(vq, vq->last_avail_idx))
@@ -394,11 +399,14 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
max = vq->vring.num;
i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
+ if (i < 0) {
+ return 0;
+ }
if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
- fprintf(stderr, "Invalid size for indirect buffer table\n");
- exit(1);
+ virtio_error(vq->vdev, "Invalid size for indirect buffer table\n");
+ return 0;
}
/* loop over the indirect descriptor table */
@@ -423,14 +431,18 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
/* If we've got too many, that implies a descriptor loop. */
if ((elem->in_num + elem->out_num) > max) {
- fprintf(stderr, "Looped descriptor");
- exit(1);
+ virtio_error(vq->vdev, "Looped descriptor");
+ return 0;
}
- } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
+ } while ((i = virtqueue_next_desc(vq, desc_pa, i, max)) != max);
/* Now map what we have collected */
- virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
- virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
+ if (virtqueue_map_sg(vq, elem->in_sg, elem->in_addr, elem->in_num, 1)) {
+ return 0;
+ }
+ if (virtqueue_map_sg(vq, elem->out_sg, elem->out_addr, elem->out_num, 0)) {
+ return 0;
+ }
elem->index = head;
@@ -863,3 +875,14 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
return &vq->host_notifier;
}
+
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...)
+{
+ va_list ap;
+
+ virtio_set_status(vdev, VIRTIO_CONFIG_S_FAILED);
+
+ va_start(ap, fmt);
+ error_vprintf(fmt, ap);
+ va_end(ap);
+}
@@ -134,6 +134,9 @@ static inline void virtio_set_status(VirtIODevice *vdev, uint8_t val)
vdev->status = val;
}
+void virtio_error(VirtIODevice *vdev, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
void (*handle_output)(VirtIODevice *,
VirtQueue *));
@@ -144,8 +147,8 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count);
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx);
-void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
- size_t num_sg, int is_write);
+int virtqueue_map_sg(VirtQueue *vq, struct iovec *sg,
+ target_phys_addr_t *addr, size_t num_sg, int is_write);
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
When a guest does something illegal, such as programming invalid index values into a virtio device, qemu currently tends to crash. With virtio, a better approach is to log an error and set the device status to FAILED (VIRTIO_CONFIG_S_FAILED), which stops the device. Add an API to do this, and fix the core, blk and serial code to use it on error. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- hw/virtio-blk.c | 12 +++++-- hw/virtio-serial-bus.c | 13 +++++-- hw/virtio.c | 79 +++++++++++++++++++++++++++++++----------------- hw/virtio.h | 7 +++- 4 files changed, 73 insertions(+), 38 deletions(-)