Message ID | 20100302165434.GA8690@redhat.com |
---|---|
State | New |
Headers | show |
On 03/02/2010 10:54 AM, Michael S. Tsirkin wrote: > abort if it is > > Signed-off-by: Michael S. Tsirkin <mst@redhat.com> > --- > > So the following is a simple solution for unstable > ring mappings security issue: simply detect this and stop. > > Will repost series with this later after some testing, > but this is an RFC to get early feedback if any. > It's certainly a reasonable compromise. Regards, Anthony Liguori > hw/vhost.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- > hw/vhost.h | 3 +++ > 2 files changed, 52 insertions(+), 3 deletions(-) > > diff --git a/hw/vhost.c b/hw/vhost.c > index 3b3a109..b9e115e 100644 > --- a/hw/vhost.c > +++ b/hw/vhost.c > @@ -256,6 +256,33 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size) > dev->log_size = size; > } > > +static int vhost_verify_ring_mappings(struct vhost_dev *dev, > + uint64_t start_addr, > + uint64_t size) > +{ > + int i; > + for (i = 0; i < dev->nvqs; ++i) { > + struct vhost_virtqueue *vq = dev->vqs + i; > + target_phys_addr_t l; > + void *p; > + > + if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) > + continue; > + l = vq->ring_size; > + p = cpu_physical_memory_map(vq->ring_phys, &l, 1); > + if (!p || l != vq->ring_size) { > + fprintf(stderr, "Unable to map ring buffer for ring %d\n", i); > + return -ENOMEM; > + } > + if (p != vq->ring) { > + fprintf(stderr, "Ring buffer relocated for ring %d\n", i); > + return -EBUSY; > + } > + cpu_physical_memory_unmap(p, l, 0, 0); > + } > + return 0; > +} > + > static void vhost_client_set_memory(CPUPhysMemoryClient *client, > target_phys_addr_t start_addr, > ram_addr_t size, > @@ -284,6 +311,12 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client, > if (!dev->started) { > return; > } > + > + if (dev->started) { > + r = vhost_verify_ring_mappings(dev, start_addr, size); > + assert(r >= 0); > + } > + > if (!dev->log_enabled) { > r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem); > assert(r >= 0); > 
@@ -442,6 +475,14 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, > goto fail_alloc_used; > } > > + vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx); > + vq->ring_phys = a = virtio_queue_get_ring(vdev, idx); > + vq->ring = cpu_physical_memory_map(a, &l, 1); > + if (!vq->ring || l != s) { > + r = -ENOMEM; > + goto fail_alloc_ring; > + } > + > r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled); > if (r < 0) { > r = -errno; > @@ -485,6 +526,9 @@ fail_host_notifier: > vdev->binding->guest_notifier(vdev->binding_opaque, idx, false); > fail_guest_notifier: > fail_alloc: > + cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), > + 0, 0); > +fail_alloc_ring: > cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), > 0, 0); > fail_alloc_used: > @@ -526,12 +570,14 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev, > } > virtio_queue_set_last_avail_idx(vdev, idx, state.num); > assert (r >= 0); > + cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), > + 0, virtio_queue_get_ring_size(vdev, idx)); > cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), > 0, 0); > cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx), > 0, 0); > cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx), > 0, 0); > } > > int vhost_dev_init(struct vhost_dev *hdev, int devfd) > diff --git a/hw/vhost.h b/hw/vhost.h > index 48b52c7..86dd834 100644 > --- a/hw/vhost.h > +++ b/hw/vhost.h > @@ -14,6 +14,9 @@ struct vhost_virtqueue { > int num; > unsigned long long used_phys; > unsigned used_size; > + void *ring; > + unsigned long long ring_phys; > + unsigned ring_size; > }; > > typedef unsigned long vhost_log_chunk_t; >
diff --git a/hw/vhost.c b/hw/vhost.c index 3b3a109..b9e115e 100644 --- a/hw/vhost.c +++ b/hw/vhost.c @@ -256,6 +256,33 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size) dev->log_size = size; } +static int vhost_verify_ring_mappings(struct vhost_dev *dev, + uint64_t start_addr, + uint64_t size) +{ + int i; + for (i = 0; i < dev->nvqs; ++i) { + struct vhost_virtqueue *vq = dev->vqs + i; + target_phys_addr_t l; + void *p; + + if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) + continue; + l = vq->ring_size; + p = cpu_physical_memory_map(vq->ring_phys, &l, 1); + if (!p || l != vq->ring_size) { + fprintf(stderr, "Unable to map ring buffer for ring %d\n", i); + return -ENOMEM; + } + if (p != vq->ring) { + fprintf(stderr, "Ring buffer relocated for ring %d\n", i); + return -EBUSY; + } + cpu_physical_memory_unmap(p, l, 0, 0); + } + return 0; +} + static void vhost_client_set_memory(CPUPhysMemoryClient *client, target_phys_addr_t start_addr, ram_addr_t size, @@ -284,6 +311,12 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client, if (!dev->started) { return; } + + if (dev->started) { + r = vhost_verify_ring_mappings(dev, start_addr, size); + assert(r >= 0); + } + if (!dev->log_enabled) { r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem); assert(r >= 0); @@ -442,6 +475,14 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, goto fail_alloc_used; } + vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx); + vq->ring_phys = a = virtio_queue_get_ring(vdev, idx); + vq->ring = cpu_physical_memory_map(a, &l, 1); + if (!vq->ring || l != s) { + r = -ENOMEM; + goto fail_alloc_ring; + } + r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled); if (r < 0) { r = -errno; @@ -485,6 +526,9 @@ fail_host_notifier: vdev->binding->guest_notifier(vdev->binding_opaque, idx, false); fail_guest_notifier: fail_alloc: + cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), + 0, 0); 
+fail_alloc_ring: cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), 0, 0); fail_alloc_used: @@ -526,12 +570,14 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev, } virtio_queue_set_last_avail_idx(vdev, idx, state.num); assert (r >= 0); + cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), + 0, virtio_queue_get_ring_size(vdev, idx)); cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), 0, 0); cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx), 0, 0); cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx), 0, 0); } int vhost_dev_init(struct vhost_dev *hdev, int devfd) diff --git a/hw/vhost.h b/hw/vhost.h index 48b52c7..86dd834 100644 --- a/hw/vhost.h +++ b/hw/vhost.h @@ -14,6 +14,9 @@ struct vhost_virtqueue { int num; unsigned long long used_phys; unsigned used_size; + void *ring; + unsigned long long ring_phys; + unsigned ring_size; }; typedef unsigned long vhost_log_chunk_t;
abort if it is Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- So the following is a simple solution for unstable ring mappings security issue: simply detect this and stop. Will repost series with this later after some testing, but this is an RFC to get early feedback if any. hw/vhost.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- hw/vhost.h | 3 +++ 2 files changed, 52 insertions(+), 3 deletions(-)