Message ID | 20190924074834.14995-1-juergh@canonical.com |
---|---|
State | New |
Series | [SRU,Xenial,CVE-2019-14821] KVM: coalesced_mmio: add bounds checking |
On Tue, Sep 24, 2019 at 09:48:34AM +0200, Juerg Haefliger wrote:
> From: Matt Delco <delco@chromium.org>
>
> commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
>
> The first/last indexes are typically shared with a user app.
> The app can change the 'last' index that the kernel uses
> to store the next result. This change sanity checks the index
> before using it for writing to a potentially arbitrary address.
>
> This fixes CVE-2019-14821.
>
> Cc: stable@vger.kernel.org
> Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
> Signed-off-by: Matt Delco <delco@chromium.org>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
> [Use READ_ONCE. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
>
> CVE-2019-14821
>
> (cherry picked from commit ae41539657ce0a4e9f4588e89e5e19a8b8f11928 linux-4.4.y)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>
> ---
>  virt/kvm/coalesced_mmio.c | 17 ++++++++++-------
>  1 file changed, 10 insertions(+), 7 deletions(-)

ACK for all flavors.

Acked-by: Sultan Alsawaf <sultan.alsawaf@canonical.com>
On 2019-09-24 09:48:34, Juerg Haefliger wrote:
> From: Matt Delco <delco@chromium.org>
>
> commit b60fe990c6b07ef6d4df67bc0530c7c90a62623a upstream.
>
> The first/last indexes are typically shared with a user app.
> The app can change the 'last' index that the kernel uses
> to store the next result. This change sanity checks the index
> before using it for writing to a potentially arbitrary address.
>
> This fixes CVE-2019-14821.
>
> Cc: stable@vger.kernel.org
> Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
> Signed-off-by: Matt Delco <delco@chromium.org>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
> [Use READ_ONCE. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
>
> CVE-2019-14821
>
> (cherry picked from commit ae41539657ce0a4e9f4588e89e5e19a8b8f11928 linux-4.4.y)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>

Acked-by: Tyler Hicks <tyhicks@canonical.com>

Thanks!

Tyler
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 571c1ce37d15..5c1efb869df2 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
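
For readers outside the KVM tree, the hunks above are easier to follow with the userspace half of the protocol in view: the ring, including its 'first' and 'last' indexes, lives in a page of the vcpu mmap region that the VMM maps read-write, so nothing prevents userspace from storing an arbitrary value in 'last'. The sketch below shows how a VMM typically drains that ring; it is an illustrative sketch only, not code from this patch or from any particular VMM. The struct and ioctl names (kvm_coalesced_mmio_ring, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO) are from the KVM UAPI headers; handle_mmio_write() and the vcpu_map argument are hypothetical stand-ins for the VMM's own plumbing.

/*
 * Illustrative sketch only - not part of the patch above.  A userspace VMM
 * drains the coalesced MMIO ring roughly like this; the ring sits in memory
 * shared with the kernel, which is why the kernel cannot trust ring->last.
 */
#include <linux/kvm.h>   /* kvm_coalesced_mmio_ring, KVM_CAP_COALESCED_MMIO */
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical VMM callback that replays one coalesced MMIO write. */
void handle_mmio_write(unsigned long long addr, const void *data, unsigned len);

static void drain_coalesced_mmio(int kvm_fd, void *vcpu_map)
{
	/* KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) returns the page offset
	 * of the ring inside the vcpu mmap region (0 means unsupported). */
	int off = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
	if (off <= 0)
		return;

	struct kvm_coalesced_mmio_ring *ring =
		(void *)((char *)vcpu_map + (size_t)off * getpagesize());

	/* Mirrors the kernel's KVM_COALESCED_MMIO_MAX for a one-page ring. */
	unsigned max = (getpagesize() - sizeof(*ring)) /
		       sizeof(ring->coalesced_mmio[0]);

	/*
	 * Userspace advances 'first'; the kernel advances 'last'.  Both
	 * indexes live in this shared page, so a buggy or malicious VMM can
	 * store any value in 'last' - exactly the value the pre-fix kernel
	 * used, unchecked, as an array index for its next write.
	 */
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
		/* A real VMM issues a write barrier before publishing the
		 * new 'first'. */
		ring->first = (ring->first + 1) % max;
	}
}

The kernel-side fix takes the complementary view: it snapshots ring->last once with READ_ONCE() and refuses the write when that snapshot is out of range, so a hostile or concurrently changing 'last' can no longer steer the memcpy() past the end of the ring.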