From 07b21bd4c2e66a0a761f9489f8d909919c752699 Mon Sep 17 00:00:00 2001
From: "Justin M. Forbes"
Date: Thu, 19 Sep 2019 10:35:02 -0500
Subject: Forgot patch

---
 kvm-coalesced_mmio-add-bounds-checking.patch | 83 ++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 kvm-coalesced_mmio-add-bounds-checking.patch

diff --git a/kvm-coalesced_mmio-add-bounds-checking.patch b/kvm-coalesced_mmio-add-bounds-checking.patch
new file mode 100644
index 000000000..ddd2568f9
--- /dev/null
+++ b/kvm-coalesced_mmio-add-bounds-checking.patch
@@ -0,0 +1,83 @@
+From b60fe990c6b07ef6d4df67bc0530c7c90a62623a Mon Sep 17 00:00:00 2001
+From: Matt Delco
+Date: Mon, 16 Sep 2019 14:16:54 -0700
+Subject: KVM: coalesced_mmio: add bounds checking
+
+The first/last indexes are typically shared with a user app.
+The app can change the 'last' index that the kernel uses
+to store the next result. This change sanity checks the index
+before using it for writing to a potentially arbitrary address.
+
+This fixes CVE-2019-14821.
+
+Cc: stable@vger.kernel.org
+Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
+Signed-off-by: Matt Delco
+Signed-off-by: Jim Mattson
+Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
+[Use READ_ONCE. - Paolo]
+Signed-off-by: Paolo Bonzini
+---
+ virt/kvm/coalesced_mmio.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 5294abb3f178..8ffd07e2a160 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
+ 	return 1;
+ }
+ 
+-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
+ {
+ 	struct kvm_coalesced_mmio_ring *ring;
+ 	unsigned avail;
+@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+ 	 * there is always one unused entry in the buffer
+ 	 */
+ 	ring = dev->kvm->coalesced_mmio_ring;
+-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
++	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
+ 	if (avail == 0) {
+ 		/* full */
+ 		return 0;
+@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
+ {
+ 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+ 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
++	__u32 insert;
+ 
+ 	if (!coalesced_mmio_in_range(dev, addr, len))
+ 		return -EOPNOTSUPP;
+ 
+ 	spin_lock(&dev->kvm->ring_lock);
+ 
+-	if (!coalesced_mmio_has_room(dev)) {
++	insert = READ_ONCE(ring->last);
++	if (!coalesced_mmio_has_room(dev, insert) ||
++	    insert >= KVM_COALESCED_MMIO_MAX) {
+ 		spin_unlock(&dev->kvm->ring_lock);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	/* copy data in first free entry of the ring */
+ 
+-	ring->coalesced_mmio[ring->last].phys_addr = addr;
+-	ring->coalesced_mmio[ring->last].len = len;
+-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+-	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
++	ring->coalesced_mmio[insert].phys_addr = addr;
++	ring->coalesced_mmio[insert].len = len;
++	memcpy(ring->coalesced_mmio[insert].data, val, len);
++	ring->coalesced_mmio[insert].pio = dev->zone.pio;
+ 	smp_wmb();
+-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
++	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
+ 	spin_unlock(&dev->kvm->ring_lock);
+ 	return 0;
+ }
+-- 
+cgit 1.2-0.3.lf.el7
+
-- 
cgit
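
For background, the fix above follows a common hardening pattern for an index that lives in memory writable by an untrusted party: read it exactly once into a local variable, bounds-check that local copy, and only ever index through the copy. The standalone C sketch below illustrates the pattern outside the kernel; the names (struct ring, ring_push, ENTRIES) are hypothetical, a volatile read stands in for READ_ONCE(), and the kernel's ring_lock and smp_wmb() barrier are omitted to keep the example minimal.

#include <stdint.h>
#include <string.h>

#define ENTRIES 64U  /* hypothetical ring size, analogous to KVM_COALESCED_MMIO_MAX */

struct entry {
	uint64_t addr;
	uint32_t len;
	uint8_t  data[8];
};

struct ring {
	volatile uint32_t first;   /* consumer index, advanced by the untrusted side */
	volatile uint32_t last;    /* producer index, also visible to the untrusted side */
	struct entry slot[ENTRIES];
};

/* Returns 0 on success, -1 if the ring is full or the shared index is out of range. */
static int ring_push(struct ring *r, uint64_t addr, const void *val, uint32_t len)
{
	uint32_t insert = r->last;                 /* snapshot the shared index once */

	if (insert >= ENTRIES)                     /* reject a corrupted producer index */
		return -1;
	if (((r->first - insert - 1) % ENTRIES) == 0)  /* full: one slot is kept unused */
		return -1;
	if (len > sizeof(r->slot[insert].data))
		return -1;

	r->slot[insert].addr = addr;               /* write only through the validated copy */
	r->slot[insert].len = len;
	memcpy(r->slot[insert].data, val, len);
	r->last = (insert + 1) % ENTRIES;          /* publish the new producer index */
	return 0;
}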