path: root/vfio-pci-block-user-access-to-disabled-device-MMIO.patch
Diffstat (limited to 'vfio-pci-block-user-access-to-disabled-device-MMIO.patch')
-rw-r--r--  vfio-pci-block-user-access-to-disabled-device-MMIO.patch | 857
1 file changed, 0 insertions(+), 857 deletions(-)
diff --git a/vfio-pci-block-user-access-to-disabled-device-MMIO.patch b/vfio-pci-block-user-access-to-disabled-device-MMIO.patch
deleted file mode 100644
index f289b448f..000000000
--- a/vfio-pci-block-user-access-to-disabled-device-MMIO.patch
+++ /dev/null
@@ -1,857 +0,0 @@
-From MAILER-DAEMON Wed May 20 15:47:40 2020
-Subject: [PATCH v2 1/3] vfio/type1: Support faulting PFNMAP vmas
-From: Alex Williamson <alex.williamson@redhat.com>
-To: kvm@vger.kernel.org
-Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
-Date: Tue, 05 May 2020 15:54:44 -0600
-Message-ID: <158871568480.15589.17339878308143043906.stgit@gimli.home>
-In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-Sender: kvm-owner@vger.kernel.org
-List-ID: <kvm.vger.kernel.org>
-X-Mailing-List: kvm@vger.kernel.org
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 7bit
-
-With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on
-the range being faulted into the vma. Add support to manually provide
-that, in the same way as done in KVM with hva_to_pfn_remapped() (a
-condensed sketch of this idiom follows the patch).
-
-Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
-Reviewed-by: Peter Xu <peterx@redhat.com>
----
- drivers/vfio/vfio_iommu_type1.c | 36 +++++++++++++++++++++++++++++++++---
- 1 file changed, 33 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
-index cc1d64765ce7..4a4cb7cd86b2 100644
---- a/drivers/vfio/vfio_iommu_type1.c
-+++ b/drivers/vfio/vfio_iommu_type1.c
-@@ -317,6 +317,32 @@ static int put_pfn(unsigned long pfn, int prot)
- return 0;
- }
-
-+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
-+ unsigned long vaddr, unsigned long *pfn,
-+ bool write_fault)
-+{
-+ int ret;
-+
-+ ret = follow_pfn(vma, vaddr, pfn);
-+ if (ret) {
-+ bool unlocked = false;
-+
-+ ret = fixup_user_fault(NULL, mm, vaddr,
-+ FAULT_FLAG_REMOTE |
-+ (write_fault ? FAULT_FLAG_WRITE : 0),
-+ &unlocked);
-+ if (unlocked)
-+ return -EAGAIN;
-+
-+ if (ret)
-+ return ret;
-+
-+ ret = follow_pfn(vma, vaddr, pfn);
-+ }
-+
-+ return ret;
-+}
-+
- static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
- int prot, unsigned long *pfn)
- {
-@@ -339,12 +365,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
-
- vaddr = untagged_addr(vaddr);
-
-+retry:
- vma = find_vma_intersection(mm, vaddr, vaddr + 1);
-
- if (vma && vma->vm_flags & VM_PFNMAP) {
-- if (!follow_pfn(vma, vaddr, pfn) &&
-- is_invalid_reserved_pfn(*pfn))
-- ret = 0;
-+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
-+ if (ret == -EAGAIN)
-+ goto retry;
-+
-+ if (!ret && !is_invalid_reserved_pfn(*pfn))
-+ ret = -EFAULT;
- }
- done:
- up_read(&mm->mmap_sem);
-
-
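
The fault-and-retry idiom that patch 1 introduces can be read as one
self-contained helper. The sketch below condenses it outside the diff,
assuming the mm APIs of this 5.x-era tree (follow_pfn(), a
fixup_user_fault() that still takes a task argument, mmap_sem before
its rename to mmap_lock); demo_vaddr_get_pfn() is a hypothetical name,
not part of the series:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    static int demo_vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                                  bool write, unsigned long *pfn)
    {
            struct vm_area_struct *vma;
            int ret;

            down_read(&mm->mmap_sem);
    retry:
            ret = -EFAULT;
            vma = find_vma_intersection(mm, vaddr, vaddr + 1);
            if (vma && (vma->vm_flags & VM_PFNMAP)) {
                    ret = follow_pfn(vma, vaddr, pfn);
                    if (ret) {
                            bool unlocked = false;

                            /* Fault the PFNMAP page in, as KVM does in
                             * hva_to_pfn_remapped(), then retry the lookup. */
                            ret = fixup_user_fault(NULL, mm, vaddr,
                                                   FAULT_FLAG_REMOTE |
                                                   (write ? FAULT_FLAG_WRITE : 0),
                                                   &unlocked);
                            /* mmap_sem was dropped and reacquired: the vma
                             * may be stale, so restart from the lookup. */
                            if (unlocked)
                                    goto retry;
                            if (!ret)
                                    ret = follow_pfn(vma, vaddr, pfn);
                    }
            }
            up_read(&mm->mmap_sem);
            return ret;
    }

The unlocked/retry dance is why the patch returns -EAGAIN to its
caller: fixup_user_fault() may drop and reacquire mmap_sem, which
invalidates the previously found vma.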
-From MAILER-DAEMON Wed May 20 15:47:40 2020
-Subject: [PATCH v2 2/3] vfio-pci: Fault mmaps to enable vma tracking
-From: Alex Williamson <alex.williamson@redhat.com>
-To: kvm@vger.kernel.org
-Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
-Date: Tue, 05 May 2020 15:54:53 -0600
-Message-ID: <158871569380.15589.16950418949340311053.stgit@gimli.home>
-In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-Sender: kvm-owner@vger.kernel.org
-List-ID: <kvm.vger.kernel.org>
-X-Mailing-List: kvm@vger.kernel.org
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 7bit
-
-Rather than calling remap_pfn_range() when a region is mmap'd, set up
-a vm_ops handler to support dynamic faulting of the range on access.
-This allows us to maintain a list of vmas actively mapping the area,
-which we can later use to invalidate those mappings. The open callback
-invalidates the vma range so that all tracking is inserted in the
-fault handler and removed in the close handler (a condensed sketch of
-this pattern follows the patch).
-
-Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
-Reviewed-by: Peter Xu <peterx@redhat.com>
----
- drivers/vfio/pci/vfio_pci.c | 76 ++++++++++++++++++++++++++++++++++-
- drivers/vfio/pci/vfio_pci_private.h | 7 +++
- 2 files changed, 81 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
-index 6c6b37b5c04e..66a545a01f8f 100644
---- a/drivers/vfio/pci/vfio_pci.c
-+++ b/drivers/vfio/pci/vfio_pci.c
-@@ -1299,6 +1299,70 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
- return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
- }
-
-+static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
-+ struct vm_area_struct *vma)
-+{
-+ struct vfio_pci_mmap_vma *mmap_vma;
-+
-+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
-+ if (!mmap_vma)
-+ return -ENOMEM;
-+
-+ mmap_vma->vma = vma;
-+
-+ mutex_lock(&vdev->vma_lock);
-+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
-+ mutex_unlock(&vdev->vma_lock);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Zap mmaps on open so that we can fault them in on access and therefore
-+ * our vma_list only tracks mappings accessed since last zap.
-+ */
-+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
-+{
-+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-+}
-+
-+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
-+{
-+ struct vfio_pci_device *vdev = vma->vm_private_data;
-+ struct vfio_pci_mmap_vma *mmap_vma;
-+
-+ mutex_lock(&vdev->vma_lock);
-+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
-+ if (mmap_vma->vma == vma) {
-+ list_del(&mmap_vma->vma_next);
-+ kfree(mmap_vma);
-+ break;
-+ }
-+ }
-+ mutex_unlock(&vdev->vma_lock);
-+}
-+
-+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
-+{
-+ struct vm_area_struct *vma = vmf->vma;
-+ struct vfio_pci_device *vdev = vma->vm_private_data;
-+
-+ if (vfio_pci_add_vma(vdev, vma))
-+ return VM_FAULT_OOM;
-+
-+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
-+ return VM_FAULT_SIGBUS;
-+
-+ return VM_FAULT_NOPAGE;
-+}
-+
-+static const struct vm_operations_struct vfio_pci_mmap_ops = {
-+ .open = vfio_pci_mmap_open,
-+ .close = vfio_pci_mmap_close,
-+ .fault = vfio_pci_mmap_fault,
-+};
-+
- static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
- {
- struct vfio_pci_device *vdev = device_data;
-@@ -1357,8 +1421,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
-
-- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-- req_len, vma->vm_page_prot);
-+ /*
-+	 * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we
-+	 * can't change vm_flags within the fault handler.  Set them now.
-+ */
-+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-+ vma->vm_ops = &vfio_pci_mmap_ops;
-+
-+ return 0;
- }
-
- static void vfio_pci_request(void *device_data, unsigned int count)
-@@ -1608,6 +1678,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- spin_lock_init(&vdev->irqlock);
- mutex_init(&vdev->ioeventfds_lock);
- INIT_LIST_HEAD(&vdev->ioeventfds_list);
-+ mutex_init(&vdev->vma_lock);
-+ INIT_LIST_HEAD(&vdev->vma_list);
-
- ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
- if (ret) {
-diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
-index 36ec69081ecd..9b25f9f6ce1d 100644
---- a/drivers/vfio/pci/vfio_pci_private.h
-+++ b/drivers/vfio/pci/vfio_pci_private.h
-@@ -92,6 +92,11 @@ struct vfio_pci_vf_token {
- struct mutex lock;
- };
-
-+struct vfio_pci_mmap_vma {
-+ struct vm_area_struct *vma;
-+ struct list_head vma_next;
-+};
-+
- struct vfio_pci_device {
- struct pci_dev *pdev;
- void __iomem *barmap[PCI_STD_NUM_BARS];
-@@ -132,6 +137,8 @@ struct vfio_pci_device {
- struct list_head dummy_resources_list;
- struct mutex ioeventfds_lock;
- struct list_head ioeventfds_list;
-+ struct mutex vma_lock;
-+ struct list_head vma_list;
- };
-
- #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
-
-
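
The mmap strategy patch 2 switches to is a standard deferred-mapping
pattern: mmap() only records vm_ops and vm_flags, and the fault
handler installs the PTEs on first access, which is what later makes
zap_vma_ptes() an effective invalidation. A minimal sketch with
hypothetical demo_* names, omitting the vma_list bookkeeping:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Populate the whole range on first touch. */
    static vm_fault_t demo_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;

            if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                vma->vm_end - vma->vm_start,
                                vma->vm_page_prot))
                    return VM_FAULT_SIGBUS;

            /* The handler installed the PTEs itself. */
            return VM_FAULT_NOPAGE;
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .fault = demo_fault,
    };

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* remap_pfn_range() expects these flags, but vm_flags cannot
             * be changed from the fault handler, so set them here. */
            vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            /* vm_pgoff is assumed to already hold the physical PFN. */
            vma->vm_ops = &demo_vm_ops;
            return 0;
    }

After a zap_vma_ptes(), the next access simply faults again, which is
exactly the hook patch 3 uses to re-check the memory enable state.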
-From MAILER-DAEMON Wed May 20 15:47:40 2020
-Subject: [PATCH v2 3/3] vfio-pci: Invalidate mmaps and block MMIO access on disabled memory
-From: Alex Williamson <alex.williamson@redhat.com>
-To: kvm@vger.kernel.org
-Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
-Date: Tue, 05 May 2020 15:55:02 -0600
-Message-ID: <158871570274.15589.10563806532874116326.stgit@gimli.home>
-In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
-Sender: kvm-owner@vger.kernel.org
-List-ID: <kvm.vger.kernel.org>
-X-Mailing-List: kvm@vger.kernel.org
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 7bit
-
-Accessing the disabled memory space of a PCI device would typically
-result in a master abort response on conventional PCI, or an
-unsupported request on PCI Express. The user would generally see
-reads return -1 and writes silently discarded, possibly with an
-uncorrected, non-fatal AER error triggered on the host. Some systems,
-however, take it upon themselves to bring down the entire system when
-they see something that might indicate a loss of data, such as this
-discarded write to a disabled memory space.
-
-To avoid this, we want to try to block the user from accessing memory
-spaces while they're disabled. We start with a semaphore around the
-memory enable bit, where writers modify the memory enable state and
-must be serialized, while readers make use of the memory region and
-can access it in parallel. Writers include direct manipulation via
-the command register, any reset path where the internal mechanics of
-the reset may explicitly or implicitly disable memory access, and
-manipulation of the MSI-X configuration, where the MSI-X vector table
-resides in the MMIO space of the device. Readers include the read and
-write file ops that access the vfio device fd offsets, as well as
-memory mapped access. In the latter case, we make use of our new vma
-list support to zap, or invalidate, those memory mappings in order to
-force them to be faulted back in on access (a skeleton of this
-locking discipline follows the series).
-
-Our semaphore usage will stall user access to MMIO spaces across
-internal operations like reset, but the user might experience new
-behavior when trying to access the MMIO space while it is disabled
-via the PCI command register. Access via read or write while disabled
-will return -EIO, and access via memory maps will result in a SIGBUS.
-This is expected to be compatible with known use cases and
-potentially provides better error handling than the hardware itself,
-while avoiding the more readily accessible and severe platform error
-responses that might otherwise occur.
-
-Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
----
- drivers/vfio/pci/vfio_pci.c | 263 +++++++++++++++++++++++++++++++----
- drivers/vfio/pci/vfio_pci_config.c | 36 ++++-
- drivers/vfio/pci/vfio_pci_intrs.c | 18 ++
- drivers/vfio/pci/vfio_pci_private.h | 5 +
- drivers/vfio/pci/vfio_pci_rdwr.c | 12 ++
- 5 files changed, 300 insertions(+), 34 deletions(-)
-
-diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
-index 66a545a01f8f..49ae9faa6099 100644
---- a/drivers/vfio/pci/vfio_pci.c
-+++ b/drivers/vfio/pci/vfio_pci.c
-@@ -26,6 +26,7 @@
- #include <linux/vfio.h>
- #include <linux/vgaarb.h>
- #include <linux/nospec.h>
-+#include <linux/sched/mm.h>
-
- #include "vfio_pci_private.h"
-
-@@ -184,6 +185,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
-
- static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
- static void vfio_pci_disable(struct vfio_pci_device *vdev);
-+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
-
- /*
- * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
-@@ -736,6 +738,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
- return 0;
- }
-
-+struct vfio_devices {
-+ struct vfio_device **devices;
-+ int cur_index;
-+ int max_index;
-+};
-+
- static long vfio_pci_ioctl(void *device_data,
- unsigned int cmd, unsigned long arg)
- {
-@@ -984,8 +992,16 @@ static long vfio_pci_ioctl(void *device_data,
- return ret;
-
- } else if (cmd == VFIO_DEVICE_RESET) {
-- return vdev->reset_works ?
-- pci_try_reset_function(vdev->pdev) : -EINVAL;
-+ int ret;
-+
-+ if (!vdev->reset_works)
-+ return -EINVAL;
-+
-+ vfio_pci_zap_and_down_write_memory_lock(vdev);
-+ ret = pci_try_reset_function(vdev->pdev);
-+ up_write(&vdev->memory_lock);
-+
-+ return ret;
-
- } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
- struct vfio_pci_hot_reset_info hdr;
-@@ -1065,8 +1081,9 @@ static long vfio_pci_ioctl(void *device_data,
- int32_t *group_fds;
- struct vfio_pci_group_entry *groups;
- struct vfio_pci_group_info info;
-+ struct vfio_devices devs = { .cur_index = 0 };
- bool slot = false;
-- int i, count = 0, ret = 0;
-+ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
-
- minsz = offsetofend(struct vfio_pci_hot_reset, count);
-
-@@ -1118,9 +1135,9 @@ static long vfio_pci_ioctl(void *device_data,
- * user interface and store the group and iommu ID. This
- * ensures the group is held across the reset.
- */
-- for (i = 0; i < hdr.count; i++) {
-+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
- struct vfio_group *group;
-- struct fd f = fdget(group_fds[i]);
-+ struct fd f = fdget(group_fds[group_idx]);
- if (!f.file) {
- ret = -EBADF;
- break;
-@@ -1133,8 +1150,9 @@ static long vfio_pci_ioctl(void *device_data,
- break;
- }
-
-- groups[i].group = group;
-- groups[i].id = vfio_external_user_iommu_id(group);
-+ groups[group_idx].group = group;
-+ groups[group_idx].id =
-+ vfio_external_user_iommu_id(group);
- }
-
- kfree(group_fds);
-@@ -1153,13 +1171,63 @@ static long vfio_pci_ioctl(void *device_data,
- ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_validate_devs,
- &info, slot);
-- if (!ret)
-- /* User has access, do the reset */
-- ret = pci_reset_bus(vdev->pdev);
-+ if (ret)
-+ goto hot_reset_release;
-+
-+ devs.max_index = count;
-+ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
-+ GFP_KERNEL);
-+ if (!devs.devices) {
-+ ret = -ENOMEM;
-+ goto hot_reset_release;
-+ }
-+
-+ /*
-+ * We need to get memory_lock for each device, but devices
-+ * can share mmap_sem, therefore we need to zap and hold
-+ * the vma_lock for each device, and only then get each
-+ * memory_lock.
-+ */
-+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
-+ vfio_pci_try_zap_and_vma_lock_cb,
-+ &devs, slot);
-+ if (ret)
-+ goto hot_reset_release;
-+
-+ for (; mem_idx < devs.cur_index; mem_idx++) {
-+ struct vfio_pci_device *tmp;
-+
-+ tmp = vfio_device_data(devs.devices[mem_idx]);
-+
-+ ret = down_write_trylock(&tmp->memory_lock);
-+ if (!ret) {
-+ ret = -EBUSY;
-+ goto hot_reset_release;
-+ }
-+ mutex_unlock(&tmp->vma_lock);
-+ }
-+
-+ /* User has access, do the reset */
-+ ret = pci_reset_bus(vdev->pdev);
-
- hot_reset_release:
-- for (i--; i >= 0; i--)
-- vfio_group_put_external_user(groups[i].group);
-+ for (i = 0; i < devs.cur_index; i++) {
-+ struct vfio_device *device;
-+ struct vfio_pci_device *tmp;
-+
-+ device = devs.devices[i];
-+ tmp = vfio_device_data(device);
-+
-+ if (i < mem_idx)
-+ up_write(&tmp->memory_lock);
-+ else
-+ mutex_unlock(&tmp->vma_lock);
-+ vfio_device_put(device);
-+ }
-+ kfree(devs.devices);
-+
-+ for (group_idx--; group_idx >= 0; group_idx--)
-+ vfio_group_put_external_user(groups[group_idx].group);
-
- kfree(groups);
- return ret;
-@@ -1299,8 +1367,107 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
- return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
- }
-
--static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
-- struct vm_area_struct *vma)
-+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
-+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
-+{
-+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
-+
-+ /*
-+ * Lock ordering:
-+ * vma_lock is nested under mmap_sem for vm_ops callback paths.
-+ * The memory_lock semaphore is used by both code paths calling
-+ * into this function to zap vmas and the vm_ops.fault callback
-+ * to protect the memory enable state of the device.
-+ *
-+ * When zapping vmas we need to maintain the mmap_sem => vma_lock
-+ * ordering, which requires using vma_lock to walk vma_list to
-+ * acquire an mm, then dropping vma_lock to get the mmap_sem and
-+ * reacquiring vma_lock. This logic is derived from similar
-+ * requirements in uverbs_user_mmap_disassociate().
-+ *
-+ * mmap_sem must always be the top-level lock when it is taken.
-+ * Therefore we can only hold the memory_lock write lock when
-+ * vma_list is empty, as we'd need to take mmap_sem to clear
-+ * entries. vma_list can only be guaranteed empty when holding
-+ * vma_lock, thus memory_lock is nested under vma_lock.
-+ *
-+ * This enables the vm_ops.fault callback to acquire vma_lock,
-+ * followed by memory_lock read lock, while already holding
-+ * mmap_sem without risk of deadlock.
-+ */
-+ while (1) {
-+ struct mm_struct *mm = NULL;
-+
-+ if (try) {
-+ if (!mutex_trylock(&vdev->vma_lock))
-+ return 0;
-+ } else {
-+ mutex_lock(&vdev->vma_lock);
-+ }
-+ while (!list_empty(&vdev->vma_list)) {
-+ mmap_vma = list_first_entry(&vdev->vma_list,
-+ struct vfio_pci_mmap_vma,
-+ vma_next);
-+ mm = mmap_vma->vma->vm_mm;
-+ if (mmget_not_zero(mm))
-+ break;
-+
-+ list_del(&mmap_vma->vma_next);
-+ kfree(mmap_vma);
-+ mm = NULL;
-+ }
-+ if (!mm)
-+ return 1;
-+ mutex_unlock(&vdev->vma_lock);
-+
-+ if (try) {
-+ if (!down_read_trylock(&mm->mmap_sem)) {
-+ mmput(mm);
-+ return 0;
-+ }
-+ } else {
-+ down_read(&mm->mmap_sem);
-+ }
-+ if (mmget_still_valid(mm)) {
-+ if (try) {
-+ if (!mutex_trylock(&vdev->vma_lock)) {
-+ up_read(&mm->mmap_sem);
-+ mmput(mm);
-+ return 0;
-+ }
-+ } else {
-+ mutex_lock(&vdev->vma_lock);
-+ }
-+ list_for_each_entry_safe(mmap_vma, tmp,
-+ &vdev->vma_list, vma_next) {
-+ struct vm_area_struct *vma = mmap_vma->vma;
-+
-+ if (vma->vm_mm != mm)
-+ continue;
-+
-+ list_del(&mmap_vma->vma_next);
-+ kfree(mmap_vma);
-+
-+ zap_vma_ptes(vma, vma->vm_start,
-+ vma->vm_end - vma->vm_start);
-+ }
-+ mutex_unlock(&vdev->vma_lock);
-+ }
-+ up_read(&mm->mmap_sem);
-+ mmput(mm);
-+ }
-+}
-+
-+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
-+{
-+ vfio_pci_zap_and_vma_lock(vdev, false);
-+ down_write(&vdev->memory_lock);
-+ mutex_unlock(&vdev->vma_lock);
-+}
-+
-+/* Caller holds vma_lock */
-+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
-+ struct vm_area_struct *vma)
- {
- struct vfio_pci_mmap_vma *mmap_vma;
-
-@@ -1309,10 +1476,7 @@ static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
- return -ENOMEM;
-
- mmap_vma->vma = vma;
--
-- mutex_lock(&vdev->vma_lock);
- list_add(&mmap_vma->vma_next, &vdev->vma_list);
-- mutex_unlock(&vdev->vma_lock);
-
- return 0;
- }
-@@ -1346,15 +1510,32 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
- {
- struct vm_area_struct *vma = vmf->vma;
- struct vfio_pci_device *vdev = vma->vm_private_data;
-+ vm_fault_t ret = VM_FAULT_NOPAGE;
-
-- if (vfio_pci_add_vma(vdev, vma))
-- return VM_FAULT_OOM;
-+ mutex_lock(&vdev->vma_lock);
-+ down_read(&vdev->memory_lock);
-+
-+ if (!__vfio_pci_memory_enabled(vdev)) {
-+ ret = VM_FAULT_SIGBUS;
-+ mutex_unlock(&vdev->vma_lock);
-+ goto up_out;
-+ }
-+
-+ if (__vfio_pci_add_vma(vdev, vma)) {
-+ ret = VM_FAULT_OOM;
-+ mutex_unlock(&vdev->vma_lock);
-+ goto up_out;
-+ }
-+
-+ mutex_unlock(&vdev->vma_lock);
-
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
-- return VM_FAULT_SIGBUS;
-+ ret = VM_FAULT_SIGBUS;
-
-- return VM_FAULT_NOPAGE;
-+up_out:
-+ up_read(&vdev->memory_lock);
-+ return ret;
- }
-
- static const struct vm_operations_struct vfio_pci_mmap_ops = {
-@@ -1680,6 +1861,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- INIT_LIST_HEAD(&vdev->ioeventfds_list);
- mutex_init(&vdev->vma_lock);
- INIT_LIST_HEAD(&vdev->vma_list);
-+ init_rwsem(&vdev->memory_lock);
-
- ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
- if (ret) {
-@@ -1933,12 +2115,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
- kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
- }
-
--struct vfio_devices {
-- struct vfio_device **devices;
-- int cur_index;
-- int max_index;
--};
--
- static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
- {
- struct vfio_devices *devs = data;
-@@ -1969,6 +2145,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
- return 0;
- }
-
-+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
-+{
-+ struct vfio_devices *devs = data;
-+ struct vfio_device *device;
-+ struct vfio_pci_device *vdev;
-+
-+ if (devs->cur_index == devs->max_index)
-+ return -ENOSPC;
-+
-+ device = vfio_device_get_from_dev(&pdev->dev);
-+ if (!device)
-+ return -EINVAL;
-+
-+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
-+ vfio_device_put(device);
-+ return -EBUSY;
-+ }
-+
-+ vdev = vfio_device_data(device);
-+
-+ /*
-+	 * Locking multiple devices is prone to deadlock; run away and
-+	 * unwind if we hit contention.
-+ */
-+ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
-+ vfio_device_put(device);
-+ return -EBUSY;
-+ }
-+
-+ devs->devices[devs->cur_index++] = device;
-+ return 0;
-+}
-+
- /*
- * If a bus or slot reset is available for the provided device and:
- * - All of the devices affected by that bus or slot reset are unused
-diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
-index 90c0b80f8acf..3dcddbd572e6 100644
---- a/drivers/vfio/pci/vfio_pci_config.c
-+++ b/drivers/vfio/pci/vfio_pci_config.c
-@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
- *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
- }
-
-+/* Caller should hold memory_lock semaphore */
-+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
-+{
-+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
-+
-+ return cmd & PCI_COMMAND_MEMORY;
-+}
-+
- /*
- * Restore the *real* BARs after we detect a FLR or backdoor reset.
- * (backdoor = some device specific technique that we didn't catch)
-@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
-
- new_cmd = le32_to_cpu(val);
-
-+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
-+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
-+ new_io = !!(new_cmd & PCI_COMMAND_IO);
-+
- phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
- virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
- new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
-
-- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
-- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
-- new_io = !!(new_cmd & PCI_COMMAND_IO);
-+ if (!new_mem)
-+ vfio_pci_zap_and_down_write_memory_lock(vdev);
-+ else
-+ down_write(&vdev->memory_lock);
-
- /*
- * If the user is writing mem/io enable (new_mem/io) and we
-@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
- }
-
- count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
-- if (count < 0)
-+ if (count < 0) {
-+ if (offset == PCI_COMMAND)
-+ up_write(&vdev->memory_lock);
- return count;
-+ }
-
- /*
- * Save current memory/io enable bits in vconfig to allow for
-@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
-
- *virt_cmd &= cpu_to_le16(~mask);
- *virt_cmd |= cpu_to_le16(new_cmd & mask);
-+
-+ up_write(&vdev->memory_lock);
- }
-
- /* Emulate INTx disable */
-@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
- pos - offset + PCI_EXP_DEVCAP,
- &cap);
-
-- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
-+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
-+ vfio_pci_zap_and_down_write_memory_lock(vdev);
- pci_try_reset_function(vdev->pdev);
-+ up_write(&vdev->memory_lock);
-+ }
- }
-
- /*
-@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
- pos - offset + PCI_AF_CAP,
- &cap);
-
-- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
-+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
-+ vfio_pci_zap_and_down_write_memory_lock(vdev);
- pci_try_reset_function(vdev->pdev);
-+ up_write(&vdev->memory_lock);
-+ }
- }
-
- return count;
-diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
-index 2056f3f85f59..54102a7eb9d3 100644
---- a/drivers/vfio/pci/vfio_pci_intrs.c
-+++ b/drivers/vfio/pci/vfio_pci_intrs.c
-@@ -626,6 +626,8 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
- int (*func)(struct vfio_pci_device *vdev, unsigned index,
- unsigned start, unsigned count, uint32_t flags,
- void *data) = NULL;
-+ int ret;
-+ u16 cmd;
-
- switch (index) {
- case VFIO_PCI_INTX_IRQ_INDEX:
-@@ -673,5 +675,19 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
- if (!func)
- return -ENOTTY;
-
-- return func(vdev, index, start, count, flags, data);
-+ if (index == VFIO_PCI_MSIX_IRQ_INDEX) {
-+ down_write(&vdev->memory_lock);
-+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
-+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
-+ cmd | PCI_COMMAND_MEMORY);
-+ }
-+
-+ ret = func(vdev, index, start, count, flags, data);
-+
-+ if (index == VFIO_PCI_MSIX_IRQ_INDEX) {
-+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
-+ up_write(&vdev->memory_lock);
-+ }
-+
-+ return ret;
- }
-diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
-index 9b25f9f6ce1d..c4f25f1e80d7 100644
---- a/drivers/vfio/pci/vfio_pci_private.h
-+++ b/drivers/vfio/pci/vfio_pci_private.h
-@@ -139,6 +139,7 @@ struct vfio_pci_device {
- struct list_head ioeventfds_list;
- struct mutex vma_lock;
- struct list_head vma_list;
-+ struct rw_semaphore memory_lock;
- };
-
- #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
-@@ -181,6 +182,10 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
- extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
- pci_power_t state);
-
-+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
-+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
-+ *vdev);
-+
- #ifdef CONFIG_VFIO_PCI_IGD
- extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
- #else
-diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
-index a87992892a9f..f58c45308682 100644
---- a/drivers/vfio/pci/vfio_pci_rdwr.c
-+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
-@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
- size_t x_start = 0, x_end = 0;
- resource_size_t end;
- void __iomem *io;
-+ struct resource *res = &vdev->pdev->resource[bar];
- ssize_t done;
-
- if (pci_resource_start(pdev, bar))
-@@ -200,8 +201,19 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
- x_end = vdev->msix_offset + vdev->msix_size;
- }
-
-+ if (res->flags & IORESOURCE_MEM) {
-+ down_read(&vdev->memory_lock);
-+ if (!__vfio_pci_memory_enabled(vdev)) {
-+ up_read(&vdev->memory_lock);
-+ return -EIO;
-+ }
-+ }
-+
- done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
-
-+ if (res->flags & IORESOURCE_MEM)
-+ up_read(&vdev->memory_lock);
-+
- if (done >= 0)
- *ppos += done;
-
-
-
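
Stripped of the vma bookkeeping and the vma_lock nesting described in
the comments above, the memory_lock discipline of patch 3 reduces to a
classic rwsem pattern. A skeleton under those simplifications, with a
hypothetical demo_dev standing in for vfio_pci_device:

    #include <linux/errno.h>
    #include <linux/rwsem.h>

    struct demo_dev {
            struct rw_semaphore memory_lock;
            bool mem_enabled;
    };

    /* Writer side: command register writes, FLR and other reset paths,
     * MSI-X reconfiguration. Serialized against all readers. */
    static void demo_set_mem_enable(struct demo_dev *d, bool enable)
    {
            down_write(&d->memory_lock);
            d->mem_enabled = enable;
            /* When disabling, this is also where the series zaps user
             * mappings so that later accesses re-fault and re-check. */
            up_write(&d->memory_lock);
    }

    /* Reader side: read()/write() on BAR offsets and the mmap fault
     * handler. Readers proceed in parallel; a disabled memory space
     * turns the access into -EIO (or SIGBUS on the fault path). */
    static ssize_t demo_bar_access(struct demo_dev *d)
    {
            ssize_t ret;

            down_read(&d->memory_lock);
            if (!d->mem_enabled)
                    ret = -EIO;
            else
                    ret = 0;        /* ...perform the MMIO access... */
            up_read(&d->memory_lock);
            return ret;
    }

Because readers hold the semaphore only across each individual access
rather than for the life of the mapping, a writer such as reset stalls
accesses briefly instead of having to revoke the device fd.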