author     Thorsten Leemhuis <fedora@leemhuis.info>  2020-05-20 22:22:59 +0200
committer  Thorsten Leemhuis <fedora@leemhuis.info>  2020-05-20 22:22:59 +0200
commit     c1973aa73f36efb60266c4abeea658f7129c1dec (patch)
tree       a71bbd1f73aa81d2e07b4288868021986ba151d4
parent     cad8e05c7fb91e3d5ef5081b96cf0d4121e749c5 (diff)
parent     5cc680aaa3cd73854048b0c43aef31b5ed7e0245 (diff)
download   kernel-c1973aa73f36efb60266c4abeea658f7129c1dec.tar.gz
           kernel-c1973aa73f36efb60266c4abeea658f7129c1dec.tar.xz
           kernel-c1973aa73f36efb60266c4abeea658f7129c1dec.zip
Merge remote-tracking branch 'origin/f32' into f32-user-thl-vanilla-fedora
-rw-r--r--  0001-platform-x86-sony-laptop-SNC-calls-should-handle-BUF.patch  114
-rw-r--r--  arm64-drm-tegra-Fix-SMMU-support-on-Tegra124-and-Tegra210.patch  320
-rw-r--r--  kernel.spec                                                       25
-rw-r--r--  usb-usbfs-correct-kernel-user-page-attribute-mismatch.patch      104
-rw-r--r--  vboxguest-fixes.patch                                            843
-rw-r--r--  vfio-pci-block-user-access-to-disabled-device-MMIO.patch         857
6 files changed, 1831 insertions(+), 432 deletions(-)
diff --git a/0001-platform-x86-sony-laptop-SNC-calls-should-handle-BUF.patch b/0001-platform-x86-sony-laptop-SNC-calls-should-handle-BUF.patch
new file mode 100644
index 000000000..dcae360a7
--- /dev/null
+++ b/0001-platform-x86-sony-laptop-SNC-calls-should-handle-BUF.patch
@@ -0,0 +1,114 @@
+From 47828d22539f76c8c9dcf2a55f18ea3a8039d8ef Mon Sep 17 00:00:00 2001
+From: Mattia Dongili <malattia@linux.it>
+Date: Fri, 8 May 2020 09:14:04 +0900
+Subject: [PATCH] platform/x86: sony-laptop: SNC calls should handle BUFFER
+ types
+
+After commit 6d232b29cfce ("ACPICA: Dispatcher: always generate buffer
+objects for ASL create_field() operator") ACPICA creates buffers even
+when new fields are small enough to fit into an integer.
+Many SNC calls counted on the old behaviour.
+Since sony-laptop already handles the INTEGER/BUFFER case in
+sony_nc_buffer_call, switch sony_nc_int_call to that more generic
+function instead.
+
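+As an illustrative aside (editor's sketch, not part of the driver; the
+SNC method name and argument here are hypothetical), callers of the
+integer helper are unchanged by this conversion:
+
+	int result;
+	int arg = 0x1;
+
+	/* evaluate an SNC method; after this patch the helper also
+	 * copes with ACPICA returning a small ACPI_TYPE_BUFFER where
+	 * an ACPI_TYPE_INTEGER used to be returned
+	 */
+	if (sony_nc_int_call(handle, "SN02", &arg, &result))
+		pr_warn("SNC call failed\n");
+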
+Fixes: 6d232b29cfce ("ACPICA: Dispatcher: always generate buffer objects for ASL create_field() operator")
+Reported-by: Dominik Mierzejewski <dominik@greysector.net>
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=207491
+Reported-by: William Bader <williambader@hotmail.com>
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1830150
+Signed-off-by: Mattia Dongili <malattia@linux.it>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+---
+ drivers/platform/x86/sony-laptop.c | 53 +++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index 51309f7ceede..6932cd11e660 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -757,33 +757,6 @@ static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+ return result;
+ }
+
+-static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+- int *result)
+-{
+- union acpi_object *object = NULL;
+- if (value) {
+- u64 v = *value;
+- object = __call_snc_method(handle, name, &v);
+- } else
+- object = __call_snc_method(handle, name, NULL);
+-
+- if (!object)
+- return -EINVAL;
+-
+- if (object->type != ACPI_TYPE_INTEGER) {
+- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+- ACPI_TYPE_INTEGER, object->type);
+- kfree(object);
+- return -EINVAL;
+- }
+-
+- if (result)
+- *result = object->integer.value;
+-
+- kfree(object);
+- return 0;
+-}
+-
+ #define MIN(a, b) (a > b ? b : a)
+ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ void *buffer, size_t buflen)
+@@ -795,17 +768,20 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ if (!object)
+ return -EINVAL;
+
+- if (object->type == ACPI_TYPE_BUFFER) {
++ if (!buffer) {
++ /* do nothing */
++ } else if (object->type == ACPI_TYPE_BUFFER) {
+ len = MIN(buflen, object->buffer.length);
++ memset(buffer, 0, buflen);
+ memcpy(buffer, object->buffer.pointer, len);
+
+ } else if (object->type == ACPI_TYPE_INTEGER) {
+ len = MIN(buflen, sizeof(object->integer.value));
++ memset(buffer, 0, buflen);
+ memcpy(buffer, &object->integer.value, len);
+
+ } else {
+- pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+- ACPI_TYPE_BUFFER, object->type);
++ pr_warn("Unexpected acpi_object: 0x%x\n", object->type);
+ ret = -EINVAL;
+ }
+
+@@ -813,6 +789,23 @@ static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ return ret;
+ }
+
++static int sony_nc_int_call(acpi_handle handle, char *name, int *value, int
++ *result)
++{
++ int ret;
++
++ if (value) {
++ u64 v = *value;
++
++ ret = sony_nc_buffer_call(handle, name, &v, result,
++ sizeof(*result));
++ } else {
++ ret = sony_nc_buffer_call(handle, name, NULL, result,
++ sizeof(*result));
++ }
++ return ret;
++}
++
+ struct sony_nc_handles {
+ u16 cap[0x10];
+ struct device_attribute devattr;
+--
+2.26.2
+
diff --git a/arm64-drm-tegra-Fix-SMMU-support-on-Tegra124-and-Tegra210.patch b/arm64-drm-tegra-Fix-SMMU-support-on-Tegra124-and-Tegra210.patch
deleted file mode 100644
index 3d43cd710..000000000
--- a/arm64-drm-tegra-Fix-SMMU-support-on-Tegra124-and-Tegra210.patch
+++ /dev/null
@@ -1,320 +0,0 @@
-From patchwork Wed Mar 25 20:16:03 2020
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-X-Patchwork-Submitter: Thierry Reding <thierry.reding@gmail.com>
-X-Patchwork-Id: 1261638
-Return-Path: <linux-tegra-owner@vger.kernel.org>
-X-Original-To: incoming@patchwork.ozlabs.org
-Delivered-To: patchwork-incoming@bilbo.ozlabs.org
-Authentication-Results: ozlabs.org; spf=none (no SPF record)
- smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67;
- helo=vger.kernel.org;
- envelope-from=linux-tegra-owner@vger.kernel.org;
- receiver=<UNKNOWN>)
-Authentication-Results: ozlabs.org;
- dmarc=pass (p=none dis=none) header.from=gmail.com
-Authentication-Results: ozlabs.org; dkim=pass (2048-bit key;
- unprotected) header.d=gmail.com header.i=@gmail.com
- header.a=rsa-sha256 header.s=20161025 header.b=sj7XVrax;
- dkim-atps=neutral
-Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
- by ozlabs.org (Postfix) with ESMTP id 48nfWs1X7mz9sRf
- for <incoming@patchwork.ozlabs.org>;
- Thu, 26 Mar 2020 07:16:09 +1100 (AEDT)
-Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
- id S1727374AbgCYUQI (ORCPT <rfc822;incoming@patchwork.ozlabs.org>);
- Wed, 25 Mar 2020 16:16:08 -0400
-Received: from mail-wm1-f68.google.com ([209.85.128.68]:50585 "EHLO
- mail-wm1-f68.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
- with ESMTP id S1727328AbgCYUQI (ORCPT
- <rfc822;linux-tegra@vger.kernel.org>);
- Wed, 25 Mar 2020 16:16:08 -0400
-Received: by mail-wm1-f68.google.com with SMTP id d198so4073496wmd.0
- for <linux-tegra@vger.kernel.org>;
- Wed, 25 Mar 2020 13:16:07 -0700 (PDT)
-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;
- h=from:to:cc:subject:date:message-id:mime-version
- :content-transfer-encoding;
- bh=yPLnagV0XBnTWT+nGjtRaD+LnSq2BqmeAJnp8U+CWPw=;
- b=sj7XVraxdwiyRAeepEQ0wy1nLUUH6vcloNotxoFwaAZmvU2GILePtp+OM8VZxzmSg1
- qVjos+BzgdtxI0QGYvlsRwZJmw1PdwfTDzM8kMKmP2AfXDgnFG7LZsGZnzTmdPqErqG6
- RfQwpZiPunHplEvI/epnPHACQlV9HoX+teAIWP9gyJkMYwBCVOirkfv4yGqGZWyEciZ2
- yM5mGeUZ/OprHtVVEEuF5yb50CJm8cBEHBMr2ooS+0jm+avVEG8DKe9QM2nWgJB7+TXH
- 7+iryK1A4PDr9L6syw0p6sAbkFd2+P/p44d/rqsKPWTQG0lkd0cgRHx9fVPls/P4Snyr
- JwCA==
-X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
- d=1e100.net; s=20161025;
- h=x-gm-message-state:from:to:cc:subject:date:message-id:mime-version
- :content-transfer-encoding;
- bh=yPLnagV0XBnTWT+nGjtRaD+LnSq2BqmeAJnp8U+CWPw=;
- b=HWu2t1YnW/GoMLlkfp6ZQha8CvUnfMi/OK1zsN3hDtTtMLwVQL9YBFPvXYfAASIGzA
- qXmgdbIdQmwOXRxlDmgcXk8KcOJmvnJTSoE+GPeLrKGVq9h2c6XLINshs7RDWqY7//GM
- /NMVkESX/sVh5qVQYVzsQOBWAsLkwpVAmt3lJ81XrCGdA/L5aN2FWOftTWJWoStgtHuB
- 9N27ffBkV8/72gDCcGxM/lJlfxMBcfPIEMDGWlErsl2U/EPtF+e5AH1kF9/a+lImxa1h
- vBlXvgfPKazfOLm1jA809U0QJrCy5bmTOJsaLqnkLPNJRyvlY6JZqk8a1Wc4u6l44uoI
- 4l3g==
-X-Gm-Message-State: ANhLgQ0GzmzHn/uC4G4GzXRW/D8i6fcQ7Y04Wxx+yBOvoeixp0lD9PYD
- 9Q7E3Ezt7uCnfh5D41Ym8jY=
-X-Google-Smtp-Source: ADFU+vvV+Qjqcd+wksczhsC9MSisSEM36LfhftNulFkmYxqwCfpDcq22YDEoWHYpgjaXwwZC4lgCyg==
-X-Received: by 2002:a7b:c842:: with SMTP id c2mr5416219wml.154.1585167366416;
- Wed, 25 Mar 2020 13:16:06 -0700 (PDT)
-Received: from localhost
- (p200300E41F4A9B0076D02BFFFE273F51.dip0.t-ipconnect.de.
- [2003:e4:1f4a:9b00:76d0:2bff:fe27:3f51])
- by smtp.gmail.com with ESMTPSA id
- i4sm132568wrm.32.2020.03.25.13.16.05
- (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
- Wed, 25 Mar 2020 13:16:05 -0700 (PDT)
-From: Thierry Reding <thierry.reding@gmail.com>
-To: Thierry Reding <thierry.reding@gmail.com>
-Cc: dri-devel@lists.freedesktop.org, linux-tegra@vger.kernel.org
-Subject: [PATCH 1/2] drm/tegra: Fix SMMU support on Tegra124 and Tegra210
-Date: Wed, 25 Mar 2020 21:16:03 +0100
-Message-Id: <20200325201604.833898-1-thierry.reding@gmail.com>
-X-Mailer: git-send-email 2.24.1
-MIME-Version: 1.0
-Sender: linux-tegra-owner@vger.kernel.org
-Precedence: bulk
-List-ID: <linux-tegra.vger.kernel.org>
-X-Mailing-List: linux-tegra@vger.kernel.org
-
-From: Thierry Reding <treding@nvidia.com>
-
-When testing whether or not to enable the use of the SMMU, consult the
-supported DMA mask rather than the actually configured DMA mask, since
-the latter might already have been restricted.
-
-Fixes: 2d9384ff9177 ("drm/tegra: Relax IOMMU usage criteria on old Tegra")
-Signed-off-by: Thierry Reding <treding@nvidia.com>
-Tested-by: Jon Hunter <jonathanh@nvidia.com>
----
- drivers/gpu/drm/tegra/drm.c | 3 ++-
- drivers/gpu/host1x/dev.c | 13 +++++++++++++
- include/linux/host1x.h | 3 +++
- 3 files changed, 18 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
-index bd268028fb3d..583cd6e0ae27 100644
---- a/drivers/gpu/drm/tegra/drm.c
-+++ b/drivers/gpu/drm/tegra/drm.c
-@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
-
- static bool host1x_drm_wants_iommu(struct host1x_device *dev)
- {
-+ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
- struct iommu_domain *domain;
-
- /*
-@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
- * sufficient and whether or not the host1x is attached to an IOMMU
- * doesn't matter.
- */
-- if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
-+ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
- return true;
-
- return domain != NULL;
-diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
-index 388bcc2889aa..40a4b9f8b861 100644
---- a/drivers/gpu/host1x/dev.c
-+++ b/drivers/gpu/host1x/dev.c
-@@ -502,6 +502,19 @@ static void __exit tegra_host1x_exit(void)
- }
- module_exit(tegra_host1x_exit);
-
-+/**
-+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
-+ * @host1x: host1x instance
-+ *
-+ * Note that this returns the supported DMA mask for host1x, which can be
-+ * different from the applicable DMA mask under certain circumstances.
-+ */
-+u64 host1x_get_dma_mask(struct host1x *host1x)
-+{
-+ return host1x->info->dma_mask;
-+}
-+EXPORT_SYMBOL(host1x_get_dma_mask);
-+
- MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
- MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
- MODULE_DESCRIPTION("Host1x driver for Tegra products");
-diff --git a/include/linux/host1x.h b/include/linux/host1x.h
-index 62d216ff1097..c230b4e70d75 100644
---- a/include/linux/host1x.h
-+++ b/include/linux/host1x.h
-@@ -17,9 +17,12 @@ enum host1x_class {
- HOST1X_CLASS_GR3D = 0x60,
- };
-
-+struct host1x;
- struct host1x_client;
- struct iommu_group;
-
-+u64 host1x_get_dma_mask(struct host1x *host1x);
-+
- /**
- * struct host1x_client_ops - host1x client operations
- * @init: host1x client initialization code
-
-From patchwork Wed Mar 25 20:16:04 2020
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-X-Patchwork-Submitter: Thierry Reding <thierry.reding@gmail.com>
-X-Patchwork-Id: 1261639
-Return-Path: <linux-tegra-owner@vger.kernel.org>
-X-Original-To: incoming@patchwork.ozlabs.org
-Delivered-To: patchwork-incoming@bilbo.ozlabs.org
-Authentication-Results: ozlabs.org; spf=none (no SPF record)
- smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67;
- helo=vger.kernel.org;
- envelope-from=linux-tegra-owner@vger.kernel.org;
- receiver=<UNKNOWN>)
-Authentication-Results: ozlabs.org;
- dmarc=pass (p=none dis=none) header.from=gmail.com
-Authentication-Results: ozlabs.org; dkim=pass (2048-bit key;
- unprotected) header.d=gmail.com header.i=@gmail.com
- header.a=rsa-sha256 header.s=20161025 header.b=XXUz449u;
- dkim-atps=neutral
-Received: from vger.kernel.org (vger.kernel.org [209.132.180.67])
- by ozlabs.org (Postfix) with ESMTP id 48nfWw6NvSz9sPk
- for <incoming@patchwork.ozlabs.org>;
- Thu, 26 Mar 2020 07:16:12 +1100 (AEDT)
-Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
- id S1727316AbgCYUQM (ORCPT <rfc822;incoming@patchwork.ozlabs.org>);
- Wed, 25 Mar 2020 16:16:12 -0400
-Received: from mail-wr1-f65.google.com ([209.85.221.65]:33914 "EHLO
- mail-wr1-f65.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
- with ESMTP id S1727328AbgCYUQM (ORCPT
- <rfc822;linux-tegra@vger.kernel.org>);
- Wed, 25 Mar 2020 16:16:12 -0400
-Received: by mail-wr1-f65.google.com with SMTP id 65so4990084wrl.1
- for <linux-tegra@vger.kernel.org>;
- Wed, 25 Mar 2020 13:16:09 -0700 (PDT)
-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;
- h=from:to:cc:subject:date:message-id:in-reply-to:references
- :mime-version:content-transfer-encoding;
- bh=aW1zxIHiei+l8kDSE2lVXf/aMBDE/GtIkGFrQXvKkrY=;
- b=XXUz449uJivXz+1lH6pKa9IvT3vUx61/skXaEyQxpkslFR268FwckKE0ryQDUx701N
- hFN9ocSGCuE6bKpdgya8YmthXDASOYWZzKV0R5jms1rqgazVMF6jARv+kE4Jaj9Ek4tl
- 4eTpmnHinx0xIrgGWCQbfltjb+zAE5XOGX8UCX1526r3yQQpu+OQlKZ70Tvq3pdw0zfT
- URkTU8sfdTa9DCxUSsUukPcK9vKOk6XHkFleL6FisODDvXphdzzLa1TCv9UTGLrUsHSd
- XDrukLto5efrUE03q5jP6ZN4xbnLDbhY6IkB7PAW1qwSPG/Eg0p0ivpJ58+QwwmBH6zF
- ByDQ==
-X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
- d=1e100.net; s=20161025;
- h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to
- :references:mime-version:content-transfer-encoding;
- bh=aW1zxIHiei+l8kDSE2lVXf/aMBDE/GtIkGFrQXvKkrY=;
- b=DIWKPWCoYx1rnX34DSkRPm2K6lR1SurVvq+IIY5Nrc9uq+E3pmXQcActG0DDAHHK8a
- SgnziEvuWTeROgrlwONYq+FUZRQ6s1TRR1+qDXqAlRtdebU/cEep+LRvdzJe/qJBpPqd
- SnSTR3Xntgo7EcyLRj9YqSodasylPt3OzrhuDudfTSQtKZghElLfyJV/tzgwG+OC3TD4
- RJAykZ0tgWHy7Bc1UB+z6LovuT/sgcPUSLfNqDehQWqwQeqHqXgFAomUN0CCEr2YdjkT
- sCpBZPqKtb22FdDWlDiNnEkEmMPA+K4MIWbZL9VuvArjFaaBn6fBxvnX4tAKEcOiKeUy
- EZXw==
-X-Gm-Message-State: ANhLgQ1Vj1gSFYKgV/7jV1T3UIwTE5jasGmLOhuuGuWvjBs2xXUgieyz
- VhNVgYIYU/8R/0Vx9Hv44rw=
-X-Google-Smtp-Source: ADFU+vtTfrVHW69I+ZhOz8qw8xUje/j42rKoNxAP2wTt+E5WQ5s6QhBcgeHzC4Bw5Q5NdWxjLUtZ/g==
-X-Received: by 2002:adf:800e:: with SMTP id 14mr5104354wrk.369.1585167368929;
- Wed, 25 Mar 2020 13:16:08 -0700 (PDT)
-Received: from localhost
- (p200300E41F4A9B0076D02BFFFE273F51.dip0.t-ipconnect.de.
- [2003:e4:1f4a:9b00:76d0:2bff:fe27:3f51])
- by smtp.gmail.com with ESMTPSA id
- e9sm151985wrw.30.2020.03.25.13.16.07
- (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
- Wed, 25 Mar 2020 13:16:07 -0700 (PDT)
-From: Thierry Reding <thierry.reding@gmail.com>
-To: Thierry Reding <thierry.reding@gmail.com>
-Cc: dri-devel@lists.freedesktop.org, linux-tegra@vger.kernel.org
-Subject: [PATCH 2/2] gpu: host1x: Use SMMU on Tegra124 and Tegra210
-Date: Wed, 25 Mar 2020 21:16:04 +0100
-Message-Id: <20200325201604.833898-2-thierry.reding@gmail.com>
-X-Mailer: git-send-email 2.24.1
-In-Reply-To: <20200325201604.833898-1-thierry.reding@gmail.com>
-References: <20200325201604.833898-1-thierry.reding@gmail.com>
-MIME-Version: 1.0
-Sender: linux-tegra-owner@vger.kernel.org
-Precedence: bulk
-List-ID: <linux-tegra.vger.kernel.org>
-X-Mailing-List: linux-tegra@vger.kernel.org
-
-From: Thierry Reding <treding@nvidia.com>
-
-Tegra124 and Tegra210 support addressing more than 32 bits of physical
-memory. However, since their host1x does not support the wide GATHER
-opcode, they should use the SMMU if at all possible to ensure that all
-the system memory can be used for command buffers, irrespective of
-whether or not the host1x firewall is enabled.
-
-Signed-off-by: Thierry Reding <treding@nvidia.com>
-Tested-by: Jon Hunter <jonathanh@nvidia.com>
----
- drivers/gpu/host1x/dev.c | 46 ++++++++++++++++++++++++++++++++++++----
- 1 file changed, 42 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
-index 40a4b9f8b861..d24344e91922 100644
---- a/drivers/gpu/host1x/dev.c
-+++ b/drivers/gpu/host1x/dev.c
-@@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host)
- }
- }
-
-+static bool host1x_wants_iommu(struct host1x *host1x)
-+{
-+ /*
-+ * If we support addressing a maximum of 32 bits of physical memory
-+ * and if the host1x firewall is enabled, there's no need to enable
-+ * IOMMU support. This can happen for example on Tegra20, Tegra30
-+ * and Tegra114.
-+ *
-+ * Tegra124 and later can address up to 34 bits of physical memory and
-+ * many platforms come equipped with more than 2 GiB of system memory,
-+ * which requires crossing the 4 GiB boundary. But there's a catch: on
-+ * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
-+ * only address up to 32 bits of memory in GATHER opcodes, which means
-+ * that command buffers need to either be in the first 2 GiB of system
-+ * memory (which could quickly lead to memory exhaustion), or command
-+ * buffers need to be treated differently from other buffers (which is
-+ * not possible with the current ABI).
-+ *
-+ * A third option is to use the IOMMU in these cases to make sure all
-+ * buffers will be mapped into a 32-bit IOVA space that host1x can
-+ * address. This allows all of the system memory to be used and works
-+ * within the limitations of the host1x on these SoCs.
-+ *
-+ * In summary, default to enable IOMMU on Tegra124 and later. For any
-+ * of the earlier SoCs, only use the IOMMU for additional safety when
-+ * the host1x firewall is disabled.
-+ */
-+ if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
-+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
- static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
- {
- struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
- int err;
-
- /*
-- * If the host1x firewall is enabled, there's no need to enable IOMMU
-- * support. Similarly, if host1x is already attached to an IOMMU (via
-- * the DMA API), don't try to attach again.
-+ * We may not always want to enable IOMMU support (for example if the
-+ * host1x firewall is already enabled and we don't support addressing
-+ * more than 32 bits of physical memory), so check for that first.
-+ *
-+ * Similarly, if host1x is already attached to an IOMMU (via the DMA
-+ * API), don't try to attach again.
- */
-- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
-+ if (!host1x_wants_iommu(host) || domain)
- return domain;
-
- host->group = iommu_group_get(host->dev);
diff --git a/kernel.spec b/kernel.spec
index a69a6a836..330d7e226 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -838,9 +838,6 @@ Patch303: ACPI-irq-Workaround-firmware-issue-on-X-Gene-based-m400.patch
Patch304: ARM-tegra-usb-no-reset.patch
-# https://patchwork.kernel.org/patch/11527525/
-Patch305: usb-usbfs-correct-kernel-user-page-attribute-mismatch.patch
-
# Raspberry Pi
# https://patchwork.kernel.org/cover/11353083/
Patch310: arm64-pinctrl-bcm2835-Add-support-for-all-BCM2711-GPIOs.patch
@@ -864,8 +861,6 @@ Patch321: arm64-serial-8250_tegra-Create-Tegra-specific-8250-driver.patch
Patch324: regulator-pwm-Don-t-warn-on-probe-deferral.patch
# http://patchwork.ozlabs.org/patch/1243112/
Patch325: backlight-lp855x-Ensure-regulators-are-disabled-on-probe-failure.patch
-# https://patchwork.ozlabs.org/patch/1261638/
-Patch326: arm64-drm-tegra-Fix-SMMU-support-on-Tegra124-and-Tegra210.patch
# http://patchwork.ozlabs.org/patch/1221384/
Patch327: PCI-Add-MCFG-quirks-for-Tegra194-host-controllers.patch
# https://patchwork.ozlabs.org/patch/1281134/
@@ -927,9 +922,6 @@ Patch511: e1000e-bump-up-timeout-to-wait-when-ME-un-configure-ULP-mode.patch
Patch512: drm-dp_mst-Fix-drm_dp_send_dpcd_write-return-code.patch
-# CVE-2020-10711 rhbz 1825116 1834778
-Patch513: net-netlabel-cope-with-NULL-catmap.patch
-
#rhbz 1779611
Patch514: tpm-check-event-log-version-before-reading-final-eve.patch
@@ -942,6 +934,15 @@ Patch516: 0001-pwm-lpss-Fix-get_state-runtime-pm-reference-handling.patch
# kernel.org bz 206217
Patch517: RFC-PCI-tegra-Revert-raw_violation_fixup-for-tegra124.patch
+# CVE-2020-12888 rhbz 1836245 1836244
+Patch518: vfio-pci-block-user-access-to-disabled-device-MMIO.patch
+
+# rhbz 1789545
+Patch519: vboxguest-fixes.patch
+
+# rhbz 1830150
+Patch520: 0001-platform-x86-sony-laptop-SNC-calls-should-handle-BUF.patch
+
# END OF PATCH DEFINITIONS
%endif
@@ -3041,6 +3042,14 @@ fi
#
#
%changelog
+* Wed May 20 2020 Hans de Goede <hdegoede@redhat.com> - 5.6.14-300
+- Fix automatic guest resolution resizing of VirtualBox VMs (rhbz 1789545)
+- Fix Sony laptop hang on resume from suspend (rhbz 1830150)
+
+* Wed May 20 2020 Justin M. Forbes <jforbes@fedoraproject.org>
+- Linux v5.6.14
+- Fix CVE-2020-12888 (rhbz 1836245 1836244)
+
* Mon May 18 2020 Justin M. Forbes <jforbes@fedoraproject.org>
- Fix stability issue with the jetson-tk1 NIC
diff --git a/usb-usbfs-correct-kernel-user-page-attribute-mismatch.patch b/usb-usbfs-correct-kernel-user-page-attribute-mismatch.patch
deleted file mode 100644
index ad65b8db6..000000000
--- a/usb-usbfs-correct-kernel-user-page-attribute-mismatch.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From patchwork Mon May 4 20:13:48 2020
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-X-Patchwork-Submitter: Jeremy Linton <jeremy.linton@arm.com>
-X-Patchwork-Id: 11527525
-Return-Path: <SRS0=7ANT=6S=vger.kernel.org=linux-usb-owner@kernel.org>
-Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
- [172.30.200.123])
- by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 1F74F92A
- for <patchwork-linux-usb@patchwork.kernel.org>;
- Mon, 4 May 2020 20:14:04 +0000 (UTC)
-Received: from vger.kernel.org (vger.kernel.org [23.128.96.18])
- by mail.kernel.org (Postfix) with ESMTP id 11A4B20746
- for <patchwork-linux-usb@patchwork.kernel.org>;
- Mon, 4 May 2020 20:14:04 +0000 (UTC)
-Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
- id S1726756AbgEDUOB (ORCPT
- <rfc822;patchwork-linux-usb@patchwork.kernel.org>);
- Mon, 4 May 2020 16:14:01 -0400
-Received: from foss.arm.com ([217.140.110.172]:52874 "EHLO foss.arm.com"
- rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
- id S1726111AbgEDUOA (ORCPT <rfc822;linux-usb@vger.kernel.org>);
- Mon, 4 May 2020 16:14:00 -0400
-Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])
- by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 5BD30101E;
- Mon, 4 May 2020 13:14:00 -0700 (PDT)
-Received: from mammon-tx2.austin.arm.com (mammon-tx2.austin.arm.com
- [10.118.28.62])
- by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id
- 52CCC3F71F;
- Mon, 4 May 2020 13:14:00 -0700 (PDT)
-From: Jeremy Linton <jeremy.linton@arm.com>
-To: linux-usb@vger.kernel.org
-Cc: gregkh@linuxfoundation.org, stern@rowland.harvard.edu,
- git@thegavinli.com, jarkko.sakkinen@linux.intel.com,
- linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
- mark.rutland@arm.com, maz@kernel.org, robin.murphy@arm.com,
- Jeremy Linton <jeremy.linton@arm.com>
-Subject: [PATCH v2] usb: usbfs: correct kernel->user page attribute mismatch
-Date: Mon, 4 May 2020 15:13:48 -0500
-Message-Id: <20200504201348.1183246-1-jeremy.linton@arm.com>
-X-Mailer: git-send-email 2.24.1
-MIME-Version: 1.0
-Sender: linux-usb-owner@vger.kernel.org
-Precedence: bulk
-List-ID: <linux-usb.vger.kernel.org>
-X-Mailing-List: linux-usb@vger.kernel.org
-
-On some architectures (e.g. arm64) requests for
-IO coherent memory may use non-cacheable attributes if
-the relevant device isn't cache coherent. If these
-pages are then remapped into userspace as cacheable,
-they may not be coherent with the non-cacheable mappings.
-
-In particular this happens with libusb, when it attempts
-to create zero-copy buffers for use by rtl-sdr
-(https://github.com/osmocom/rtl-sdr/). On low end arm
-devices with non-coherent USB ports, the application will
-be unexpectedly killed, while continuing to work fine on
-arm machines with coherent USB controllers.
-
-This bug has been discovered/reported a few times over
-the last few years. In the case of rtl-sdr a compile time
-option to enable/disable zero copy was implemented to
-work around it.
-
-Rather than relying on application-specific workarounds,
-dma_mmap_coherent() can be used instead of remap_pfn_range().
-The page cache/etc attributes will then be correctly set in
-userspace to match the kernel mapping.
-
-Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
----
-v1->v2:
- Update commit message and change to dma_mmap_coherent()
- from dma_mmap_attr(,,,0), which is equivalent.
-
- drivers/usb/core/devio.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
-index 6833c918abce..b9db9812d6c5 100644
---- a/drivers/usb/core/devio.c
-+++ b/drivers/usb/core/devio.c
-@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
- {
- struct usb_memory *usbm = NULL;
- struct usb_dev_state *ps = file->private_data;
-+ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
- size_t size = vma->vm_end - vma->vm_start;
- void *mem;
- unsigned long flags;
-@@ -250,9 +251,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
- usbm->vma_use_count = 1;
- INIT_LIST_HEAD(&usbm->memlist);
-
-- if (remap_pfn_range(vma, vma->vm_start,
-- virt_to_phys(usbm->mem) >> PAGE_SHIFT,
-- size, vma->vm_page_prot) < 0) {
-+ if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) {
- dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
- return -EAGAIN;
- }
diff --git a/vboxguest-fixes.patch b/vboxguest-fixes.patch
new file mode 100644
index 000000000..90c95b374
--- /dev/null
+++ b/vboxguest-fixes.patch
@@ -0,0 +1,843 @@
+From ba5ea5d9d5d2ade5156cf2bc452ff6d1f5c4db37 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 12:21:30 +0200
+Subject: [PATCH 1/8] virt: vbox: Fix VBGL_IOCTL_VMMDEV_REQUEST_BIG and _LOG
+ req numbers to match upstream
+
+Until this commit the mainline kernel version (this version) of the
+vboxguest module contained a bug where it defined
+VBGL_IOCTL_VMMDEV_REQUEST_BIG and VBGL_IOCTL_LOG using
+_IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead of
+_IO('V', ...) as the out of tree VirtualBox upstream version does.
+
+Since the VirtualBox userspace bits are always built against VirtualBox
+upstream's headers, this means that so far the mainline kernel version
+of the vboxguest module has been failing these 2 ioctls with -ENOTTY.
+VBGL_IOCTL_VMMDEV_REQUEST_BIG appears to be unused, so that failure was
+never hit, but so far the vboxguest driver has silently failed to log
+any messages passed to it through VBGL_IOCTL_LOG.
+
+This commit changes the VBGL_IOCTL_VMMDEV_REQUEST_BIG and VBGL_IOCTL_LOG
+defines to match the out of tree VirtualBox upstream vboxguest version,
+while keeping compatibility with the old wrong request defines so as
+to not break the kernel ABI in case someone has been using the old
+request defines.
+
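+For illustration (editor's sketch, using the standard asm/ioctl.h bit
+layout, nr:8 | type:8 | size:14 | dir:2), the two encodings produce
+different request numbers, which is why userspace built against the
+upstream headers got -ENOTTY from the mainline module:
+
+	#include <linux/ioctl.h>
+
+	/* mainline (wrong): dir = read|write, size = 0 */
+	_IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)	/* 0xc0005603 */
+
+	/* VirtualBox upstream and userspace: dir = none */
+	_IO('V', 3)				/* 0x00005603 */
+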
+Fixes: f6ddd094f579 ("virt: Add vboxguest driver for Virtual Box Guest integration UAPI")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 4 +++-
+ drivers/virt/vboxguest/vboxguest_core.h | 15 +++++++++++++++
+ drivers/virt/vboxguest/vboxguest_linux.c | 3 ++-
+ include/uapi/linux/vboxguest.h | 4 ++--
+ 4 files changed, 22 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index b690a8a4bf9e..8fab04e76c14 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -1520,7 +1520,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+
+ /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
+ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+- req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
+ return vbg_ioctl_vmmrequest(gdev, session, data);
+
+ if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
+@@ -1558,6 +1559,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+ case VBG_IOCTL_HGCM_CALL(0):
+ return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
+ case VBG_IOCTL_LOG(0):
++ case VBG_IOCTL_LOG_ALT(0):
+ return vbg_ioctl_log(data);
+ }
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
+index 4188c12b839f..77c3a9c8255d 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.h
++++ b/drivers/virt/vboxguest/vboxguest_core.h
+@@ -15,6 +15,21 @@
+ #include <linux/vboxguest.h>
+ #include "vmmdev.h"
+
++/*
++ * The mainline kernel version (this version) of the vboxguest module
++ * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and
++ * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead
++ * of _IO('V', ...) as the out of tree VirtualBox upstream version does.
++ *
++ * These _ALT definitions keep compatibility with the wrong defines the
++ * mainline kernel version used for a while.
++ * Note the VirtualBox userspace bits have always been built against
++ * VirtualBox upstream's headers, so this is likely not necessary. But
++ * we must never break our ABI so we keep these around to be 100% sure.
++ */
++#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
++#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
++
+ struct vbg_session;
+
+ /** VBox guest memory balloon. */
+diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
+index 6e8c0f1c1056..32c2c52f7e84 100644
+--- a/drivers/virt/vboxguest/vboxguest_linux.c
++++ b/drivers/virt/vboxguest/vboxguest_linux.c
+@@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ * the need for a bounce-buffer and another copy later on.
+ */
+ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+- req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
+
+ if (is_vmmdev_req)
+ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
+index 9cec58a6a5ea..f79d7abe27db 100644
+--- a/include/uapi/linux/vboxguest.h
++++ b/include/uapi/linux/vboxguest.h
+@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
+
+
+ /* IOCTL to perform a VMM Device request larger than 1KB. */
+-#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
++#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3)
+
+
+ /** VBG_IOCTL_HGCM_CONNECT data structure. */
+@@ -198,7 +198,7 @@ struct vbg_ioctl_log {
+ } u;
+ };
+
+-#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
++#define VBG_IOCTL_LOG(s) _IO('V', 9)
+
+
+ /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
+--
+2.26.2
+
+From 14dc3b46e666343c55e5b253ed3dd4c57b13e778 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 13:23:06 +0200
+Subject: [PATCH 2/8] virt: vbox: Fix guest capabilities mask check
+
+Check the passed in capabilities against VMMDEV_GUEST_CAPABILITIES_MASK
+instead of against VMMDEV_EVENT_VALID_EVENT_MASK.
+This tightens the allowed mask from 0x7ff to 0x7.
+
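+For reference (editor's note; BIT(0) and BIT(2) are named elsewhere in
+this series, while the BIT(1) name is recalled from vmmdev.h and may
+differ), the mask 0x7 covers only the three defined capability bits:
+
+	VMMDEV_GUEST_SUPPORTS_SEAMLESS			BIT(0)
+	VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING	BIT(1)
+	VMMDEV_GUEST_SUPPORTS_GRAPHICS			BIT(2)
+
+while VMMDEV_EVENT_VALID_EVENT_MASK (0x7ff) spans eleven event bits, so
+masks with any of bits 3..10 set were previously accepted by mistake.
+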
+Fixes: 0ba002bc4393 ("virt: Add vboxguest driver for Virtual Box Guest integration")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 2 +-
+ drivers/virt/vboxguest/vmmdev.h | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 8fab04e76c14..18ebd7a6af98 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -1444,7 +1444,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
+ or_mask = caps->u.in.or_mask;
+ not_mask = caps->u.in.not_mask;
+
+- if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
++ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+ return -EINVAL;
+
+ ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
+diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
+index 6337b8d75d96..21f408120e3f 100644
+--- a/drivers/virt/vboxguest/vmmdev.h
++++ b/drivers/virt/vboxguest/vmmdev.h
+@@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
+ * not.
+ */
+ #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
++/* The mask of valid capabilities, for sanity checking. */
++#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U
+
+ /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
+ struct vmmdev_hypervisorinfo {
+--
+2.26.2
+
+From 172353a61c23015611a341c2f3f4888d866a560b Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 14:33:13 +0200
+Subject: [PATCH 3/8] virt: vbox: Rename guest_caps struct members to
+ set_guest_caps
+
+Rename the guest_caps[_tracker] struct members to set_guest_caps[_tracker].
+This is a preparation patch for adding support for the
+VBGL_IOCTL_GUEST_CAPS_ACQUIRE ioctl.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 20 ++++++++++----------
+ drivers/virt/vboxguest/vboxguest_core.h | 9 +++++----
+ 2 files changed, 15 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 18ebd7a6af98..aee5eff229f2 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -699,17 +699,17 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ mutex_lock(&gdev->session_mutex);
+
+ /* Apply the changes to the session mask. */
+- previous = session->guest_caps;
+- session->guest_caps |= or_mask;
+- session->guest_caps &= ~not_mask;
++ previous = session->set_guest_caps;
++ session->set_guest_caps |= or_mask;
++ session->set_guest_caps &= ~not_mask;
+
+ /* If anything actually changed, update the global usage counters. */
+- changed = previous ^ session->guest_caps;
++ changed = previous ^ session->set_guest_caps;
+ if (!changed)
+ goto out;
+
+- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
+- or_mask = gdev->guest_caps_tracker.mask;
++ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
++ or_mask = gdev->set_guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == or_mask || !req)
+ goto out;
+@@ -726,9 +726,9 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ if (session_termination)
+ goto out;
+
+- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
+- session->guest_caps);
+- session->guest_caps = previous;
++ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
++ session->set_guest_caps);
++ session->set_guest_caps = previous;
+ }
+
+ out:
+@@ -1452,7 +1452,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
+ if (ret)
+ return ret;
+
+- caps->u.out.session_caps = session->guest_caps;
++ caps->u.out.session_caps = session->set_guest_caps;
+ caps->u.out.global_caps = gdev->guest_caps_host;
+
+ return 0;
+diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
+index 77c3a9c8255d..dc745a033164 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.h
++++ b/drivers/virt/vboxguest/vboxguest_core.h
+@@ -118,11 +118,12 @@ struct vbg_dev {
+ u32 event_filter_host;
+
+ /**
+- * Usage counters for guest capabilities. Indexed by capability bit
++ * Usage counters for guest capabilities requested through
++ * vbg_set_session_capabilities(). Indexed by capability bit
+ * number, one count per session using a capability.
+ * Protected by session_mutex.
+ */
+- struct vbg_bit_usage_tracker guest_caps_tracker;
++ struct vbg_bit_usage_tracker set_guest_caps_tracker;
+ /**
+ * The guest capabilities last reported to the host (or UINT32_MAX).
+ * Protected by session_mutex.
+@@ -164,11 +165,11 @@ struct vbg_session {
+ */
+ u32 event_filter;
+ /**
+- * Guest capabilities for this session.
++ * Guest capabilities set through vbg_set_session_capabilities().
+ * A capability claimed by any guest session will be reported to the
+ * host. Protected by vbg_gdev.session_mutex.
+ */
+- u32 guest_caps;
++ u32 set_guest_caps;
+ /** VMMDEV_REQUESTOR_* flags */
+ u32 requestor;
+ /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
+--
+2.26.2
+
+From c5cf459d4d98a7993f5e00d5d2b826c55bbce562 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 15:30:29 +0200
+Subject: [PATCH 4/8] virt: vbox: Add vbg_set_host_capabilities() helper
+ function
+
+Add vbg_set_host_capabilities() helper function, this is a preparation
+patch for adding support for the VBGL_IOCTL_GUEST_CAPS_ACQUIRE ioctl.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 79 ++++++++++++++-----------
+ 1 file changed, 46 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index aee5eff229f2..15b3cb618c6e 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -661,6 +661,48 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+ return vbg_status_code_to_errno(rc);
+ }
+
++/**
++ * Set guest capabilities on the host.
++ * Must be called with gdev->session_mutex held.
++ * Return: 0 or negative errno value.
++ * @gdev: The Guest extension device.
++ * @session: The session.
++ * @session_termination: Set if we're called by the session cleanup code.
++ */
++static int vbg_set_host_capabilities(struct vbg_dev *gdev,
++ struct vbg_session *session,
++ bool session_termination)
++{
++ struct vmmdev_mask *req;
++ u32 caps;
++ int rc;
++
++ WARN_ON(!mutex_is_locked(&gdev->session_mutex));
++
++ caps = gdev->set_guest_caps_tracker.mask;
++
++ if (gdev->guest_caps_host == caps)
++ return 0;
++
++ /* On termination the requestor is the kernel, as we're cleaning up. */
++ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
++ session_termination ? VBG_KERNEL_REQUEST :
++ session->requestor);
++ if (!req) {
++ gdev->guest_caps_host = U32_MAX;
++ return -ENOMEM;
++ }
++
++ req->or_mask = caps;
++ req->not_mask = ~caps;
++ rc = vbg_req_perform(gdev, req);
++ vbg_req_free(req, sizeof(*req));
++
++ gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
++
++ return vbg_status_code_to_errno(rc);
++}
++
+ /**
+ * Sets the guest capabilities for a session. Takes the session spinlock.
+ * Return: 0 or negative errno value.
+@@ -678,23 +720,8 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ u32 or_mask, u32 not_mask,
+ bool session_termination)
+ {
+- struct vmmdev_mask *req;
+ u32 changed, previous;
+- int rc, ret = 0;
+-
+- /*
+- * Allocate a request buffer before taking the spinlock, when
+- * the session is being terminated the requestor is the kernel,
+- * as we're cleaning up.
+- */
+- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+- session_termination ? VBG_KERNEL_REQUEST :
+- session->requestor);
+- if (!req) {
+- if (!session_termination)
+- return -ENOMEM;
+- /* Ignore allocation failure, we must do session cleanup. */
+- }
++ int ret = 0;
+
+ mutex_lock(&gdev->session_mutex);
+
+@@ -709,23 +736,10 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ goto out;
+
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
+- or_mask = gdev->set_guest_caps_tracker.mask;
+-
+- if (gdev->guest_caps_host == or_mask || !req)
+- goto out;
+-
+- gdev->guest_caps_host = or_mask;
+- req->or_mask = or_mask;
+- req->not_mask = ~or_mask;
+- rc = vbg_req_perform(gdev, req);
+- if (rc < 0) {
+- ret = vbg_status_code_to_errno(rc);
+-
+- /* Failed, roll back (unless it's session termination time). */
+- gdev->guest_caps_host = U32_MAX;
+- if (session_termination)
+- goto out;
+
++ ret = vbg_set_host_capabilities(gdev, session, session_termination);
++ /* Roll back on failure, unless it's session termination time. */
++ if (ret < 0 && !session_termination) {
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
+ session->set_guest_caps);
+ session->set_guest_caps = previous;
+@@ -733,7 +747,6 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+
+ out:
+ mutex_unlock(&gdev->session_mutex);
+- vbg_req_free(req, sizeof(*req));
+
+ return ret;
+ }
+--
+2.26.2
+
+From 2f33e58bcc8c69f938629dc57c8ad631724f02f0 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 18:04:30 +0200
+Subject: [PATCH 5/8] virt: vbox: Add support for the new
+ VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl
+
+Add support for the new VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl.
+This is necessary for automatic resizing of the guest resolution to
+match the VM-window size when using the new VMSVGA virtual GPU, which
+is now the default in VirtualBox.
+
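+A rough userspace sketch (editor's illustration; the header setup is
+inferred from the vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0)
+check below, and VBG_IOCTL_HDR_VERSION is an assumption taken from the
+uapi header) of acquiring the graphics capability:
+
+	struct vbg_ioctl_acquire_guest_caps req = { 0 };
+	int fd = open("/dev/vboxguest", O_RDWR);
+
+	req.hdr.size_in = sizeof(req);		/* hdr + u.in */
+	req.hdr.size_out = sizeof(req.hdr);	/* no payload out */
+	req.hdr.version = VBG_IOCTL_HDR_VERSION;
+	req.u.in.flags = 0;
+	req.u.in.or_mask = VMMDEV_GUEST_SUPPORTS_GRAPHICS;
+	req.u.in.not_mask = 0;
+
+	if (ioctl(fd, VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES, &req) < 0)
+		perror("acquire caps");	/* EBUSY: already acquired */
+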
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 163 +++++++++++++++++++++++-
+ drivers/virt/vboxguest/vboxguest_core.h | 14 ++
+ include/uapi/linux/vboxguest.h | 24 ++++
+ 3 files changed, 200 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 15b3cb618c6e..4f1addaa3f6f 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -679,7 +679,7 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
+
+ WARN_ON(!mutex_is_locked(&gdev->session_mutex));
+
+- caps = gdev->set_guest_caps_tracker.mask;
++ caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == caps)
+ return 0;
+@@ -703,6 +703,113 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
+ return vbg_status_code_to_errno(rc);
+ }
+
++/**
++ * Acquire (get exclusive access) guest capabilities for a session.
++ * Takes the session mutex.
++ * Return: 0 or negative errno value.
++ * @gdev: The Guest extension device.
++ * @session: The session.
++ * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
++ * @or_mask: The capabilities to add.
++ * @not_mask: The capabilities to remove.
++ * @session_termination: Set if we're called by the session cleanup code.
++ * This tweaks the error handling so we perform
++ * proper session cleanup even if the host
++ * misbehaves.
++ */
++static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
++ struct vbg_session *session,
++ u32 or_mask, u32 not_mask,
++ u32 flags, bool session_termination)
++{
++ unsigned long irqflags;
++ bool wakeup = false;
++ int ret = 0;
++
++ mutex_lock(&gdev->session_mutex);
++
++ if (gdev->set_guest_caps_tracker.mask & or_mask) {
++ vbg_err("%s error: cannot acquire caps which are currently set\n",
++ __func__);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * Mark any caps in the or_mask as now being in acquire-mode. Note
++ * once caps are in acquire_mode they always stay in this mode.
++ * This impacts event handling, so we take the event-lock.
++ */
++ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
++ gdev->acquire_mode_guest_caps |= or_mask;
++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
++
++ /* If we only have to switch the caps to acquire mode, we're done. */
++ if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
++ goto out;
++
++ not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
++ not_mask &= session->acquired_guest_caps;
++ or_mask &= ~session->acquired_guest_caps;
++
++ if (or_mask == 0 && not_mask == 0)
++ goto out;
++
++ if (gdev->acquired_guest_caps & or_mask) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ gdev->acquired_guest_caps |= or_mask;
++ gdev->acquired_guest_caps &= ~not_mask;
++ /* session->acquired_guest_caps impacts event handling, take the lock */
++ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
++ session->acquired_guest_caps |= or_mask;
++ session->acquired_guest_caps &= ~not_mask;
++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
++
++ ret = vbg_set_host_capabilities(gdev, session, session_termination);
++ /* Roll back on failure, unless it's session termination time. */
++ if (ret < 0 && !session_termination) {
++ gdev->acquired_guest_caps &= ~or_mask;
++ gdev->acquired_guest_caps |= not_mask;
++ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
++ session->acquired_guest_caps &= ~or_mask;
++ session->acquired_guest_caps |= not_mask;
++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
++ }
++
++ /*
++ * If we added a capability, check if that means some other thread in
++ * our session should be unblocked because there are events pending
++ * (the result of vbg_get_allowed_event_mask_for_session() may change).
++ *
++ * HACK ALERT! When the seamless support capability is added we generate
++ * a seamless change event so that the ring-3 client can sync with
++ * the seamless state.
++ */
++ if (ret == 0 && or_mask != 0) {
++ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
++
++ if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
++ gdev->pending_events |=
++ VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
++
++ if (gdev->pending_events)
++ wakeup = true;
++
++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
++
++ if (wakeup)
++ wake_up(&gdev->event_wq);
++ }
++
++out:
++ mutex_unlock(&gdev->session_mutex);
++
++ return ret;
++}
++
+ /**
+ * Sets the guest capabilities for a session. Takes the session spinlock.
+ * Return: 0 or negative errno value.
+@@ -725,6 +832,13 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+
+ mutex_lock(&gdev->session_mutex);
+
++ if (gdev->acquire_mode_guest_caps & or_mask) {
++ vbg_err("%s error: cannot set caps which are in acquire_mode\n",
++ __func__);
++ ret = -EBUSY;
++ goto out;
++ }
++
+ /* Apply the changes to the session mask. */
+ previous = session->set_guest_caps;
+ session->set_guest_caps |= or_mask;
+@@ -962,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session)
+ struct vbg_dev *gdev = session->gdev;
+ int i, rc;
+
++ vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
+ vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
+ vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
+
+@@ -1019,6 +1134,25 @@ static int vbg_ioctl_driver_version_info(
+ return 0;
+ }
+
++/* Must be called with the event_lock held */
++static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
++ struct vbg_session *session)
++{
++ u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
++ u32 session_acquired_caps = session->acquired_guest_caps;
++ u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
++
++ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
++ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
++ allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
++
++ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
++ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
++ allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
++
++ return allowed_events;
++}
++
+ static bool vbg_wait_event_cond(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 event_mask)
+@@ -1030,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev,
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ events = gdev->pending_events & event_mask;
++ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
+ wakeup = events || session->cancel_waiters;
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+@@ -1044,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
+ {
+ u32 events = gdev->pending_events & event_mask;
+
++ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
+ gdev->pending_events &= ~events;
+ return events;
+ }
+@@ -1445,6 +1581,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
+ false);
+ }
+
++static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
++ struct vbg_session *session,
++ struct vbg_ioctl_acquire_guest_caps *caps)
++{
++ u32 flags, or_mask, not_mask;
++
++ if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
++ return -EINVAL;
++
++ flags = caps->u.in.flags;
++ or_mask = caps->u.in.or_mask;
++ not_mask = caps->u.in.not_mask;
++
++ if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
++ return -EINVAL;
++
++ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
++ return -EINVAL;
++
++ return vbg_acquire_session_capabilities(gdev, session, or_mask,
++ not_mask, flags, false);
++}
++
+ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
+ {
+@@ -1554,6 +1713,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+ return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
+ case VBG_IOCTL_CHANGE_FILTER_MASK:
+ return vbg_ioctl_change_filter_mask(gdev, session, data);
++ case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
++ return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
+ case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
+ return vbg_ioctl_change_guest_capabilities(gdev, session, data);
+ case VBG_IOCTL_CHECK_BALLOON:
+diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
+index dc745a033164..ab4bf64e2cec 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.h
++++ b/drivers/virt/vboxguest/vboxguest_core.h
+@@ -117,6 +117,15 @@ struct vbg_dev {
+ */
+ u32 event_filter_host;
+
++ /**
++ * Guest capabilities which have been switched to acquire_mode.
++ */
++ u32 acquire_mode_guest_caps;
++ /**
++ * Guest capabilities acquired by vbg_acquire_session_capabilities().
++ * Only one session can acquire a capability at a time.
++ */
++ u32 acquired_guest_caps;
+ /**
+ * Usage counters for guest capabilities requested through
+ * vbg_set_session_capabilities(). Indexed by capability bit
+@@ -164,6 +173,11 @@ struct vbg_session {
+ * host filter. Protected by vbg_gdev.session_mutex.
+ */
+ u32 event_filter;
++ /**
++ * Guest capabilities acquired by vbg_acquire_session_capabilities().
++ * Only one session can acquire a capability at a time.
++ */
++ u32 acquired_guest_caps;
+ /**
+ * Guest capabilities set through vbg_set_session_capabilities().
+ * A capability claimed by any guest session will be reported to the
+diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
+index f79d7abe27db..15125f6ec60d 100644
+--- a/include/uapi/linux/vboxguest.h
++++ b/include/uapi/linux/vboxguest.h
+@@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
+ _IOWR('V', 12, struct vbg_ioctl_change_filter)
+
+
++/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */
++struct vbg_ioctl_acquire_guest_caps {
++ /** The header. */
++ struct vbg_ioctl_hdr hdr;
++ union {
++ struct {
++ /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */
++ __u32 flags;
++ /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
++ __u32 or_mask;
++ /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
++ __u32 not_mask;
++ } in;
++ } u;
++};
++VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12);
++
++#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001
++#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001
++
++#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \
++ _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps)
++
++
+ /** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
+ struct vbg_ioctl_set_guest_caps {
+ /** The header. */
+--
+2.26.2
+
+From d34852680360e52c39ea901fbc3778d28f229852 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 11:05:40 +0200
+Subject: [PATCH 6/8] virt: vbox: Add a few new vmmdev request types to the
+ userspace whitelist
+
+Upstream VirtualBox has defined and is using a few new request types for
+vmmdev requests passed through /dev/vboxguest to the hypervisor.
+
+Add the defines for these to vbox_vmmdev_types.h and add them to the
+whitelists of vmmdev requests which userspace is allowed to make.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 2 ++
+ include/uapi/linux/vbox_vmmdev_types.h | 3 +++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 4f1addaa3f6f..ffd76b949276 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -1299,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
+ case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
+ case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
+ case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
++ case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
++ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
+ case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
+ case VMMDEVREQ_GET_VRDPCHANGE_REQ:
+ case VMMDEVREQ_LOG_STRING:
+diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
+index c27289fd619a..f8a8d6b3c521 100644
+--- a/include/uapi/linux/vbox_vmmdev_types.h
++++ b/include/uapi/linux/vbox_vmmdev_types.h
+@@ -63,6 +63,7 @@ enum vmmdev_request_type {
+ VMMDEVREQ_SET_GUEST_CAPABILITIES = 56,
+ VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */
++ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81,
+ VMMDEVREQ_HGCM_CONNECT = 60,
+ VMMDEVREQ_HGCM_DISCONNECT = 61,
+ VMMDEVREQ_HGCM_CALL32 = 62,
+@@ -92,6 +93,8 @@ enum vmmdev_request_type {
+ VMMDEVREQ_WRITE_COREDUMP = 218,
+ VMMDEVREQ_GUEST_HEARTBEAT = 219,
+ VMMDEVREQ_HEARTBEAT_CONFIGURE = 220,
++ VMMDEVREQ_NT_BUG_CHECK = 221,
++ VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222,
+ /* Ensure the enum is a 32 bit data-type */
+ VMMDEVREQ_SIZEHACK = 0x7fffffff
+ };
+--
+2.26.2
+
+From 92887e49b5f83dd802f3486143a9b619c7b56947 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 11:24:43 +0200
+Subject: [PATCH 7/8] virt: vbox: Log unknown ioctl requests as error
+
+Every now and then upstream adds new ioctls without notifying us;
+log unknown ioctl requests as an error to catch these.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index ffd76b949276..e0e343d0ba93 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -1739,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+ return vbg_ioctl_log(data);
+ }
+
+- vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
++ vbg_err("Userspace made an unknown ioctl req %#08x\n", req);
+ return -ENOTTY;
+ }
+
+--
+2.26.2
+
+From 10a3a8c4a13e608d24db0a4ed4a284470025346d Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 May 2020 18:08:07 +0200
+Subject: [PATCH 8/8] virt: vbox: Fix some comments which talk about the
+ "session spinlock"
+
+The session lock is a mutex, not a spinlock; fix the comments to match.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+---
+ drivers/virt/vboxguest/vboxguest_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index e0e343d0ba93..d99c19551d04 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
+- * do session cleanup. Takes the session spinlock.
++ * do session cleanup. Takes the session mutex.
+ *
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+@@ -811,7 +811,7 @@ static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
+ }
+
+ /**
+- * Sets the guest capabilities for a session. Takes the session spinlock.
++ * Sets the guest capabilities for a session. Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+--
+2.26.2
+
diff --git a/vfio-pci-block-user-access-to-disabled-device-MMIO.patch b/vfio-pci-block-user-access-to-disabled-device-MMIO.patch
new file mode 100644
index 000000000..f289b448f
--- /dev/null
+++ b/vfio-pci-block-user-access-to-disabled-device-MMIO.patch
@@ -0,0 +1,857 @@
+From MAILER-DAEMON Wed May 20 15:47:40 2020
+Subject: [PATCH v2 1/3] vfio/type1: Support faulting PFNMAP vmas
+From: Alex Williamson <alex.williamson@redhat.com>
+To: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
+Date: Tue, 05 May 2020 15:54:44 -0600
+Message-ID: <158871568480.15589.17339878308143043906.stgit@gimli.home>
+In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+Sender: kvm-owner@vger.kernel.org
+List-ID: <kvm.vger.kernel.org>
+X-Mailing-List: kvm@vger.kernel.org
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 7bit
+
+With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on
+the range being faulted into the vma. Add support to manually provide
+that, in the same way as is done in KVM with hva_to_pfn_remapped().
+
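+For context, this path is hit when userspace asks the type1 IOMMU
+backend to DMA-map a PFNMAP range, e.g. an mmap of another device's
+BAR for peer-to-peer. A minimal, hypothetical sketch of such a caller
+against the standard VFIO uapi (needs <linux/vfio.h>, <sys/ioctl.h>;
+container_fd, vaddr and map_size are assumptions, error handling
+omitted):
+
+	/* vaddr points into an mmap of a vfio-pci BAR (a VM_PFNMAP vma) */
+	struct vfio_iommu_type1_dma_map map = {
+		.argsz = sizeof(map),
+		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+		.vaddr = (__u64)(uintptr_t)vaddr,
+		.iova  = 0,	/* example IOVA, an assumption */
+		.size  = map_size,
+	};
+
+	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
+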
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+---
+ drivers/vfio/vfio_iommu_type1.c | 36 +++++++++++++++++++++++++++++++++---
+ 1 file changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index cc1d64765ce7..4a4cb7cd86b2 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -317,6 +317,32 @@ static int put_pfn(unsigned long pfn, int prot)
+ return 0;
+ }
+
++static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
++ unsigned long vaddr, unsigned long *pfn,
++ bool write_fault)
++{
++ int ret;
++
++ ret = follow_pfn(vma, vaddr, pfn);
++ if (ret) {
++ bool unlocked = false;
++
++ ret = fixup_user_fault(NULL, mm, vaddr,
++ FAULT_FLAG_REMOTE |
++ (write_fault ? FAULT_FLAG_WRITE : 0),
++ &unlocked);
++ if (unlocked)
++ return -EAGAIN;
++
++ if (ret)
++ return ret;
++
++ ret = follow_pfn(vma, vaddr, pfn);
++ }
++
++ return ret;
++}
++
+ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ int prot, unsigned long *pfn)
+ {
+@@ -339,12 +365,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+
+ vaddr = untagged_addr(vaddr);
+
++retry:
+ vma = find_vma_intersection(mm, vaddr, vaddr + 1);
+
+ if (vma && vma->vm_flags & VM_PFNMAP) {
+- if (!follow_pfn(vma, vaddr, pfn) &&
+- is_invalid_reserved_pfn(*pfn))
+- ret = 0;
++ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
++ if (ret == -EAGAIN)
++ goto retry;
++
++ if (!ret && !is_invalid_reserved_pfn(*pfn))
++ ret = -EFAULT;
+ }
+ done:
+ up_read(&mm->mmap_sem);
+
+
+From MAILER-DAEMON Wed May 20 15:47:40 2020
+Subject: [PATCH v2 2/3] vfio-pci: Fault mmaps to enable vma tracking
+From: Alex Williamson <alex.williamson@redhat.com>
+To: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
+Date: Tue, 05 May 2020 15:54:53 -0600
+Message-ID: <158871569380.15589.16950418949340311053.stgit@gimli.home>
+In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+Sender: kvm-owner@vger.kernel.org
+List-ID: <kvm.vger.kernel.org>
+X-Mailing-List: kvm@vger.kernel.org
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 7bit
+
+Rather than calling remap_pfn_range() when a region is mmap'd, set up
+a vm_ops handler to support dynamic faulting of the range on access.
+This allows us to manage a list of vmas actively mapping the area,
+which we can later use to invalidate those mappings. The open callback
+invalidates the vma range so that all tracking is inserted in the
+fault handler and removed in the close handler.
+
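+For reference, the vmas being tracked are created by userspace roughly
+as follows; a sketch against the standard VFIO uapi (<linux/vfio.h>,
+<sys/mman.h>), where device_fd and the BAR index are assumptions and
+error handling is omitted:
+
+	struct vfio_region_info info = {
+		.argsz = sizeof(info),
+		.index = VFIO_PCI_BAR0_REGION_INDEX,	/* example BAR */
+	};
+	void *bar;
+
+	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
+	bar = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+		   device_fd, info.offset);
+
+The first access through bar then goes via the new fault handler,
+which is what inserts the vma into vma_list.
+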
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+---
+ drivers/vfio/pci/vfio_pci.c | 76 ++++++++++++++++++++++++++++++++++-
+ drivers/vfio/pci/vfio_pci_private.h | 7 +++
+ 2 files changed, 81 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 6c6b37b5c04e..66a545a01f8f 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1299,6 +1299,70 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
+ }
+
++static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
++ struct vm_area_struct *vma)
++{
++ struct vfio_pci_mmap_vma *mmap_vma;
++
++ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
++ if (!mmap_vma)
++ return -ENOMEM;
++
++ mmap_vma->vma = vma;
++
++ mutex_lock(&vdev->vma_lock);
++ list_add(&mmap_vma->vma_next, &vdev->vma_list);
++ mutex_unlock(&vdev->vma_lock);
++
++ return 0;
++}
++
++/*
++ * Zap mmaps on open so that we can fault them in on access; this way
++ * our vma_list only tracks mappings accessed since the last zap.
++ */
++static void vfio_pci_mmap_open(struct vm_area_struct *vma)
++{
++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
++}
++
++static void vfio_pci_mmap_close(struct vm_area_struct *vma)
++{
++ struct vfio_pci_device *vdev = vma->vm_private_data;
++ struct vfio_pci_mmap_vma *mmap_vma;
++
++ mutex_lock(&vdev->vma_lock);
++ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
++ if (mmap_vma->vma == vma) {
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++ break;
++ }
++ }
++ mutex_unlock(&vdev->vma_lock);
++}
++
++static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
++{
++ struct vm_area_struct *vma = vmf->vma;
++ struct vfio_pci_device *vdev = vma->vm_private_data;
++
++ if (vfio_pci_add_vma(vdev, vma))
++ return VM_FAULT_OOM;
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot))
++ return VM_FAULT_SIGBUS;
++
++ return VM_FAULT_NOPAGE;
++}
++
++static const struct vm_operations_struct vfio_pci_mmap_ops = {
++ .open = vfio_pci_mmap_open,
++ .close = vfio_pci_mmap_close,
++ .fault = vfio_pci_mmap_fault,
++};
++
+ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ {
+ struct vfio_pci_device *vdev = device_data;
+@@ -1357,8 +1421,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+
+- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+- req_len, vma->vm_page_prot);
++ /*
++	 * See remap_pfn_range(), called from vfio_pci_mmap_fault(); we
++	 * can't change vm_flags within the fault handler, so set them now.
++ */
++ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_ops = &vfio_pci_mmap_ops;
++
++ return 0;
+ }
+
+ static void vfio_pci_request(void *device_data, unsigned int count)
+@@ -1608,6 +1678,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init(&vdev->irqlock);
+ mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
++ mutex_init(&vdev->vma_lock);
++ INIT_LIST_HEAD(&vdev->vma_list);
+
+ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
+ if (ret) {
+diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
+index 36ec69081ecd..9b25f9f6ce1d 100644
+--- a/drivers/vfio/pci/vfio_pci_private.h
++++ b/drivers/vfio/pci/vfio_pci_private.h
+@@ -92,6 +92,11 @@ struct vfio_pci_vf_token {
+ struct mutex lock;
+ };
+
++struct vfio_pci_mmap_vma {
++ struct vm_area_struct *vma;
++ struct list_head vma_next;
++};
++
+ struct vfio_pci_device {
+ struct pci_dev *pdev;
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+@@ -132,6 +137,8 @@ struct vfio_pci_device {
+ struct list_head dummy_resources_list;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds_list;
++ struct mutex vma_lock;
++ struct list_head vma_list;
+ };
+
+ #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
+
+
+From MAILER-DAEMON Wed May 20 15:47:40 2020
+Subject: [PATCH v2 3/3] vfio-pci: Invalidate mmaps and block MMIO access on disabled memory
+From: Alex Williamson <alex.williamson@redhat.com>
+To: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, cohuck@redhat.com, jgg@ziepe.ca
+Date: Tue, 05 May 2020 15:55:02 -0600
+Message-ID: <158871570274.15589.10563806532874116326.stgit@gimli.home>
+In-Reply-To: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+References: <158871401328.15589.17598154478222071285.stgit@gimli.home>
+Sender: kvm-owner@vger.kernel.org
+List-ID: <kvm.vger.kernel.org>
+X-Mailing-List: kvm@vger.kernel.org
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 7bit
+
+Accessing the disabled memory space of a PCI device would typically
+result in a master abort response on conventional PCI, or an
+unsupported request on PCI express.  The user would generally see a
+-1 response for the read data, while the write would be silently
+discarded, possibly with an uncorrected, non-fatal AER error triggered
+on the host.  Some systems, however, take it upon themselves to bring
+down the entire machine when they see something that might indicate a
+loss of data, such as this discarded write to a disabled memory
+space.
+
+To avoid this, we want to try to block the user from accessing memory
+spaces while they're disabled. We start with a semaphore around the
+memory enable bit, where writers modify the memory enable state and
+must be serialized, while readers make use of the memory region and
+can access in parallel.  Writers include direct manipulation via the
+command register, any reset path where the internal mechanics of the
+reset may explicitly or implicitly disable memory access, and
+manipulation of the MSI-X configuration, where the MSI-X vector table
+resides in the MMIO space of the device.  Readers
+include the read and write file ops to access the vfio device fd
+offsets as well as memory mapped access. In the latter case, we make
+use of our new vma list support to zap, or invalidate, those memory
+mappings in order to force them to be faulted back in on access.
+
+Our semaphore usage will stall user access to MMIO spaces across
+internal operations like reset, but the user might experience new
+behavior when trying to access the MMIO space while it is disabled via
+the PCI command register. Access via read or write while disabled will
+return -EIO and access via memory maps will result in a SIGBUS. This
+is expected to be compatible with known use cases and potentially
+provides better error handling capabilities than those present in the
+hardware, while avoiding the more readily accessible and severe
+platform error responses that might otherwise occur.
+
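+From userspace the new failure modes look roughly like this; a sketch
+only, where device_fd, bar and region_offset are assumptions:
+
+	/* with the memory space disabled via the PCI command register: */
+	ret = pread(device_fd, buf, sizeof(buf), region_offset);
+	/* ret == -1 with errno == EIO, rather than all-ones read data */
+
+	val = *(volatile __u32 *)bar;	/* mmap access now raises SIGBUS */
+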
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+---
+ drivers/vfio/pci/vfio_pci.c | 263 +++++++++++++++++++++++++++++++----
+ drivers/vfio/pci/vfio_pci_config.c | 36 ++++-
+ drivers/vfio/pci/vfio_pci_intrs.c | 18 ++
+ drivers/vfio/pci/vfio_pci_private.h | 5 +
+ drivers/vfio/pci/vfio_pci_rdwr.c | 12 ++
+ 5 files changed, 300 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 66a545a01f8f..49ae9faa6099 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -26,6 +26,7 @@
+ #include <linux/vfio.h>
+ #include <linux/vgaarb.h>
+ #include <linux/nospec.h>
++#include <linux/sched/mm.h>
+
+ #include "vfio_pci_private.h"
+
+@@ -184,6 +185,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
+
+ static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
+ static void vfio_pci_disable(struct vfio_pci_device *vdev);
++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
+
+ /*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+@@ -736,6 +738,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ return 0;
+ }
+
++struct vfio_devices {
++ struct vfio_device **devices;
++ int cur_index;
++ int max_index;
++};
++
+ static long vfio_pci_ioctl(void *device_data,
+ unsigned int cmd, unsigned long arg)
+ {
+@@ -984,8 +992,16 @@ static long vfio_pci_ioctl(void *device_data,
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_RESET) {
+- return vdev->reset_works ?
+- pci_try_reset_function(vdev->pdev) : -EINVAL;
++ int ret;
++
++ if (!vdev->reset_works)
++ return -EINVAL;
++
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
++ ret = pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++
++ return ret;
+
+ } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
+ struct vfio_pci_hot_reset_info hdr;
+@@ -1065,8 +1081,9 @@ static long vfio_pci_ioctl(void *device_data,
+ int32_t *group_fds;
+ struct vfio_pci_group_entry *groups;
+ struct vfio_pci_group_info info;
++ struct vfio_devices devs = { .cur_index = 0 };
+ bool slot = false;
+- int i, count = 0, ret = 0;
++ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
+
+ minsz = offsetofend(struct vfio_pci_hot_reset, count);
+
+@@ -1118,9 +1135,9 @@ static long vfio_pci_ioctl(void *device_data,
+ * user interface and store the group and iommu ID. This
+ * ensures the group is held across the reset.
+ */
+- for (i = 0; i < hdr.count; i++) {
++ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
+ struct vfio_group *group;
+- struct fd f = fdget(group_fds[i]);
++ struct fd f = fdget(group_fds[group_idx]);
+ if (!f.file) {
+ ret = -EBADF;
+ break;
+@@ -1133,8 +1150,9 @@ static long vfio_pci_ioctl(void *device_data,
+ break;
+ }
+
+- groups[i].group = group;
+- groups[i].id = vfio_external_user_iommu_id(group);
++ groups[group_idx].group = group;
++ groups[group_idx].id =
++ vfio_external_user_iommu_id(group);
+ }
+
+ kfree(group_fds);
+@@ -1153,13 +1171,63 @@ static long vfio_pci_ioctl(void *device_data,
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_validate_devs,
+ &info, slot);
+- if (!ret)
+- /* User has access, do the reset */
+- ret = pci_reset_bus(vdev->pdev);
++ if (ret)
++ goto hot_reset_release;
++
++ devs.max_index = count;
++ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
++ GFP_KERNEL);
++ if (!devs.devices) {
++ ret = -ENOMEM;
++ goto hot_reset_release;
++ }
++
++ /*
++ * We need to get memory_lock for each device, but devices
++	 * can share mmap_sem; therefore we need to zap and hold
++ * the vma_lock for each device, and only then get each
++ * memory_lock.
++ */
++ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
++ vfio_pci_try_zap_and_vma_lock_cb,
++ &devs, slot);
++ if (ret)
++ goto hot_reset_release;
++
++ for (; mem_idx < devs.cur_index; mem_idx++) {
++ struct vfio_pci_device *tmp;
++
++ tmp = vfio_device_data(devs.devices[mem_idx]);
++
++ ret = down_write_trylock(&tmp->memory_lock);
++ if (!ret) {
++ ret = -EBUSY;
++ goto hot_reset_release;
++ }
++ mutex_unlock(&tmp->vma_lock);
++ }
++
++ /* User has access, do the reset */
++ ret = pci_reset_bus(vdev->pdev);
+
+ hot_reset_release:
+- for (i--; i >= 0; i--)
+- vfio_group_put_external_user(groups[i].group);
++ for (i = 0; i < devs.cur_index; i++) {
++ struct vfio_device *device;
++ struct vfio_pci_device *tmp;
++
++ device = devs.devices[i];
++ tmp = vfio_device_data(device);
++
++ if (i < mem_idx)
++ up_write(&tmp->memory_lock);
++ else
++ mutex_unlock(&tmp->vma_lock);
++ vfio_device_put(device);
++ }
++ kfree(devs.devices);
++
++ for (group_idx--; group_idx >= 0; group_idx--)
++ vfio_group_put_external_user(groups[group_idx].group);
+
+ kfree(groups);
+ return ret;
+@@ -1299,8 +1367,107 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
+ }
+
+-static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
+- struct vm_area_struct *vma)
++/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
++static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
++{
++ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
++
++ /*
++ * Lock ordering:
++ * vma_lock is nested under mmap_sem for vm_ops callback paths.
++ * The memory_lock semaphore is used by both code paths calling
++ * into this function to zap vmas and the vm_ops.fault callback
++ * to protect the memory enable state of the device.
++ *
++ * When zapping vmas we need to maintain the mmap_sem => vma_lock
++ * ordering, which requires using vma_lock to walk vma_list to
++ * acquire an mm, then dropping vma_lock to get the mmap_sem and
++ * reacquiring vma_lock. This logic is derived from similar
++ * requirements in uverbs_user_mmap_disassociate().
++ *
++ * mmap_sem must always be the top-level lock when it is taken.
++ * Therefore we can only hold the memory_lock write lock when
++ * vma_list is empty, as we'd need to take mmap_sem to clear
++ * entries. vma_list can only be guaranteed empty when holding
++ * vma_lock, thus memory_lock is nested under vma_lock.
++ *
++ * This enables the vm_ops.fault callback to acquire vma_lock,
++ * followed by memory_lock read lock, while already holding
++ * mmap_sem without risk of deadlock.
++ */
++ while (1) {
++ struct mm_struct *mm = NULL;
++
++ if (try) {
++ if (!mutex_trylock(&vdev->vma_lock))
++ return 0;
++ } else {
++ mutex_lock(&vdev->vma_lock);
++ }
++ while (!list_empty(&vdev->vma_list)) {
++ mmap_vma = list_first_entry(&vdev->vma_list,
++ struct vfio_pci_mmap_vma,
++ vma_next);
++ mm = mmap_vma->vma->vm_mm;
++ if (mmget_not_zero(mm))
++ break;
++
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++ mm = NULL;
++ }
++ if (!mm)
++ return 1;
++ mutex_unlock(&vdev->vma_lock);
++
++ if (try) {
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ mmput(mm);
++ return 0;
++ }
++ } else {
++ down_read(&mm->mmap_sem);
++ }
++ if (mmget_still_valid(mm)) {
++ if (try) {
++ if (!mutex_trylock(&vdev->vma_lock)) {
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++ return 0;
++ }
++ } else {
++ mutex_lock(&vdev->vma_lock);
++ }
++ list_for_each_entry_safe(mmap_vma, tmp,
++ &vdev->vma_list, vma_next) {
++ struct vm_area_struct *vma = mmap_vma->vma;
++
++ if (vma->vm_mm != mm)
++ continue;
++
++ list_del(&mmap_vma->vma_next);
++ kfree(mmap_vma);
++
++ zap_vma_ptes(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start);
++ }
++ mutex_unlock(&vdev->vma_lock);
++ }
++ up_read(&mm->mmap_sem);
++ mmput(mm);
++ }
++}
++
++void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
++{
++ vfio_pci_zap_and_vma_lock(vdev, false);
++ down_write(&vdev->memory_lock);
++ mutex_unlock(&vdev->vma_lock);
++}
++
++/* Caller holds vma_lock */
++static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
++ struct vm_area_struct *vma)
+ {
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+@@ -1309,10 +1476,7 @@ static int vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+-
+- mutex_lock(&vdev->vma_lock);
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+- mutex_unlock(&vdev->vma_lock);
+
+ return 0;
+ }
+@@ -1346,15 +1510,32 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
++ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+- if (vfio_pci_add_vma(vdev, vma))
+- return VM_FAULT_OOM;
++ mutex_lock(&vdev->vma_lock);
++ down_read(&vdev->memory_lock);
++
++ if (!__vfio_pci_memory_enabled(vdev)) {
++ ret = VM_FAULT_SIGBUS;
++ mutex_unlock(&vdev->vma_lock);
++ goto up_out;
++ }
++
++ if (__vfio_pci_add_vma(vdev, vma)) {
++ ret = VM_FAULT_OOM;
++ mutex_unlock(&vdev->vma_lock);
++ goto up_out;
++ }
++
++ mutex_unlock(&vdev->vma_lock);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+- return VM_FAULT_SIGBUS;
++ ret = VM_FAULT_SIGBUS;
+
+- return VM_FAULT_NOPAGE;
++up_out:
++ up_read(&vdev->memory_lock);
++ return ret;
+ }
+
+ static const struct vm_operations_struct vfio_pci_mmap_ops = {
+@@ -1680,6 +1861,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
++ init_rwsem(&vdev->memory_lock);
+
+ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
+ if (ret) {
+@@ -1933,12 +2115,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
+ kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
+ }
+
+-struct vfio_devices {
+- struct vfio_device **devices;
+- int cur_index;
+- int max_index;
+-};
+-
+ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
+ {
+ struct vfio_devices *devs = data;
+@@ -1969,6 +2145,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
+ return 0;
+ }
+
++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
++{
++ struct vfio_devices *devs = data;
++ struct vfio_device *device;
++ struct vfio_pci_device *vdev;
++
++ if (devs->cur_index == devs->max_index)
++ return -ENOSPC;
++
++ device = vfio_device_get_from_dev(&pdev->dev);
++ if (!device)
++ return -EINVAL;
++
++ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
++ vfio_device_put(device);
++ return -EBUSY;
++ }
++
++ vdev = vfio_device_data(device);
++
++ /*
++	 * Locking multiple devices is prone to deadlock; run away and
++	 * unwind if we hit contention.
++ */
++ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
++ vfio_device_put(device);
++ return -EBUSY;
++ }
++
++ devs->devices[devs->cur_index++] = device;
++ return 0;
++}
++
+ /*
+ * If a bus or slot reset is available for the provided device and:
+ * - All of the devices affected by that bus or slot reset are unused
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 90c0b80f8acf..3dcddbd572e6 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
+ *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
+ }
+
++/* Caller should hold memory_lock semaphore */
++bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
++{
++ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
++
++ return cmd & PCI_COMMAND_MEMORY;
++}
++
+ /*
+ * Restore the *real* BARs after we detect a FLR or backdoor reset.
+ * (backdoor = some device specific technique that we didn't catch)
+@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+
+ new_cmd = le32_to_cpu(val);
+
++ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
++ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
++ new_io = !!(new_cmd & PCI_COMMAND_IO);
++
+ phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
+ virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
+ new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
+
+- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+- new_io = !!(new_cmd & PCI_COMMAND_IO);
++ if (!new_mem)
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
++ else
++ down_write(&vdev->memory_lock);
+
+ /*
+ * If the user is writing mem/io enable (new_mem/io) and we
+@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+ }
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+- if (count < 0)
++ if (count < 0) {
++ if (offset == PCI_COMMAND)
++ up_write(&vdev->memory_lock);
+ return count;
++ }
+
+ /*
+ * Save current memory/io enable bits in vconfig to allow for
+@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+
+ *virt_cmd &= cpu_to_le16(~mask);
+ *virt_cmd |= cpu_to_le16(new_cmd & mask);
++
++ up_write(&vdev->memory_lock);
+ }
+
+ /* Emulate INTx disable */
+@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+ pos - offset + PCI_EXP_DEVCAP,
+ &cap);
+
+- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
++ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++ }
+ }
+
+ /*
+@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+ pos - offset + PCI_AF_CAP,
+ &cap);
+
+- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
++ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
++ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ pci_try_reset_function(vdev->pdev);
++ up_write(&vdev->memory_lock);
++ }
+ }
+
+ return count;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 2056f3f85f59..54102a7eb9d3 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -626,6 +626,8 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
+ int (*func)(struct vfio_pci_device *vdev, unsigned index,
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
++ int ret;
++ u16 cmd;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+@@ -673,5 +675,19 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
+ if (!func)
+ return -ENOTTY;
+
+- return func(vdev, index, start, count, flags, data);
++ if (index == VFIO_PCI_MSIX_IRQ_INDEX) {
++ down_write(&vdev->memory_lock);
++ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
++ pci_write_config_word(vdev->pdev, PCI_COMMAND,
++ cmd | PCI_COMMAND_MEMORY);
++ }
++
++ ret = func(vdev, index, start, count, flags, data);
++
++ if (index == VFIO_PCI_MSIX_IRQ_INDEX) {
++ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
++ up_write(&vdev->memory_lock);
++ }
++
++ return ret;
+ }
+diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
+index 9b25f9f6ce1d..c4f25f1e80d7 100644
+--- a/drivers/vfio/pci/vfio_pci_private.h
++++ b/drivers/vfio/pci/vfio_pci_private.h
+@@ -139,6 +139,7 @@ struct vfio_pci_device {
+ struct list_head ioeventfds_list;
+ struct mutex vma_lock;
+ struct list_head vma_list;
++ struct rw_semaphore memory_lock;
+ };
+
+ #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
+@@ -181,6 +182,10 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
+ pci_power_t state);
+
++extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
++extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
++ *vdev);
++
+ #ifdef CONFIG_VFIO_PCI_IGD
+ extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
+ #else
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index a87992892a9f..f58c45308682 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t x_start = 0, x_end = 0;
+ resource_size_t end;
+ void __iomem *io;
++ struct resource *res = &vdev->pdev->resource[bar];
+ ssize_t done;
+
+ if (pci_resource_start(pdev, bar))
+@@ -200,8 +201,19 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+ x_end = vdev->msix_offset + vdev->msix_size;
+ }
+
++ if (res->flags & IORESOURCE_MEM) {
++ down_read(&vdev->memory_lock);
++ if (!__vfio_pci_memory_enabled(vdev)) {
++ up_read(&vdev->memory_lock);
++ return -EIO;
++ }
++ }
++
+ done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
+
++ if (res->flags & IORESOURCE_MEM)
++ up_read(&vdev->memory_lock);
++
+ if (done >= 0)
+ *ppos += done;
+
+
+