author     Thorsten Leemhuis <fedora@leemhuis.info>   2018-08-16 08:15:54 +0200
committer  Thorsten Leemhuis <fedora@leemhuis.info>   2018-08-16 08:15:54 +0200
commit     dec761c2361865010f7708611191f30c75b1ec96 (patch)
tree       7826890853ff967ca87f0d6b8ef455514afa5a2d
parent     a8b4bbc1829a8e069954b61277459252e51ab696 (diff)
parent     2ae6784ba77c01a449c3f6bb2f9208349b59f796 (diff)
merge origin
-rw-r--r--  0001-xfs-don-t-call-xfs_da_shrink_inode-with-NULL-bp.patch      |    43
-rw-r--r--  0001-xfs-validate-cached-inodes-are-free-when-allocated.patch   |   155
-rw-r--r--  foreshadow.patch                                                | 10118
-rw-r--r--  kernel.spec                                                     |    18
4 files changed, 10130 insertions, 204 deletions
diff --git a/0001-xfs-don-t-call-xfs_da_shrink_inode-with-NULL-bp.patch b/0001-xfs-don-t-call-xfs_da_shrink_inode-with-NULL-bp.patch
deleted file mode 100644
index 874536f24..000000000
--- a/0001-xfs-don-t-call-xfs_da_shrink_inode-with-NULL-bp.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From bb3d48dcf86a97dc25fe9fc2c11938e19cb4399a Mon Sep 17 00:00:00 2001
-From: Eric Sandeen <sandeen@sandeen.net>
-Date: Fri, 8 Jun 2018 09:53:49 -0700
-Subject: [PATCH] xfs: don't call xfs_da_shrink_inode with NULL bp
-
-xfs_attr3_leaf_create may have errored out before instantiating a buffer,
-for example if the blkno is out of range. In that case there is no work
-to do to remove it, and in fact xfs_da_shrink_inode will lead to an oops
-if we try.
-
-This also seems to fix a flaw where the original error from
-xfs_attr3_leaf_create gets overwritten in the cleanup case, and it
-removes a pointless assignment to bp which isn't used after this.
-
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199969
-Reported-by: Xu, Wen <wen.xu@gatech.edu>
-Tested-by: Xu, Wen <wen.xu@gatech.edu>
-Signed-off-by: Eric Sandeen <sandeen@redhat.com>
-Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
-Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
----
- fs/xfs/libxfs/xfs_attr_leaf.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
-index 99e0f5749dba..76e90046731c 100644
---- a/fs/xfs/libxfs/xfs_attr_leaf.c
-+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
-@@ -791,9 +791,8 @@ xfs_attr_shortform_to_leaf(
- ASSERT(blkno == 0);
- error = xfs_attr3_leaf_create(args, blkno, &bp);
- if (error) {
-- error = xfs_da_shrink_inode(args, 0, bp);
-- bp = NULL;
-- if (error)
-+ /* xfs_attr3_leaf_create may not have instantiated a block */
-+ if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
- goto out;
- xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */
- memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */
---
-2.17.1
-
diff --git a/0001-xfs-validate-cached-inodes-are-free-when-allocated.patch b/0001-xfs-validate-cached-inodes-are-free-when-allocated.patch
deleted file mode 100644
index ce78bed80..000000000
--- a/0001-xfs-validate-cached-inodes-are-free-when-allocated.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From afca6c5b2595fc44383919fba740c194b0b76aff Mon Sep 17 00:00:00 2001
-From: Dave Chinner <dchinner@redhat.com>
-Date: Tue, 17 Apr 2018 17:17:34 -0700
-Subject: [PATCH] xfs: validate cached inodes are free when allocated
-
-A recent fuzzed filesystem image cached random dcache corruption
-when the reproducer was run. This often showed up as panics in
-lookup_slow() on a null inode->i_ops pointer when doing pathwalks.
-
-BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
-....
-Call Trace:
- lookup_slow+0x44/0x60
- walk_component+0x3dd/0x9f0
- link_path_walk+0x4a7/0x830
- path_lookupat+0xc1/0x470
- filename_lookup+0x129/0x270
- user_path_at_empty+0x36/0x40
- path_listxattr+0x98/0x110
- SyS_listxattr+0x13/0x20
- do_syscall_64+0xf5/0x280
- entry_SYSCALL_64_after_hwframe+0x42/0xb7
-
-but had many different failure modes including deadlocks trying to
-lock the inode that was just allocated or KASAN reports of
-use-after-free violations.
-
-The cause of the problem was a corrupt INOBT on a v4 fs where the
-root inode was marked as free in the inobt record. Hence when we
-allocated an inode, it chose the root inode to allocate, found it in
-the cache and re-initialised it.
-
-We recently fixed a similar inode allocation issue caused by inobt
-record corruption problem in xfs_iget_cache_miss() in commit
-ee457001ed6c ("xfs: catch inode allocation state mismatch
-corruption"). This change adds similar checks to the cache-hit path
-to catch it, and turns the reproducer into a corruption shutdown
-situation.
-
-Reported-by: Wen Xu <wen.xu@gatech.edu>
-Signed-Off-By: Dave Chinner <dchinner@redhat.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
-Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
-[darrick: fix typos in comment]
-Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
----
- fs/xfs/xfs_icache.c | 73 +++++++++++++++++++++++++++++----------------
- 1 file changed, 48 insertions(+), 25 deletions(-)
-
-diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
-index 9a18f69f6e96..817899961f48 100644
---- a/fs/xfs/xfs_icache.c
-+++ b/fs/xfs/xfs_icache.c
-@@ -308,6 +308,46 @@ xfs_reinit_inode(
- return error;
- }
-
-+/*
-+ * If we are allocating a new inode, then check what was returned is
-+ * actually a free, empty inode. If we are not allocating an inode,
-+ * then check we didn't find a free inode.
-+ *
-+ * Returns:
-+ * 0 if the inode free state matches the lookup context
-+ * -ENOENT if the inode is free and we are not allocating
-+ * -EFSCORRUPTED if there is any state mismatch at all
-+ */
-+static int
-+xfs_iget_check_free_state(
-+ struct xfs_inode *ip,
-+ int flags)
-+{
-+ if (flags & XFS_IGET_CREATE) {
-+ /* should be a free inode */
-+ if (VFS_I(ip)->i_mode != 0) {
-+ xfs_warn(ip->i_mount,
-+"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
-+ ip->i_ino, VFS_I(ip)->i_mode);
-+ return -EFSCORRUPTED;
-+ }
-+
-+ if (ip->i_d.di_nblocks != 0) {
-+ xfs_warn(ip->i_mount,
-+"Corruption detected! Free inode 0x%llx has blocks allocated!",
-+ ip->i_ino);
-+ return -EFSCORRUPTED;
-+ }
-+ return 0;
-+ }
-+
-+ /* should be an allocated inode */
-+ if (VFS_I(ip)->i_mode == 0)
-+ return -ENOENT;
-+
-+ return 0;
-+}
-+
- /*
- * Check the validity of the inode we just found it the cache
- */
-@@ -357,12 +397,12 @@ xfs_iget_cache_hit(
- }
-
- /*
-- * If lookup is racing with unlink return an error immediately.
-+ * Check the inode free state is valid. This also detects lookup
-+ * racing with unlinks.
- */
-- if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
-- error = -ENOENT;
-+ error = xfs_iget_check_free_state(ip, flags);
-+ if (error)
- goto out_error;
-- }
-
- /*
- * If IRECLAIMABLE is set, we've torn down the VFS inode already.
-@@ -485,29 +525,12 @@ xfs_iget_cache_miss(
-
-
- /*
-- * If we are allocating a new inode, then check what was returned is
-- * actually a free, empty inode. If we are not allocating an inode,
-- * the check we didn't find a free inode.
-+ * Check the inode free state is valid. This also detects lookup
-+ * racing with unlinks.
- */
-- if (flags & XFS_IGET_CREATE) {
-- if (VFS_I(ip)->i_mode != 0) {
-- xfs_warn(mp,
--"Corruption detected! Free inode 0x%llx not marked free on disk",
-- ino);
-- error = -EFSCORRUPTED;
-- goto out_destroy;
-- }
-- if (ip->i_d.di_nblocks != 0) {
-- xfs_warn(mp,
--"Corruption detected! Free inode 0x%llx has blocks allocated!",
-- ino);
-- error = -EFSCORRUPTED;
-- goto out_destroy;
-- }
-- } else if (VFS_I(ip)->i_mode == 0) {
-- error = -ENOENT;
-+ error = xfs_iget_check_free_state(ip, flags);
-+ if (error)
- goto out_destroy;
-- }
-
- /*
- * Preload the radix tree so we can insert safely under the
---
-2.17.1
-
diff --git a/foreshadow.patch b/foreshadow.patch
new file mode 100644
index 000000000..fe950eebb
--- /dev/null
+++ b/foreshadow.patch
@@ -0,0 +1,10118 @@
+From 66509a276c8c1d19ee3f661a41b418d101c57d29 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 28 Jul 2018 11:47:17 +0200
+Subject: parisc: Enable CONFIG_MLONGCALLS by default
+
+From: Helge Deller <deller@gmx.de>
+
+commit 66509a276c8c1d19ee3f661a41b418d101c57d29 upstream.
+
+Enable the -mlong-calls compiler option by default, because otherwise in most
+cases linking the vmlinux binary fails due to truncations of R_PARISC_PCREL22F
+relocations. This fixes building the 64-bit defconfig.
+
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -199,7 +199,7 @@ config PREFETCH
+
+ config MLONGCALLS
+ bool "Enable the -mlong-calls compiler option for big kernels"
+- def_bool y if (!MODULES)
++ default y
+ depends on PA8X00
+ help
+ If you configure the kernel to include many drivers built-in instead
+From fedb8da96355f5f64353625bf96dc69423ad1826 Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sun, 5 Aug 2018 13:30:31 -0400
+Subject: parisc: Define mb() and add memory barriers to assembler unlock sequences
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit fedb8da96355f5f64353625bf96dc69423ad1826 upstream.
+
+For years I thought all parisc machines executed loads and stores in
+order. However, Jeff Law recently indicated on gcc-patches that this is
+not correct. There are various degrees of out-of-order execution all the
+way back to the PA7xxx processor series (hit-under-miss). The PA8xxx
+series has full out-of-order execution for both integer operations, and
+loads and stores.
+
+This is described in the following article:
+http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml
+
+For this reason, we need to define mb() and to insert a memory barrier
+before the store unlocking spinlocks. This ensures that all memory
+accesses are complete prior to unlocking. The ldcw instruction performs
+the same function on entry.
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Cc: stable@vger.kernel.org # 4.0+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/barrier.h | 32 ++++++++++++++++++++++++++++++++
+ arch/parisc/kernel/entry.S | 2 ++
+ arch/parisc/kernel/pacache.S | 1 +
+ arch/parisc/kernel/syscall.S | 4 ++++
+ 4 files changed, 39 insertions(+)
+
+--- /dev/null
++++ b/arch/parisc/include/asm/barrier.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_BARRIER_H
++#define __ASM_BARRIER_H
++
++#ifndef __ASSEMBLY__
++
++/* The synchronize caches instruction executes as a nop on systems in
++ which all memory references are performed in order. */
++#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
++
++#if defined(CONFIG_SMP)
++#define mb() do { synchronize_caches(); } while (0)
++#define rmb() mb()
++#define wmb() mb()
++#define dma_rmb() mb()
++#define dma_wmb() mb()
++#else
++#define mb() barrier()
++#define rmb() barrier()
++#define wmb() barrier()
++#define dma_rmb() barrier()
++#define dma_wmb() barrier()
++#endif
++
++#define __smp_mb() mb()
++#define __smp_rmb() mb()
++#define __smp_wmb() mb()
++
++#include <asm-generic/barrier.h>
++
++#endif /* !__ASSEMBLY__ */
++#endif /* __ASM_BARRIER_H */
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -482,6 +482,8 @@
+ .macro tlb_unlock0 spc,tmp
+ #ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
++ sync
++ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+ #endif
+ .endm
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
+ .macro tlb_unlock la,flags,tmp
+ #ifdef CONFIG_SMP
+ ldi 1,\tmp
++ sync
+ stw \tmp,0(\la)
+ mtsm \flags
+ #endif
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -633,6 +633,7 @@ cas_action:
+ sub,<> %r28, %r25, %r0
+ 2: stw,ma %r24, 0(%r26)
+ /* Free lock */
++ sync
+ stw,ma %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+@@ -647,6 +648,7 @@ cas_action:
+ 3:
+ /* Error occurred on load or store */
+ /* Free lock */
++ sync
+ stw %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ stw %r0, 4(%sr2,%r20)
+@@ -848,6 +850,7 @@ cas2_action:
+
+ cas2_end:
+ /* Free lock */
++ sync
+ stw,ma %r20, 0(%sr2,%r20)
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
+@@ -858,6 +861,7 @@ cas2_end:
+ 22:
+ /* Error occurred on load or store */
+ /* Free lock */
++ sync
+ stw %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ ldo 1(%r0),%r28
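The requirement fixed above is not parisc-specific: whenever a lock is released with a plain store, the stores done while the lock was held need a barrier (or a store with release semantics) so they become visible before the lock appears free. As a hedged user-space analogue of that rule (C11 atomics, not the kernel code, names invented):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int lock;		/* 0 = free, 1 = held */
  static int protected_data;

  static void spin_lock(void)
  {
  	/* acquire: later accesses cannot move before taking the lock */
  	while (atomic_exchange_explicit(&lock, 1, memory_order_acquire))
  		;
  }

  static void spin_unlock(void)
  {
  	/*
  	 * Release plays the role of the added "sync": every store made
  	 * while the lock was held is visible before the store that
  	 * marks the lock free.
  	 */
  	atomic_store_explicit(&lock, 0, memory_order_release);
  }

  int main(void)
  {
  	spin_lock();
  	protected_data = 42;	/* must not leak past the unlock */
  	spin_unlock();
  	printf("%d\n", protected_data);
  	return 0;
  }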
+From 3c53776e29f81719efcf8f7a6e30cdf753bee94d Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 8 Jan 2018 11:51:04 -0800
+Subject: Mark HI and TASKLET softirq synchronous
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 3c53776e29f81719efcf8f7a6e30cdf753bee94d upstream.
+
+Way back in 4.9, we committed 4cd13c21b207 ("softirq: Let ksoftirqd do
+its job"), and ever since we've had small nagging issues with it. For
+example, we've had:
+
+ 1ff688209e2e ("watchdog: core: make sure the watchdog_worker is not deferred")
+ 8d5755b3f77b ("watchdog: softdog: fire watchdog even if softirqs do not get to run")
+ 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()")
+
+all of which worked around some of the effects of that commit.
+
+The DVB people have also complained that the commit causes excessive USB
+URB latencies, which seems to be due to the USB code using tasklets to
+schedule USB traffic. This seems to be an issue mainly when already
+living on the edge, but waiting for ksoftirqd to handle it really does
+seem to cause excessive latencies.
+
+Now Hanna Hawa reports that this issue isn't just limited to USB URB and
+DVB, but also causes timeout problems for the Marvell SoC team:
+
+ "I'm facing kernel panic issue while running raid 5 on sata disks
+ connected to Macchiatobin (Marvell community board with Armada-8040
+ SoC with 4 ARMv8 cores of CA72) Raid 5 built with Marvell DMA engine
+ and async_tx mechanism (ASYNC_TX_DMA [=y]); the DMA driver (mv_xor_v2)
+ uses a tasklet to clean the done descriptors from the queue"
+
+The latency problem causes a panic:
+
+ mv_xor_v2 f0400000.xor: dma_sync_wait: timeout!
+ Kernel panic - not syncing: async_tx_quiesce: DMA error waiting for transaction
+
+We've discussed simply just reverting the original commit entirely, and
+also much more involved solutions (with per-softirq threads etc). This
+patch is intentionally stupid and fairly limited, because the issue
+still remains, and the other solutions either got sidetracked or had
+other issues.
+
+We should probably also consider the timer softirqs to be synchronous
+and not be delayed to ksoftirqd (since they were the issue with the
+earlier watchdog problems), but that should be done as a separate patch.
+This does only the tasklet cases.
+
+Reported-and-tested-by: Hanna Hawa <hannah@marvell.com>
+Reported-and-tested-by: Josef Griebichler <griebichler.josef@gmx.at>
+Reported-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/softirq.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -79,12 +79,16 @@ static void wakeup_softirqd(void)
+
+ /*
+ * If ksoftirqd is scheduled, we do not want to process pending softirqs
+- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
++ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
++ * unless we're doing some of the synchronous softirqs.
+ */
+-static bool ksoftirqd_running(void)
++#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
++static bool ksoftirqd_running(unsigned long pending)
+ {
+ struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+
++ if (pending & SOFTIRQ_NOW_MASK)
++ return false;
+ return tsk && (tsk->state == TASK_RUNNING);
+ }
+
+@@ -329,7 +333,7 @@ asmlinkage __visible void do_softirq(voi
+
+ pending = local_softirq_pending();
+
+- if (pending && !ksoftirqd_running())
++ if (pending && !ksoftirqd_running(pending))
+ do_softirq_own_stack();
+
+ local_irq_restore(flags);
+@@ -356,7 +360,7 @@ void irq_enter(void)
+
+ static inline void invoke_softirq(void)
+ {
+- if (ksoftirqd_running())
++ if (ksoftirqd_running(local_softirq_pending()))
+ return;
+
+ if (!force_irqthreads) {
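The whole change reduces to a bitmask test: if a pending softirq falls inside SOFTIRQ_NOW_MASK, it is processed immediately instead of being deferred to ksoftirqd. A stand-alone sketch of that selection logic (enum values simplified and names illustrative rather than the kernel's):

  #include <stdbool.h>
  #include <stdio.h>

  enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, TASKLET_SOFTIRQ };

  /* these softirqs are treated as synchronous */
  #define SOFTIRQ_NOW_MASK ((1u << HI_SOFTIRQ) | (1u << TASKLET_SOFTIRQ))

  /* stand-in for the reworked ksoftirqd_running(pending) check */
  static bool defer_to_ksoftirqd(unsigned long pending, bool ksoftirqd_busy)
  {
  	if (pending & SOFTIRQ_NOW_MASK)
  		return false;		/* handle these right now */
  	return ksoftirqd_busy;		/* otherwise let ksoftirqd catch up */
  }

  int main(void)
  {
  	printf("tasklet deferred: %d\n",
  	       defer_to_ksoftirqd(1u << TASKLET_SOFTIRQ, true));	/* 0 */
  	printf("timer deferred:   %d\n",
  	       defer_to_ksoftirqd(1u << TIMER_SOFTIRQ, true));		/* 1 */
  	return 0;
  }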
+From 2610e88946632afb78aa58e61f11368ac4c0af7b Mon Sep 17 00:00:00 2001
+From: "Isaac J. Manjarres" <isaacm@codeaurora.org>
+Date: Tue, 17 Jul 2018 12:35:29 -0700
+Subject: stop_machine: Disable preemption after queueing stopper threads
+
+From: Isaac J. Manjarres <isaacm@codeaurora.org>
+
+commit 2610e88946632afb78aa58e61f11368ac4c0af7b upstream.
+
+This commit:
+
+ 9fb8d5dc4b64 ("stop_machine, Disable preemption when waking two stopper threads")
+
+does not fully address the race condition that can occur
+as follows:
+
+On one CPU, call it CPU 3, thread 1 invokes
+cpu_stop_queue_two_works(2, 3,...), and the execution is such
+that thread 1 queues the works for migration/2 and migration/3,
+and is preempted after releasing the locks for migration/2 and
+migration/3, but before waking the threads.
+
+Then, On CPU 2, a kworker, call it thread 2, is running,
+and it invokes cpu_stop_queue_two_works(1, 2,...), such that
+thread 2 queues the works for migration/1 and migration/2.
+Meanwhile, on CPU 3, thread 1 resumes execution, and wakes
+migration/2 and migration/3. This means that when CPU 2
+releases the locks for migration/1 and migration/2, but before
+it wakes those threads, it can be preempted by migration/2.
+
+If thread 2 is preempted by migration/2, then migration/2 will
+execute the first work item successfully, since migration/3
+was woken up by CPU 3, but when it goes to execute the second
+work item, it disables preemption, calls multi_cpu_stop(),
+and thus, CPU 2 will wait forever for migration/1, which should
+have been woken up by thread 2. However migration/1 cannot be
+woken up by thread 2, since it is a kworker, so it is affine to
+CPU 2, but CPU 2 is running migration/2 with preemption
+disabled, so thread 2 will never run.
+
+Disable preemption after queueing works for stopper threads
+to ensure that the operation of queueing the works and waking
+the stopper threads is atomic.
+
+Co-Developed-by: Prasad Sodagudi <psodagud@codeaurora.org>
+Co-Developed-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
+Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
+Signed-off-by: Prasad Sodagudi <psodagud@codeaurora.org>
+Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bigeasy@linutronix.de
+Cc: gregkh@linuxfoundation.org
+Cc: matt@codeblueprint.co.uk
+Fixes: 9fb8d5dc4b64 ("stop_machine, Disable preemption when waking two stopper threads")
+Link: http://lkml.kernel.org/r/1531856129-9871-1-git-send-email-isaacm@codeaurora.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/stop_machine.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -260,6 +260,15 @@ retry:
+ err = 0;
+ __cpu_stop_queue_work(stopper1, work1, &wakeq);
+ __cpu_stop_queue_work(stopper2, work2, &wakeq);
++ /*
++ * The waking up of stopper threads has to happen
++ * in the same scheduling context as the queueing.
++ * Otherwise, there is a possibility of one of the
++ * above stoppers being woken up by another CPU,
++ * and preempting us. This will cause us to not
++ * wake up the other stopper forever.
++ */
++ preempt_disable();
+ unlock:
+ raw_spin_unlock(&stopper2->lock);
+ raw_spin_unlock_irq(&stopper1->lock);
+@@ -271,7 +280,6 @@ unlock:
+ }
+
+ if (!err) {
+- preempt_disable();
+ wake_up_q(&wakeq);
+ preempt_enable();
+ }
+From 840d719604b0925ca23dde95f1767e4528668369 Mon Sep 17 00:00:00 2001
+From: Daniel Bristot de Oliveira <bristot@redhat.com>
+Date: Fri, 20 Jul 2018 11:16:30 +0200
+Subject: sched/deadline: Update rq_clock of later_rq when pushing a task
+
+From: Daniel Bristot de Oliveira <bristot@redhat.com>
+
+commit 840d719604b0925ca23dde95f1767e4528668369 upstream.
+
+Daniel Casini got this warn while running a DL task here at RetisLab:
+
+ [ 461.137582] ------------[ cut here ]------------
+ [ 461.137583] rq->clock_update_flags < RQCF_ACT_SKIP
+ [ 461.137599] WARNING: CPU: 4 PID: 2354 at kernel/sched/sched.h:967 assert_clock_updated.isra.32.part.33+0x17/0x20
+ [a ton of modules]
+ [ 461.137646] CPU: 4 PID: 2354 Comm: label_image Not tainted 4.18.0-rc4+ #3
+ [ 461.137647] Hardware name: ASUS All Series/Z87-K, BIOS 0801 09/02/2013
+ [ 461.137649] RIP: 0010:assert_clock_updated.isra.32.part.33+0x17/0x20
+ [ 461.137649] Code: ff 48 89 83 08 09 00 00 eb c6 66 0f 1f 84 00 00 00 00 00 55 48 c7 c7 98 7a 6c a5 c6 05 bc 0d 54 01 01 48 89 e5 e8 a9 84 fb ff <0f> 0b 5d c3 0f 1f 44 00 00 0f 1f 44 00 00 83 7e 60 01 74 0a 48 3b
+ [ 461.137673] RSP: 0018:ffffa77e08cafc68 EFLAGS: 00010082
+ [ 461.137674] RAX: 0000000000000000 RBX: ffff8b3fc1702d80 RCX: 0000000000000006
+ [ 461.137674] RDX: 0000000000000007 RSI: 0000000000000096 RDI: ffff8b3fded164b0
+ [ 461.137675] RBP: ffffa77e08cafc68 R08: 0000000000000026 R09: 0000000000000339
+ [ 461.137676] R10: ffff8b3fd060d410 R11: 0000000000000026 R12: ffffffffa4e14e20
+ [ 461.137677] R13: ffff8b3fdec22940 R14: ffff8b3fc1702da0 R15: ffff8b3fdec22940
+ [ 461.137678] FS: 00007efe43ee5700(0000) GS:ffff8b3fded00000(0000) knlGS:0000000000000000
+ [ 461.137679] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 461.137680] CR2: 00007efe30000010 CR3: 0000000301744003 CR4: 00000000001606e0
+ [ 461.137680] Call Trace:
+ [ 461.137684] push_dl_task.part.46+0x3bc/0x460
+ [ 461.137686] task_woken_dl+0x60/0x80
+ [ 461.137689] ttwu_do_wakeup+0x4f/0x150
+ [ 461.137690] ttwu_do_activate+0x77/0x80
+ [ 461.137692] try_to_wake_up+0x1d6/0x4c0
+ [ 461.137693] wake_up_q+0x32/0x70
+ [ 461.137696] do_futex+0x7e7/0xb50
+ [ 461.137698] __x64_sys_futex+0x8b/0x180
+ [ 461.137701] do_syscall_64+0x5a/0x110
+ [ 461.137703] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ [ 461.137705] RIP: 0033:0x7efe4918ca26
+ [ 461.137705] Code: 00 00 00 74 17 49 8b 48 20 44 8b 59 10 41 83 e3 30 41 83 fb 20 74 1e be 85 00 00 00 41 ba 01 00 00 00 41 b9 01 00 00 04 0f 05 <48> 3d 01 f0 ff ff 73 1f 31 c0 c3 be 8c 00 00 00 49 89 c8 4d 31 d2
+ [ 461.137738] RSP: 002b:00007efe43ee4928 EFLAGS: 00000283 ORIG_RAX: 00000000000000ca
+ [ 461.137739] RAX: ffffffffffffffda RBX: 0000000005094df0 RCX: 00007efe4918ca26
+ [ 461.137740] RDX: 0000000000000001 RSI: 0000000000000085 RDI: 0000000005094e24
+ [ 461.137741] RBP: 00007efe43ee49c0 R08: 0000000005094e20 R09: 0000000004000001
+ [ 461.137741] R10: 0000000000000001 R11: 0000000000000283 R12: 0000000000000000
+ [ 461.137742] R13: 0000000005094df8 R14: 0000000000000001 R15: 0000000000448a10
+ [ 461.137743] ---[ end trace 187df4cad2bf7649 ]---
+
+This warning happened in the push_dl_task(), because
+__add_running_bw()->cpufreq_update_util() is getting the rq_clock of
+the later_rq before its update, which takes place at activate_task().
+The fix then is to update the rq_clock before calling add_running_bw().
+
+To avoid double rq_clock_update() call, we set ENQUEUE_NOCLOCK flag to
+activate_task().
+
+Reported-by: Daniel Casini <daniel.casini@santannapisa.it>
+Signed-off-by: Daniel Bristot de Oliveira <bristot@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Juri Lelli <juri.lelli@redhat.com>
+Cc: Clark Williams <williams@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luca Abeni <luca.abeni@santannapisa.it>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tommaso Cucinotta <tommaso.cucinotta@santannapisa.it>
+Fixes: e0367b12674b ("sched/deadline: Move CPU frequency selection triggering points")
+Link: http://lkml.kernel.org/r/ca31d073a4788acf0684a8b255f14fea775ccf20.1532077269.git.bristot@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/deadline.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2090,8 +2090,14 @@ retry:
+ sub_rq_bw(&next_task->dl, &rq->dl);
+ set_task_cpu(next_task, later_rq->cpu);
+ add_rq_bw(&next_task->dl, &later_rq->dl);
++
++ /*
++ * Update the later_rq clock here, because the clock is used
++ * by the cpufreq_update_util() inside __add_running_bw().
++ */
++ update_rq_clock(later_rq);
+ add_running_bw(&next_task->dl, &later_rq->dl);
+- activate_task(later_rq, next_task, 0);
++ activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
+ ret = 1;
+
+ resched_curr(later_rq);
+From 4f7a7beaee77275671654f7b9f3f9e73ca16ec65 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Fri, 10 Aug 2018 17:23:10 -0700
+Subject: zram: remove BD_CAP_SYNCHRONOUS_IO with writeback feature
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit 4f7a7beaee77275671654f7b9f3f9e73ca16ec65 upstream.
+
+If zram supports writeback feature, it's no longer a
+BD_CAP_SYNCHRONOUS_IO device beause zram does asynchronous IO operations
+for incompressible pages.
+
+Do not pretend to be synchronous IO device. It makes the system very
+sluggish due to waiting for IO completion from upper layers.
+
+Furthermore, it causes a use-after-free problem because swap thinks the
+operation is done when the IO function returns, so it can free the page
+(e.g., lock_page_or_retry and goto out_release in do_swap_page) but in
+fact, IO is asynchronous so the driver could access a just freed page
+afterward.
+
+This patch fixes the problem.
+
+ BUG: Bad page state in process qemu-system-x86 pfn:3dfab21
+ page:ffffdfb137eac840 count:0 mapcount:0 mapping:0000000000000000 index:0x1
+ flags: 0x17fffc000000008(uptodate)
+ raw: 017fffc000000008 dead000000000100 dead000000000200 0000000000000000
+ raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
+ page dumped because: PAGE_FLAGS_CHECK_AT_PREP flag set
+ bad because of flags: 0x8(uptodate)
+ CPU: 4 PID: 1039 Comm: qemu-system-x86 Tainted: G B 4.18.0-rc5+ #1
+ Hardware name: Supermicro Super Server/X10SRL-F, BIOS 2.0b 05/02/2017
+ Call Trace:
+ dump_stack+0x5c/0x7b
+ bad_page+0xba/0x120
+ get_page_from_freelist+0x1016/0x1250
+ __alloc_pages_nodemask+0xfa/0x250
+ alloc_pages_vma+0x7c/0x1c0
+ do_swap_page+0x347/0x920
+ __handle_mm_fault+0x7b4/0x1110
+ handle_mm_fault+0xfc/0x1f0
+ __get_user_pages+0x12f/0x690
+ get_user_pages_unlocked+0x148/0x1f0
+ __gfn_to_pfn_memslot+0xff/0x3c0 [kvm]
+ try_async_pf+0x87/0x230 [kvm]
+ tdp_page_fault+0x132/0x290 [kvm]
+ kvm_mmu_page_fault+0x74/0x570 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x9b3/0x1990 [kvm]
+ kvm_vcpu_ioctl+0x388/0x5d0 [kvm]
+ do_vfs_ioctl+0xa2/0x630
+ ksys_ioctl+0x70/0x80
+ __x64_sys_ioctl+0x16/0x20
+ do_syscall_64+0x55/0x100
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Link: https://lore.kernel.org/lkml/0516ae2d-b0fd-92c5-aa92-112ba7bd32fc@contabo.de/
+Link: http://lkml.kernel.org/r/20180802051112.86174-1-minchan@kernel.org
+[minchan@kernel.org: fix changelog, add comment]
+ Link: https://lore.kernel.org/lkml/0516ae2d-b0fd-92c5-aa92-112ba7bd32fc@contabo.de/
+ Link: http://lkml.kernel.org/r/20180802051112.86174-1-minchan@kernel.org
+ Link: http://lkml.kernel.org/r/20180805233722.217347-1-minchan@kernel.org
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Reported-by: Tino Lehnig <tino.lehnig@contabo.de>
+Tested-by: Tino Lehnig <tino.lehnig@contabo.de>
+Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: <stable@vger.kernel.org> [4.15+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/zram/zram_drv.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -280,7 +280,8 @@ static void reset_bdev(struct zram *zram
+ zram->backing_dev = NULL;
+ zram->old_block_size = 0;
+ zram->bdev = NULL;
+-
++ zram->disk->queue->backing_dev_info->capabilities |=
++ BDI_CAP_SYNCHRONOUS_IO;
+ kvfree(zram->bitmap);
+ zram->bitmap = NULL;
+ }
+@@ -382,6 +383,18 @@ static ssize_t backing_dev_store(struct
+ zram->backing_dev = backing_dev;
+ zram->bitmap = bitmap;
+ zram->nr_pages = nr_pages;
++ /*
++ * With writeback feature, zram does asynchronous IO so it's no longer
++ * synchronous device so let's remove the synchronous io flag. Otherwise,
++ * upper layer(e.g., swap) could wait IO completion rather than
++ * (submit and return), which will cause system sluggish.
++ * Furthermore, when the IO function returns(e.g., swap_readpage),
++ * upper layer expects IO was done so it could deallocate the page
++ * freely but in fact, IO is going on so finally could cause
++ * use-after-free when the IO is really done.
++ */
++ zram->disk->queue->backing_dev_info->capabilities &=
++ ~BDI_CAP_SYNCHRONOUS_IO;
+ up_write(&zram->init_lock);
+
+ pr_info("setup backing device %s\n", file_name);
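Mechanically, the fix only keeps one capability bit in sync with whether the device can still complete IO synchronously: clear it when a writeback device is attached, set it again when the backing device is reset. A minimal sketch of that flag handling (plain C; the flag value and struct are illustrative, not the real backing_dev_info):

  #include <stdio.h>

  #define BDI_CAP_SYNCHRONOUS_IO	(1u << 0)	/* value is illustrative */

  struct bdi { unsigned int capabilities; };

  /* writeback attached => IO may complete asynchronously */
  static void set_writeback(struct bdi *bdi, int writeback)
  {
  	if (writeback)
  		bdi->capabilities &= ~BDI_CAP_SYNCHRONOUS_IO;
  	else
  		bdi->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
  }

  int main(void)
  {
  	struct bdi bdi = { .capabilities = BDI_CAP_SYNCHRONOUS_IO };

  	set_writeback(&bdi, 1);
  	printf("sync: %u\n", !!(bdi.capabilities & BDI_CAP_SYNCHRONOUS_IO));
  	set_writeback(&bdi, 0);
  	printf("sync: %u\n", !!(bdi.capabilities & BDI_CAP_SYNCHRONOUS_IO));
  	return 0;
  }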
+From d472b3a6cf63cd31cae1ed61930f07e6cd6671b5 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 9 Aug 2018 16:42:16 +0200
+Subject: xen/netfront: don't cache skb_shinfo()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit d472b3a6cf63cd31cae1ed61930f07e6cd6671b5 upstream.
+
+skb_shinfo() can change when calling __pskb_pull_tail(): Don't cache
+its return value.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netfront.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+ {
+- struct skb_shared_info *shinfo = skb_shinfo(skb);
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *nskb;
+
+@@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct
+ RING_GET_RESPONSE(&queue->rx, ++cons);
+ skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+
+- if (shinfo->nr_frags == MAX_SKB_FRAGS) {
++ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
+ unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+
+ BUG_ON(pull_to <= skb_headlen(skb));
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
++ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+
+- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++ skb_frag_page(nfrag),
+ rx->offset, rx->status, PAGE_SIZE);
+
+ skb_shinfo(nskb)->nr_frags = 0;
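The bug pattern here is generic: caching the result of an accessor and then calling something that may move or re-lay-out the underlying object leaves the cached pointer stale. A user-space analogue using realloc() (purely illustrative, no kernel APIs involved):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(void)
  {
  	char *buf = malloc(8);
  	char *cached = buf;		/* like caching skb_shinfo(skb) */

  	if (!buf)
  		return 1;
  	strcpy(buf, "hello");

  	/* like __pskb_pull_tail(): may move the underlying storage */
  	buf = realloc(buf, 4096);
  	if (!buf) {
  		free(cached);		/* realloc failed, old block intact */
  		return 1;
  	}

  	/*
  	 * Dereferencing 'cached' here would be the stale-pointer bug the
  	 * patch fixes; always re-derive the pointer after such a call.
  	 */
  	printf("%s\n", buf);
  	free(buf);
  	return 0;
  }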
+From 7c81c71730456845e6212dccbf00098faa66740f Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 8 Aug 2018 19:23:14 +0200
+Subject: bpf, sockmap: fix leak in bpf_tcp_sendmsg wait for mem path
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 7c81c71730456845e6212dccbf00098faa66740f upstream.
+
+In bpf_tcp_sendmsg() the sk_alloc_sg() may fail. In the case of
+ENOMEM, it may also mean that we've partially filled the scatterlist
+entries with pages. Later jumping to sk_stream_wait_memory()
+we could further fail with an error for several reasons; however,
+we neglect to call free_start_sg() if the local sk_msg_buff was used.
+
+Fixes: 4f738adba30a ("bpf: create tcp_bpf_ulp allowing BPF to monitor socket TX/RX data")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/sockmap.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -947,7 +947,7 @@ static int bpf_tcp_sendmsg(struct sock *
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ while (msg_data_left(msg)) {
+- struct sk_msg_buff *m;
++ struct sk_msg_buff *m = NULL;
+ bool enospc = false;
+ int copy;
+
+@@ -1015,8 +1015,11 @@ wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ wait_for_memory:
+ err = sk_stream_wait_memory(sk, &timeo);
+- if (err)
++ if (err) {
++ if (m && m != psock->cork)
++ free_start_sg(sk, m);
+ goto out_err;
++ }
+ }
+ out_err:
+ if (err < 0)
+From 5121700b346b6160ccc9411194e3f1f417c340d1 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 8 Aug 2018 19:23:13 +0200
+Subject: bpf, sockmap: fix bpf_tcp_sendmsg sock error handling
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 5121700b346b6160ccc9411194e3f1f417c340d1 upstream.
+
+While working on bpf_tcp_sendmsg() code, I noticed that when a
+sk->sk_err is set we error out with err = sk->sk_err. However
+this is problematic since sk->sk_err is a positive error value
+and therefore we will neither go into sk_stream_error() nor will
+we report an error back to user space. I had this case with EPIPE
+and user space was thinking sendmsg() succeeded since EPIPE is
+a positive value, thinking we submitted 32 bytes. Fix it by
+negating the sk->sk_err value.
+
+Fixes: 4f738adba30a ("bpf: create tcp_bpf_ulp allowing BPF to monitor socket TX/RX data")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/bpf/sockmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -952,7 +952,7 @@ static int bpf_tcp_sendmsg(struct sock *
+ int copy;
+
+ if (sk->sk_err) {
+- err = sk->sk_err;
++ err = -sk->sk_err;
+ goto out_err;
+ }
+
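Both sockmap fixes lean on the same kernel convention: in-kernel functions return negative errno values, while sk->sk_err stores the positive code, so the sign must be flipped before returning or an error looks like a byte count to user space. A trivial stand-alone sketch of the convention (not kernel code):

  #include <errno.h>
  #include <stdio.h>

  /* sk_err-style field: holds a positive errno such as EPIPE */
  struct fake_sock { int sk_err; };

  static int sendmsg_result(const struct fake_sock *sk, int copied)
  {
  	if (sk->sk_err)
  		return -sk->sk_err;	/* negative errno, as callers expect */
  	return copied;			/* >= 0 means bytes submitted */
  }

  int main(void)
  {
  	struct fake_sock ok = { 0 }, broken = { EPIPE };

  	printf("%d\n", sendmsg_result(&ok, 32));	/* 32 */
  	printf("%d\n", sendmsg_result(&broken, 32));	/* -EPIPE, not 32 */
  	return 0;
  }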
+From 1214fd7b497400d200e3f4e64e2338b303a20949 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Thu, 2 Aug 2018 10:44:42 -0700
+Subject: scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
+
+From: Bart Van Assche <bart.vanassche@wdc.com>
+
+commit 1214fd7b497400d200e3f4e64e2338b303a20949 upstream.
+
+Surround scsi_execute() calls with scsi_autopm_get_device() and
+scsi_autopm_put_device(). Note: removing sr_mutex protection from the
+scsi_cd_get() and scsi_cd_put() calls is safe because the purpose of
+sr_mutex is to serialize cdrom_*() calls.
+
+This patch avoids that complaints similar to the following appear in the
+kernel log if runtime power management is enabled:
+
+INFO: task systemd-udevd:650 blocked for more than 120 seconds.
+ Not tainted 4.18.0-rc7-dbg+ #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+systemd-udevd D28176 650 513 0x00000104
+Call Trace:
+__schedule+0x444/0xfe0
+schedule+0x4e/0xe0
+schedule_preempt_disabled+0x18/0x30
+__mutex_lock+0x41c/0xc70
+mutex_lock_nested+0x1b/0x20
+__blkdev_get+0x106/0x970
+blkdev_get+0x22c/0x5a0
+blkdev_open+0xe9/0x100
+do_dentry_open.isra.19+0x33e/0x570
+vfs_open+0x7c/0xd0
+path_openat+0x6e3/0x1120
+do_filp_open+0x11c/0x1c0
+do_sys_open+0x208/0x2d0
+__x64_sys_openat+0x59/0x70
+do_syscall_64+0x77/0x230
+entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Maurizio Lombardi <mlombard@redhat.com>
+Cc: Johannes Thumshirn <jthumshirn@suse.de>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: <stable@vger.kernel.org>
+Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sr.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_c
+ static int sr_block_open(struct block_device *bdev, fmode_t mode)
+ {
+ struct scsi_cd *cd;
++ struct scsi_device *sdev;
+ int ret = -ENXIO;
+
++ cd = scsi_cd_get(bdev->bd_disk);
++ if (!cd)
++ goto out;
++
++ sdev = cd->device;
++ scsi_autopm_get_device(sdev);
+ check_disk_change(bdev);
+
+ mutex_lock(&sr_mutex);
+- cd = scsi_cd_get(bdev->bd_disk);
+- if (cd) {
+- ret = cdrom_open(&cd->cdi, bdev, mode);
+- if (ret)
+- scsi_cd_put(cd);
+- }
++ ret = cdrom_open(&cd->cdi, bdev, mode);
+ mutex_unlock(&sr_mutex);
++
++ scsi_autopm_put_device(sdev);
++ if (ret)
++ scsi_cd_put(cd);
++
++out:
+ return ret;
+ }
+
+@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_d
+ if (ret)
+ goto out;
+
++ scsi_autopm_get_device(sdev);
++
+ /*
+ * Send SCSI addressing ioctls directly to mid level, send other
+ * ioctls to cdrom/block level.
+@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_d
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ ret = scsi_ioctl(sdev, cmd, argp);
+- goto out;
++ goto put;
+ }
+
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ if (ret != -ENOSYS)
+- goto out;
++ goto put;
+
+ ret = scsi_ioctl(sdev, cmd, argp);
+
++put:
++ scsi_autopm_put_device(sdev);
++
+ out:
+ mutex_unlock(&sr_mutex);
+ return ret;
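Structurally this is the usual C get/put bracketing with goto-based unwinding: take the runtime-PM reference before issuing commands and drop it on every exit path, success or error. A generic sketch of the pattern (the resource_get()/resource_put() names are invented for illustration):

  #include <stdio.h>

  static int ref_count;

  static void resource_get(void) { ref_count++; }	/* like scsi_autopm_get_device() */
  static void resource_put(void) { ref_count--; }	/* like scsi_autopm_put_device() */

  static int do_command(int cmd)
  {
  	int ret;

  	resource_get();

  	if (cmd < 0) {
  		ret = -1;
  		goto put;		/* error path still drops the reference */
  	}
  	ret = 0;			/* issue the command here */

  put:
  	resource_put();
  	return ret;
  }

  int main(void)
  {
  	do_command(1);
  	do_command(-1);
  	printf("outstanding refs: %d\n", ref_count);	/* 0 if balanced */
  	return 0;
  }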
+From 5e53be8e476a3397ed5383c23376f299555a2b43 Mon Sep 17 00:00:00 2001
+From: Quinn Tran <quinn.tran@cavium.com>
+Date: Thu, 26 Jul 2018 16:34:44 -0700
+Subject: scsi: qla2xxx: Fix memory leak for allocating abort IOCB
+
+From: Quinn Tran <quinn.tran@cavium.com>
+
+commit 5e53be8e476a3397ed5383c23376f299555a2b43 upstream.
+
+In the case of IOCB QFull, Initiator code can leave behind a stale pointer
+to an SRB structure on the outstanding command array.
+
+Fixes: 82de802ad46e ("scsi: qla2xxx: Preparation for Target MQ.")
+Cc: stable@vger.kernel.org #v4.16+
+Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
+Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/qla2xxx/qla_iocb.c | 53 ++++++++++++++++++++--------------------
+ 1 file changed, 27 insertions(+), 26 deletions(-)
+
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *
+ req_cnt = 1;
+ handle = 0;
+
+- if (!sp)
+- goto skip_cmd_array;
+-
+- /* Check for room in outstanding command list. */
+- handle = req->current_outstanding_cmd;
+- for (index = 1; index < req->num_outstanding_cmds; index++) {
+- handle++;
+- if (handle == req->num_outstanding_cmds)
+- handle = 1;
+- if (!req->outstanding_cmds[handle])
+- break;
+- }
+- if (index == req->num_outstanding_cmds) {
+- ql_log(ql_log_warn, vha, 0x700b,
+- "No room on outstanding cmd array.\n");
+- goto queuing_error;
+- }
+-
+- /* Prep command array. */
+- req->current_outstanding_cmd = handle;
+- req->outstanding_cmds[handle] = sp;
+- sp->handle = handle;
+-
+- /* Adjust entry-counts as needed. */
+- if (sp->type != SRB_SCSI_CMD)
++ if (sp && (sp->type != SRB_SCSI_CMD)) {
++ /* Adjust entry-counts as needed. */
+ req_cnt = sp->iocbs;
++ }
+
+-skip_cmd_array:
+ /* Check for room on request queue. */
+ if (req->cnt < req_cnt + 2) {
+ if (qpair->use_shadow_reg)
+@@ -2183,6 +2160,28 @@ skip_cmd_array:
+ if (req->cnt < req_cnt + 2)
+ goto queuing_error;
+
++ if (sp) {
++ /* Check for room in outstanding command list. */
++ handle = req->current_outstanding_cmd;
++ for (index = 1; index < req->num_outstanding_cmds; index++) {
++ handle++;
++ if (handle == req->num_outstanding_cmds)
++ handle = 1;
++ if (!req->outstanding_cmds[handle])
++ break;
++ }
++ if (index == req->num_outstanding_cmds) {
++ ql_log(ql_log_warn, vha, 0x700b,
++ "No room on outstanding cmd array.\n");
++ goto queuing_error;
++ }
++
++ /* Prep command array. */
++ req->current_outstanding_cmd = handle;
++ req->outstanding_cmds[handle] = sp;
++ sp->handle = handle;
++ }
++
+ /* Prep packet */
+ req->cnt -= req_cnt;
+ pkt = req->ring_ptr;
+@@ -2195,6 +2194,8 @@ skip_cmd_array:
+ pkt->handle = handle;
+ }
+
++ return pkt;
++
+ queuing_error:
+ qpair->tgt_counters.num_alloc_iocb_failed++;
+ return pkt;
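The leak came from publishing the SRB pointer into the outstanding-command array before the queue-full checks, so an early error exit left a stale entry behind. The reordering boils down to: run every check that can still fail first, and claim a slot only afterwards. A simplified, stand-alone sketch of that discipline (array size and names are illustrative):

  #include <stdio.h>

  #define SLOTS 4

  static void *outstanding[SLOTS];

  /* Claim a slot only after all checks that can still fail have passed. */
  static int queue_cmd(void *sp, int ring_space_left)
  {
  	int i;

  	if (ring_space_left < 1)
  		return -1;		/* fail before touching the array */

  	for (i = 0; i < SLOTS; i++) {
  		if (!outstanding[i]) {
  			outstanding[i] = sp;	/* publish last */
  			return i;
  		}
  	}
  	return -1;			/* no free slot: nothing to undo */
  }

  int main(void)
  {
  	int dummy;

  	printf("%d\n", queue_cmd(&dummy, 0));	/* -1, array untouched */
  	printf("%d\n", queue_cmd(&dummy, 8));	/* 0 */
  	return 0;
  }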
+From b5b1404d0815894de0690de8a1ab58269e56eae6 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 12 Aug 2018 12:19:42 -0700
+Subject: init: rename and re-order boot_cpu_state_init()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit b5b1404d0815894de0690de8a1ab58269e56eae6 upstream.
+
+This is purely a preparatory patch for upcoming changes during the 4.19
+merge window.
+
+We have a function called "boot_cpu_state_init()" that isn't really
+about the bootup cpu state: that is done much earlier by the similarly
+named "boot_cpu_init()" (note lack of "state" in name).
+
+This function initializes some hotplug CPU state, and needs to run after
+the percpu data has been properly initialized. It even has a comment to
+that effect.
+
+Except it _doesn't_ actually run after the percpu data has been properly
+initialized. On x86 it happens to do that, but on at least arm and
+arm64, the percpu base pointers are initialized by the arch-specific
+'smp_prepare_boot_cpu()' hook, which ran _after_ boot_cpu_state_init().
+
+This had some unexpected results, and in particular we have a patch
+pending for the merge window that did the obvious cleanup of using
+'this_cpu_write()' in the cpu hotplug init code:
+
+ - per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+ + this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+
+which is obviously the right thing to do. Except because of the
+ordering issue, it actually failed miserably and unexpectedly on arm64.
+
+So this just fixes the ordering, and changes the name of the function to
+be 'boot_cpu_hotplug_init()' to make it obvious that it's about cpu
+hotplug state, because the core CPU state was supposed to have already
+been done earlier.
+
+Marked for stable, since the (not yet merged) patch that will show this
+problem is marked for stable.
+
+Reported-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpu.h | 2 +-
+ init/main.c | 2 +-
+ kernel/cpu.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -30,7 +30,7 @@ struct cpu {
+ };
+
+ extern void boot_cpu_init(void);
+-extern void boot_cpu_state_init(void);
++extern void boot_cpu_hotplug_init(void);
+ extern void cpu_init(void);
+ extern void trap_init(void);
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_k
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
+- boot_cpu_state_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
++ boot_cpu_hotplug_init();
+
+ build_all_zonelists(NULL);
+ page_alloc_init();
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2010,7 +2010,7 @@ void __init boot_cpu_init(void)
+ /*
+ * Must be called _AFTER_ setting up the per_cpu areas
+ */
+-void __init boot_cpu_state_init(void)
++void __init boot_cpu_hotplug_init(void)
+ {
+ per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+ }
+From 90bad5e05bcdb0308cfa3d3a60f5c0b9c8e2efb3 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Mon, 6 Aug 2018 09:03:58 -0400
+Subject: root dentries need RCU-delayed freeing
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 90bad5e05bcdb0308cfa3d3a60f5c0b9c8e2efb3 upstream.
+
+Since mountpoint crossing can happen without leaving lazy mode,
+root dentries do need the same protection against having their
+memory freed without RCU delay as everything else in the tree.
+
+It's partially hidden by RCU delay between detaching from the
+mount tree and dropping the vfsmount reference, but the starting
+point of pathwalk can be on an already detached mount, in which
+case umount-caused RCU delay has already passed by the time the
+lazy pathwalk grabs rcu_read_lock(). If the starting point
+happens to be at the root of that vfsmount *and* that vfsmount
+covers the entire filesystem, we get trouble.
+
+Fixes: 48a066e72d97 ("RCU'd vsfmounts")
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/dcache.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1954,10 +1954,12 @@ struct dentry *d_make_root(struct inode
+
+ if (root_inode) {
+ res = d_alloc_anon(root_inode->i_sb);
+- if (res)
++ if (res) {
++ res->d_flags |= DCACHE_RCUACCESS;
+ d_instantiate(res, root_inode);
+- else
++ } else {
+ iput(root_inode);
++ }
+ }
+ return res;
+ }
+From 4c0d7cd5c8416b1ef41534d19163cb07ffaa03ab Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 9 Aug 2018 10:15:54 -0400
+Subject: make sure that __dentry_kill() always invalidates d_seq, unhashed or not
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 4c0d7cd5c8416b1ef41534d19163cb07ffaa03ab upstream.
+
+RCU pathwalk relies upon the assumption that anything that changes
+->d_inode of a dentry will invalidate its ->d_seq. That's almost
+true - the one exception is that the final dput() of already unhashed
+dentry does *not* touch ->d_seq at all. Unhashing does, though,
+so for anything we'd found by RCU dcache lookup we are fine.
+Unfortunately, we can *start* with an unhashed dentry or jump into
+it.
+
+We could try and be careful in the (few) places where that could
+happen. Or we could just make the final dput() invalidate the damn
+thing, unhashed or not. The latter is much simpler and easier to
+backport, so let's do it that way.
+
+Reported-by: "Dae R. Jeong" <threeearcat@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/dcache.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct d
+ __releases(dentry->d_inode->i_lock)
+ {
+ struct inode *inode = dentry->d_inode;
+- bool hashed = !d_unhashed(dentry);
+
+- if (hashed)
+- raw_write_seqcount_begin(&dentry->d_seq);
++ raw_write_seqcount_begin(&dentry->d_seq);
+ __d_clear_type_and_inode(dentry);
+ hlist_del_init(&dentry->d_u.d_alias);
+- if (hashed)
+- raw_write_seqcount_end(&dentry->d_seq);
++ raw_write_seqcount_end(&dentry->d_seq);
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&inode->i_lock);
+ if (!inode->i_nlink)
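What RCU pathwalk relies on is ordinary seqcount discipline: the writer bumps the sequence before and after changing the object, and a reader retries whenever it sees an odd value or a value that moved while it was looking. A compact user-space rendition (sequentially-consistent C11 atomics for simplicity; the kernel's raw_write_seqcount_begin/end use lighter barriers):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_uint d_seq;		/* even = stable, odd = update in flight */
  static atomic_int  d_inode_state;	/* stands in for ->d_inode */

  /* write side: always bump the sequence around the update */
  static void writer_update(int new_state)
  {
  	atomic_fetch_add(&d_seq, 1);		/* begin: sequence goes odd */
  	atomic_store(&d_inode_state, new_state);
  	atomic_fetch_add(&d_seq, 1);		/* end: sequence even again */
  }

  /* read side: retry if the sequence was odd or changed under us */
  static int reader_snapshot(void)
  {
  	unsigned int seq;
  	int val;

  	do {
  		seq = atomic_load(&d_seq);
  		val = atomic_load(&d_inode_state);
  	} while ((seq & 1) || seq != atomic_load(&d_seq));

  	return val;
  }

  int main(void)
  {
  	writer_update(42);
  	printf("%d\n", reader_snapshot());	/* 42 */
  	return 0;
  }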
+From 9ea0a46ca2c318fcc449c1e6b62a7230a17888f1 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 9 Aug 2018 17:21:17 -0400
+Subject: fix mntput/mntput race
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 9ea0a46ca2c318fcc449c1e6b62a7230a17888f1 upstream.
+
+mntput_no_expire() does the calculation of total refcount under mount_lock;
+unfortunately, the decrement (as well as all increments) are done outside
+of it, leading to false positives in the "are we dropping the last reference"
+test. Consider the following situation:
+ * mnt is a lazy-umounted mount, kept alive by two opened files. One
+of those files gets closed. Total refcount of mnt is 2. On CPU 42
+mntput(mnt) (called from __fput()) drops one reference, decrementing component
+ * After it has looked at component #0, the process on CPU 0 does
+mntget(), incrementing component #0, gets preempted and gets to run again -
+on CPU 69. There it does mntput(), which drops the reference (component #69)
+and proceeds to spin on mount_lock.
+ * On CPU 42 our first mntput() finishes counting. It observes the
+decrement of component #69, but not the increment of component #0. As the
+result, the total it gets is not 1 as it should've been - it's 0. At which
+point we decide that vfsmount needs to be killed and proceed to free it and
+shut the filesystem down. However, there's still another opened file
+on that filesystem, with reference to (now freed) vfsmount, etc. and we are
+screwed.
+
+It's not a wide race, but it can be reproduced with artificial slowdown of
+the mnt_get_count() loop, and it should be easier to hit on SMP KVM setups.
+
+Fix consists of moving the refcount decrement under mount_lock; the tricky
+part is that we want (and can) keep the fast case (i.e. mount that still
+has non-NULL ->mnt_ns) entirely out of mount_lock. All places that zero
+mnt->mnt_ns are dropping some reference to mnt and they call synchronize_rcu()
+before that mntput(). IOW, if mntput() observes (under rcu_read_lock())
+a non-NULL ->mnt_ns, it is guaranteed that there is another reference yet to
+be dropped.
+
+Reported-by: Jann Horn <jannh@google.com>
+Tested-by: Jann Horn <jannh@google.com>
+Fixes: 48a066e72d97 ("RCU'd vsfmounts")
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namespace.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1195,12 +1195,22 @@ static DECLARE_DELAYED_WORK(delayed_mntp
+ static void mntput_no_expire(struct mount *mnt)
+ {
+ rcu_read_lock();
+- mnt_add_count(mnt, -1);
+- if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
++ if (likely(READ_ONCE(mnt->mnt_ns))) {
++ /*
++ * Since we don't do lock_mount_hash() here,
++ * ->mnt_ns can change under us. However, if it's
++ * non-NULL, then there's a reference that won't
++ * be dropped until after an RCU delay done after
++ * turning ->mnt_ns NULL. So if we observe it
++ * non-NULL under rcu_read_lock(), the reference
++ * we are dropping is not the final one.
++ */
++ mnt_add_count(mnt, -1);
+ rcu_read_unlock();
+ return;
+ }
+ lock_mount_hash();
++ mnt_add_count(mnt, -1);
+ if (mnt_get_count(mnt)) {
+ rcu_read_unlock();
+ unlock_mount_hash();
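Schematically, the fix keeps the fast path (non-NULL ->mnt_ns under RCU) lock-free and moves only the slow-path decrement under the same lock that sums the per-CPU components, so the decrement and the "is this the last reference" summation can no longer interleave. A heavily simplified user-space sketch of that locked slow path (two counters and a mutex standing in for the per-CPU refcount and mount_lock; the lockless fast path is omitted):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t mount_lock = PTHREAD_MUTEX_INITIALIZER;
  static long component[2];	/* stand-in for per-CPU refcount parts */

  static long total_locked(void)
  {
  	return component[0] + component[1];	/* caller holds mount_lock */
  }

  /* slow path: decrement and summation done under the same lock */
  static int put_ref(int cpu)
  {
  	int last;

  	pthread_mutex_lock(&mount_lock);
  	component[cpu]--;
  	last = (total_locked() == 0);	/* cannot interleave with the decrement */
  	pthread_mutex_unlock(&mount_lock);
  	return last;
  }

  int main(void)
  {
  	component[0] = 2;			/* two references held */
  	printf("last: %d\n", put_ref(0));	/* 0 */
  	printf("last: %d\n", put_ref(1));	/* 1: total is zero even though component[1] went negative */
  	return 0;
  }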
+From 119e1ef80ecfe0d1deb6378d4ab41f5b71519de1 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 9 Aug 2018 17:51:32 -0400
+Subject: fix __legitimize_mnt()/mntput() race
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 119e1ef80ecfe0d1deb6378d4ab41f5b71519de1 upstream.
+
+__legitimize_mnt() has two problems - one is that in case of success
+the check of mount_lock is not ordered wrt preceding increment of
+refcount, making it possible to have successful __legitimize_mnt()
+on one CPU just before the otherwise final mntput() on another,
+with __legitimize_mnt() not seeing mntput() taking the lock and
+mntput() not seeing the increment done by __legitimize_mnt().
+Solved by a pair of barriers.
+
+Another is that failure of __legitimize_mnt() on the second
+read_seqretry() leaves us with reference that'll need to be
+dropped by caller; however, if that races with final mntput()
+we can end up with caller dropping rcu_read_lock() and doing
+mntput() to release that reference - with the first mntput()
+having freed the damn thing just as rcu_read_lock() had been
+dropped. Solution: in "do mntput() yourself" failure case
+grab mount_lock, check if MNT_DOOMED has been set by racing
+final mntput() that has missed our increment and if it has -
+undo the increment and treat that as "failure, caller doesn't
+need to drop anything" case.
+
+It's not easy to hit - the final mntput() has to come right
+after the first read_seqretry() in __legitimize_mnt() *and*
+manage to miss the increment done by __legitimize_mnt() before
+the second read_seqretry() in there. The things that are almost
+impossible to hit on bare hardware are not impossible on SMP
+KVM, though...
+
+Reported-by: Oleg Nesterov <oleg@redhat.com>
+Fixes: 48a066e72d97 ("RCU'd vsfmounts")
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namespace.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *ba
+ return 0;
+ mnt = real_mount(bastard);
+ mnt_add_count(mnt, 1);
++ smp_mb(); // see mntput_no_expire()
+ if (likely(!read_seqretry(&mount_lock, seq)))
+ return 0;
+ if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+ mnt_add_count(mnt, -1);
+ return 1;
+ }
++ lock_mount_hash();
++ if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
++ mnt_add_count(mnt, -1);
++ unlock_mount_hash();
++ return 1;
++ }
++ unlock_mount_hash();
++ /* caller will mntput() */
+ return -1;
+ }
+
+@@ -1210,6 +1219,11 @@ static void mntput_no_expire(struct moun
+ return;
+ }
+ lock_mount_hash();
++ /*
++ * make sure that if __legitimize_mnt() has not seen us grab
++ * mount_lock, we'll see their refcount increment here.
++ */
++ smp_mb();
+ mnt_add_count(mnt, -1);
+ if (mnt_get_count(mnt)) {
+ rcu_read_unlock();
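The barrier pair added above is the classic store-buffering pattern: each side performs a store (the refcount increment on one side, taking mount_lock on the other) followed by a load of the other side's variable, and the full barriers guarantee that at least one side observes the other's store. A user-space rendition with C11 seq_cst fences (illustrative only, not the kernel code):

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int refcount, lock_taken;
  static int seen_by_legitimize = -1, seen_by_mntput = -1;

  static void *legitimize(void *arg)	/* models __legitimize_mnt() */
  {
  	atomic_store_explicit(&refcount, 1, memory_order_relaxed);
  	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
  	seen_by_legitimize = atomic_load_explicit(&lock_taken,
  						  memory_order_relaxed);
  	return arg;
  }

  static void *mntput(void *arg)		/* models mntput_no_expire() */
  {
  	atomic_store_explicit(&lock_taken, 1, memory_order_relaxed);
  	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
  	seen_by_mntput = atomic_load_explicit(&refcount,
  					      memory_order_relaxed);
  	return arg;
  }

  int main(void)
  {
  	pthread_t a, b;

  	pthread_create(&a, NULL, legitimize, NULL);
  	pthread_create(&b, NULL, mntput, NULL);
  	pthread_join(a, NULL);
  	pthread_join(b, NULL);

  	/* with both fences, "0 0" (neither side sees the other) cannot happen */
  	printf("%d %d\n", seen_by_legitimize, seen_by_mntput);
  	return 0;
  }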
+From 1bcfe0564044be578841744faea1c2f46adc8178 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Fri, 15 Jun 2018 09:41:29 +0200
+Subject: ARM: dts: imx6sx: fix irq for pcie bridge
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 1bcfe0564044be578841744faea1c2f46adc8178 upstream.
+
+Use the correct IRQ line for the MSI controller in the PCIe host
+controller. Apparently a different IRQ line is used compared to other
+i.MX6 variants. Without this change MSI IRQs aren't properly propagated
+to the upstream interrupt controller.
+
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
+Fixes: b1d17f68e5c5 ("ARM: dts: imx: add initial imx6sx device tree source")
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/imx6sx.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -1351,7 +1351,7 @@
+ ranges = <0x81000000 0 0 0x08f80000 0 0x00010000 /* downstream I/O */
+ 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+- interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+From 5800dc5c19f34e6e03b5adab1282535cb102fafd Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 3 Aug 2018 16:41:39 +0200
+Subject: x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 5800dc5c19f34e6e03b5adab1282535cb102fafd upstream.
+
+Nadav reported that on guests we're failing to rewrite the indirect
+calls to CALLEE_SAVE paravirt functions. In particular the
+pv_queued_spin_unlock() call is left unpatched and that is all over the
+place. This obviously wrecks Spectre-v2 mitigation (for paravirt
+guests) which relies on not actually having indirect calls around.
+
+The reason is an incorrect clobber test in paravirt_patch_call(); this
+function rewrites an indirect call with a direct call to the _SAME_
+function, there is no possible way the clobbers can be different
+because of this.
+
+Therefore remove this clobber check. Also put WARNs on the other patch
+failure case (not enough room for the instruction) which I've not seen
+trigger in my (limited) testing.
+
+Three live kernel image disassemblies for lock_sock_nested (as a small
+function that illustrates the problem nicely). PRE is the current
+situation for guests, POST is with this patch applied and NATIVE is with
+or without the patch for !guests.
+
+PRE:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+ 0xffffffff817be970 <+0>: push %rbp
+ 0xffffffff817be971 <+1>: mov %rdi,%rbp
+ 0xffffffff817be974 <+4>: push %rbx
+ 0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
+ 0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
+ 0xffffffff817be981 <+17>: mov %rbx,%rdi
+ 0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
+ 0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
+ 0xffffffff817be98f <+31>: test %eax,%eax
+ 0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
+ 0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
+ 0xffffffff817be99d <+45>: mov %rbx,%rdi
+ 0xffffffff817be9a0 <+48>: callq *0xffffffff822299e8
+ 0xffffffff817be9a7 <+55>: pop %rbx
+ 0xffffffff817be9a8 <+56>: pop %rbp
+ 0xffffffff817be9a9 <+57>: mov $0x200,%esi
+ 0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
+ 0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063ae0 <__local_bh_enable_ip>
+ 0xffffffff817be9ba <+74>: mov %rbp,%rdi
+ 0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
+ 0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
+
+POST:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+ 0xffffffff817be970 <+0>: push %rbp
+ 0xffffffff817be971 <+1>: mov %rdi,%rbp
+ 0xffffffff817be974 <+4>: push %rbx
+ 0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
+ 0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
+ 0xffffffff817be981 <+17>: mov %rbx,%rdi
+ 0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
+ 0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
+ 0xffffffff817be98f <+31>: test %eax,%eax
+ 0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
+ 0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
+ 0xffffffff817be99d <+45>: mov %rbx,%rdi
+ 0xffffffff817be9a0 <+48>: callq 0xffffffff810a0c20 <__raw_callee_save___pv_queued_spin_unlock>
+ 0xffffffff817be9a5 <+53>: xchg %ax,%ax
+ 0xffffffff817be9a7 <+55>: pop %rbx
+ 0xffffffff817be9a8 <+56>: pop %rbp
+ 0xffffffff817be9a9 <+57>: mov $0x200,%esi
+ 0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
+ 0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063aa0 <__local_bh_enable_ip>
+ 0xffffffff817be9ba <+74>: mov %rbp,%rdi
+ 0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
+ 0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
+
+NATIVE:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+ 0xffffffff817be970 <+0>: push %rbp
+ 0xffffffff817be971 <+1>: mov %rdi,%rbp
+ 0xffffffff817be974 <+4>: push %rbx
+ 0xffffffff817be975 <+5>: lea 0x88(%rbp),%rbx
+ 0xffffffff817be97c <+12>: callq 0xffffffff819f7160 <_cond_resched>
+ 0xffffffff817be981 <+17>: mov %rbx,%rdi
+ 0xffffffff817be984 <+20>: callq 0xffffffff819fbb00 <_raw_spin_lock_bh>
+ 0xffffffff817be989 <+25>: mov 0x8c(%rbp),%eax
+ 0xffffffff817be98f <+31>: test %eax,%eax
+ 0xffffffff817be991 <+33>: jne 0xffffffff817be9ba <lock_sock_nested+74>
+ 0xffffffff817be993 <+35>: movl $0x1,0x8c(%rbp)
+ 0xffffffff817be99d <+45>: mov %rbx,%rdi
+ 0xffffffff817be9a0 <+48>: movb $0x0,(%rdi)
+ 0xffffffff817be9a3 <+51>: nopl 0x0(%rax)
+ 0xffffffff817be9a7 <+55>: pop %rbx
+ 0xffffffff817be9a8 <+56>: pop %rbp
+ 0xffffffff817be9a9 <+57>: mov $0x200,%esi
+ 0xffffffff817be9ae <+62>: mov $0xffffffff817be993,%rdi
+ 0xffffffff817be9b5 <+69>: jmpq 0xffffffff81063ae0 <__local_bh_enable_ip>
+ 0xffffffff817be9ba <+74>: mov %rbp,%rdi
+ 0xffffffff817be9bd <+77>: callq 0xffffffff817be8c0 <__lock_sock>
+ 0xffffffff817be9c2 <+82>: jmp 0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
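+
+For reference, the rewrite itself is a plain 5-byte emit; a condensed sketch
+of what paravirt_patch_call() does (paraphrased from the hunk below, struct
+layout shown for context only):
+
+  struct branch { unsigned char opcode; unsigned int delta; } __attribute__((packed));
+
+  struct branch *b = insnbuf;
+  /* rel32 is relative to the end of the 5-byte CALL instruction */
+  unsigned long delta = (unsigned long)target - (addr + 5);
+  b->opcode = 0xe8;               /* direct CALL */
+  b->delta  = delta;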
+
+
+Fixes: 63f70270ccd9 ("[PATCH] i386: PARAVIRT: add common patching machinery")
+Fixes: 3010a0663fd9 ("x86/paravirt, objtool: Annotate indirect calls")
+Reported-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/paravirt.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnb
+ struct branch *b = insnbuf;
+ unsigned long delta = (unsigned long)target - (addr+5);
+
+- if (tgt_clobbers & ~site_clobbers)
+- return len; /* target would clobber too much for this site */
+- if (len < 5)
++ if (len < 5) {
++#ifdef CONFIG_RETPOLINE
++ WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
++#endif
+ return len; /* call too long for patch site */
++ }
+
+ b->opcode = 0xe8; /* call */
+ b->delta = delta;
+@@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbu
+ struct branch *b = insnbuf;
+ unsigned long delta = (unsigned long)target - (addr+5);
+
+- if (len < 5)
++ if (len < 5) {
++#ifdef CONFIG_RETPOLINE
++ WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
++#endif
+ return len; /* call too long for patch site */
++ }
+
+ b->opcode = 0xe9; /* jmp */
+ b->delta = delta;
+From fdf82a7856b32d905c39afc85e34364491e46346 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 26 Jul 2018 13:14:55 +0200
+Subject: x86/speculation: Protect against userspace-userspace spectreRSB
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit fdf82a7856b32d905c39afc85e34364491e46346 upstream.
+
+The article "Spectre Returns! Speculation Attacks using the Return Stack
+Buffer" [1] describes two new (sub-)variants of spectrev2-like attacks,
+making use solely of the RSB contents even on CPUs that don't fall back to
+BTB on RSB underflow (Skylake+).
+
+Mitigate userspace-userspace attacks by always unconditionally filling RSB on
+context switch when the generic spectrev2 mitigation has been enabled.
+
+[1] https://arxiv.org/pdf/1807.07940.pdf
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1807261308190.997@cbobk.fhfr.pm
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 38 +++++++-------------------------------
+ 1 file changed, 7 insertions(+), 31 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -311,23 +311,6 @@ static enum spectre_v2_mitigation_cmd __
+ return cmd;
+ }
+
+-/* Check for Skylake-like CPUs (for RSB handling) */
+-static bool __init is_skylake_era(void)
+-{
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+- boot_cpu_data.x86 == 6) {
+- switch (boot_cpu_data.x86_model) {
+- case INTEL_FAM6_SKYLAKE_MOBILE:
+- case INTEL_FAM6_SKYLAKE_DESKTOP:
+- case INTEL_FAM6_SKYLAKE_X:
+- case INTEL_FAM6_KABYLAKE_MOBILE:
+- case INTEL_FAM6_KABYLAKE_DESKTOP:
+- return true;
+- }
+- }
+- return false;
+-}
+-
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -388,22 +371,15 @@ retpoline_auto:
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+- * If neither SMEP nor PTI are available, there is a risk of
+- * hitting userspace addresses in the RSB after a context switch
+- * from a shallow call stack to a deeper one. To prevent this fill
+- * the entire RSB, even when using IBRS.
++ * If spectre v2 protection has been enabled, unconditionally fill
++ * RSB during a context switch; this protects against two independent
++ * issues:
+ *
+- * Skylake era CPUs have a separate issue with *underflow* of the
+- * RSB, when they will predict 'ret' targets from the generic BTB.
+- * The proper mitigation for this is IBRS. If IBRS is not supported
+- * or deactivated in favour of retpolines the RSB fill on context
+- * switch is required.
++ * - RSB underflow (and switch to BTB) on Skylake+
++ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+ */
+- if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+- pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
+- }
++ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+From 0ea063306eecf300fcf06d2f5917474b580f666f Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Sat, 28 Apr 2018 21:37:03 +0900
+Subject: kprobes/x86: Fix %p uses in error messages
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 0ea063306eecf300fcf06d2f5917474b580f666f upstream.
+
+Remove all %p uses in error messages in kprobes/x86.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: David Howells <dhowells@redhat.com>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Jon Medhurst <tixy@linaro.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Richter <tmricht@linux.ibm.com>
+Cc: Tobin C . Harding <me@tobin.cc>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: acme@kernel.org
+Cc: akpm@linux-foundation.org
+Cc: brueckner@linux.vnet.ibm.com
+Cc: linux-arch@vger.kernel.org
+Cc: rostedt@goodmis.org
+Cc: schwidefsky@de.ibm.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/lkml/152491902310.9916.13355297638917767319.stgit@devbox
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/core.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -395,8 +395,6 @@ int __copy_instruction(u8 *dest, u8 *src
+ - (u8 *) real;
+ if ((s64) (s32) newdisp != newdisp) {
+ pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
+- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
+- src, real, insn->displacement.value);
+ return 0;
+ }
+ disp = (u8 *) dest + insn_offset_displacement(insn);
+@@ -640,8 +638,7 @@ static int reenter_kprobe(struct kprobe
+ * Raise a BUG or we'll continue in an endless reentering loop
+ * and eventually a stack overflow.
+ */
+- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
+- p->addr);
++ pr_err("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ default:
+From 208cbb32558907f68b3b2a081ca2337ac3744794 Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Fri, 3 Aug 2018 10:05:50 -0700
+Subject: x86/irqflags: Provide a declaration for native_save_fl
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit 208cbb32558907f68b3b2a081ca2337ac3744794 upstream.
+
+It was reported that the commit d0a8d9378d16 is causing users of gcc < 4.9
+to observe -Werror=missing-prototypes errors.
+
+Indeed, it seems that:
+extern inline unsigned long native_save_fl(void) { return 0; }
+
+compiled with -Werror=missing-prototypes produces this warning in gcc <
+4.9, but not gcc >= 4.9.
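+
+A minimal stand-alone reproducer sketch (hypothetical foo.c, editorial
+illustration only) of the pattern the fix relies on:
+
+  /* gcc-4.8 -O2 -Werror=missing-prototypes -c foo.c */
+  extern inline unsigned long native_save_fl(void);   /* the added declaration */
+  extern inline unsigned long native_save_fl(void) { return 0; }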
+
+Fixes: d0a8d9378d16 ("x86/paravirt: Make native_save_fl() extern inline").
+Reported-by: David Laight <david.laight@aculab.com>
+Reported-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: hpa@zytor.com
+Cc: jgross@suse.com
+Cc: kstewart@linuxfoundation.org
+Cc: gregkh@linuxfoundation.org
+Cc: boris.ostrovsky@oracle.com
+Cc: astrachan@google.com
+Cc: mka@chromium.org
+Cc: arnd@arndb.de
+Cc: tstellar@redhat.com
+Cc: sedat.dilek@gmail.com
+Cc: David.Laight@aculab.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180803170550.164688-1-ndesaulniers@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/irqflags.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -13,6 +13,8 @@
+ * Interrupt control:
+ */
+
++/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
++extern inline unsigned long native_save_fl(void);
+ extern inline unsigned long native_save_fl(void)
+ {
+ unsigned long flags;
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:21 -0700
+Subject: x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 50896e180c6aa3a9c61a26ced99e15d602666a4c upstream
+
+L1 Terminal Fault (L1TF) is a speculation related vulnerability. The CPU
+speculates on PTE entries which do not have the PRESENT bit set, if the
+content of the resulting physical address is available in the L1D cache.
+
+The OS side mitigation makes sure that a !PRESENT PTE entry points to a
+physical address outside the actually existing and cacheable memory
+space. This is achieved by inverting the upper bits of the PTE. Due to the
+address space limitations this only works for 64bit and 32bit PAE kernels,
+but not for 32bit non PAE.
+
+This mitigation applies to both host and guest kernels, but in case of a
+64bit host (hypervisor) and a 32bit PAE guest, inverting the upper bits of
+the PAE address space (44bit) is not enough if the host has more than 43
+bits of populated memory address space, because the speculation treats the
+PTE content as a physical host address bypassing EPT.
+
+The host (hypervisor) protects itself against the guest by flushing L1D as
+needed, but pages inside the guest are not protected against attacks from
+other processes inside the same guest.
+
+For the guest the inverted PTE mask has to match the host to provide the
+full protection for all pages the host could possibly map into the
+guest. The host's populated address space is not known to the guest, so the
+mask must cover the possible maximal host address space, i.e. 52 bit.
+
+On 32bit PAE the maximum PTE mask is currently set to 44 bit because that
+is the limit imposed by 32bit unsigned long PFNs in the VMs. This limits
+the mask to be below what the host could possibly use for physical pages.
+
+The L1TF PROT_NONE protection code uses the PTE masks to determine which
+bits to invert to make sure the higher bits are set for unmapped entries to
+prevent L1TF speculation attacks against EPT inside guests.
+
+In order to invert all bits that could be used by the host, increase
+__PHYSICAL_PAGE_SHIFT to 52 to match 64bit.
+
+The real limit for a 32bit PAE kernel is still 44 bits because all Linux
+PTEs are created from unsigned long PFNs, so they cannot be higher than 44
+bits on a 32bit kernel. So these extra PFN bits should never be set. The
+only users of this macro are using it to look at PTEs, so it's safe.
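+
+A worked sketch of the arithmetic (editorial note, not part of the patch):
+
+  /* 32bit PAE: PFNs are stored in a 32bit unsigned long               */
+  /*   real PTE limit      = 32 (PFN bits) + 12 (PAGE_SHIFT) = 44 bits */
+  /*   possible host limit = 52 bits of physical address space         */
+  /* so the inversion mask must cover 52 bits, even though the guest   */
+  /* itself can never create a PFN above bit 43                        */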
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/page_32_types.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -29,8 +29,13 @@
+ #define N_EXCEPTION_STACKS 1
+
+ #ifdef CONFIG_X86_PAE
+-/* 44=32+12, the limit we can fit into an unsigned long pfn */
+-#define __PHYSICAL_MASK_SHIFT 44
++/*
++ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
++ * but we need the full mask to make sure inverted PROT_NONE
++ * entries have all the host bits set in a guest.
++ * The real limit is still 44 bits.
++ */
++#define __PHYSICAL_MASK_SHIFT 52
+ #define __VIRTUAL_MASK_SHIFT 32
+
+ #else /* !CONFIG_X86_PAE */
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 13 Jun 2018 15:48:22 -0700
+Subject: x86/speculation/l1tf: Change order of offset/type in swap entry
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit bcd11afa7adad8d720e7ba5ef58bdcd9775cf45f upstream
+
+If pages are swapped out, the swap entry is stored in the corresponding
+PTE, which has the Present bit cleared. CPUs vulnerable to L1TF speculate
+on PTE entries which do not have the Present bit set and would treat the swap
+entry as a physical address (PFN). To mitigate that, the upper bits of the PTE
+must be set so the PTE points to non-existent memory.
+
+The swap entry stores the type and the offset of a swapped out page in the
+PTE. The type is stored in bits 9-13 and the offset in bits 14-63. The hardware
+ignores the bits beyond the physical address space limit, so to make the
+mitigation effective it's required to start 'offset' at the lowest possible
+bit so that even large swap offsets do not reach into the physical address
+space limit bits.
+
+Move offset to bit 9-58 and type to bit 59-63 which are the bits that
+hardware generally doesn't care about.
+
+That, in turn, means that if you are on a desktop chip with only 40 bits of
+physical addressing, now that the offset starts at bit 9, there needs to be
+30 bits of offset actually *in use* until bit 39 ends up being set, which
+means when inverted it will again point into existing memory.
+
+So that's 4 terabyte of swap space (because the offset is counted in pages,
+so 30 bits of offset is 42 bits of actual coverage). With bigger physical
+addressing, that obviously grows further, until the limit of the offset is
+hit (at 50 bits of offset - 62 bits of actual swap file coverage).
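+
+A worked sketch of the 4 terabyte figure (editorial note, not part of the
+patch):
+
+  /* the offset counts pages, PAGE_SHIFT = 12                    */
+  /* 40 bits of physical addressing -> top PA bit is bit 39      */
+  /* offset starts at PTE bit 9, so PTE bit 39 is offset bit 30  */
+  /* 2^30 pages * 4096 bytes/page = 4 TB of swap before bit 39   */
+  /* gets set (and, after inversion, cleared)                    */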
+
+This is a preparatory change for the actual swap entry inversion to protect
+against L1TF.
+
+[ AK: Updated description and minor tweaks. Split into two parts ]
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable_64.h | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -273,7 +273,7 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
+- * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry
++ * | TYPE (59-63) | OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+@@ -287,19 +287,28 @@ static inline int pgd_large(pgd_t pgd) {
+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
+ * but also L and G.
+ */
+-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+-#define SWP_TYPE_BITS 5
+-/* Place the offset above the type: */
+-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
++#define SWP_TYPE_BITS 5
++
++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
++
++/* We always extract/encode the offset by shifting it all the way up, and then down again */
++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
+
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+-#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
+- & ((1U << SWP_TYPE_BITS) - 1))
+-#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
+-#define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((type) << (SWP_TYPE_FIRST_BIT)) \
+- | ((offset) << SWP_OFFSET_FIRST_BIT) })
++/* Extract the high bits for type */
++#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
++
++/* Shift up (to get rid of type), then down to get value */
++#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++
++/*
++ * Shift the offset up "too far" by TYPE bits, then down again
++ */
++#define __swp_entry(type, offset) ((swp_entry_t) { \
++ ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
++
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+ #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 13 Jun 2018 15:48:23 -0700
+Subject: x86/speculation/l1tf: Protect swap entries against L1TF
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 2f22b4cd45b67b3496f4aa4c7180a1271c6452f6 upstream
+
+With L1 terminal fault the CPU speculates into unmapped PTEs, and resulting
+side effects allow reading the memory the PTE is pointing to, if its
+values are still in the L1 cache.
+
+For swapped out pages Linux uses unmapped PTEs and stores a swap entry into
+them.
+
+To protect against L1TF it must be ensured that the swap entry is not
+pointing to valid memory, which requires setting higher bits (between bit
+36 and bit 45) that are inside the CPUs physical address space, but outside
+any real memory.
+
+To do this invert the offset to make sure the higher bits are always set,
+as long as the swap file is not too big.
+
+Note there is no workaround for 32bit !PAE, or on systems which have more
+than MAX_PA/2 worth of memory. The latter case is very unlikely to happen on
+real systems.
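+
+A small illustration of the effect (editorial note, not part of the patch):
+
+  /* with the inversion a small swap offset stores as mostly-1 bits:  */
+  /* offset = 0x1 -> ~offset = 0xffff...fffe                          */
+  /* after the __swp_entry() shifts, PTE bits 58..9 are all 1 except  */
+  /* the low offset bits, so the entry points far above real memory   */
+  /* __swp_offset() applies ~ again and recovers the original 0x1     */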
+
+[AK: updated description and minor tweaks by. Split out from the original
+ patch ]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable_64.h | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -273,7 +273,7 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
+- * | TYPE (59-63) | OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
++ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+@@ -286,6 +286,9 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
+ * but also L and G.
++ *
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define SWP_TYPE_BITS 5
+
+@@ -300,13 +303,15 @@ static inline int pgd_large(pgd_t pgd) {
+ #define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+ /* Shift up (to get rid of type), then down to get value */
+-#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+ /*
+ * Shift the offset up "too far" by TYPE bits, then down again
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:24 -0700
+Subject: x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 6b28baca9b1f0d4a42b865da7a05b1c81424bd5c upstream
+
+When PTEs are set to PROT_NONE the kernel just clears the Present bit and
+preserves the PFN, which creates attack surface for L1TF speculation
+attacks.
+
+This is important inside guests, because L1TF speculation bypasses physical
+page remapping. While the host has its own mitigations preventing leaking
+data from other VMs into the guest, this would still risk leaking the wrong
+page inside the current guest.
+
+This uses the same technique as Linus' swap entry patch: while an entry
+is in PROTNONE state, invert the complete PFN part of it. This ensures
+that the highest bit will point to non-existent memory.
+
+The inversion is done by pte/pmd_modify and pfn/pmd/pud_pte for PROTNONE and
+pte/pmd/pud_pfn undoes it.
+
+This assumes that no code path touches the PFN part of a PTE directly
+without using these primitives.
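+
+A condensed sketch of the round trip (simplified from the hunks below):
+
+  /* set side, e.g. pte_modify(): flip the PFN when NONE-ness changes */
+  if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+          val = (val & ~PTE_PFN_MASK) | (~val & PTE_PFN_MASK);
+
+  /* read side, e.g. pte_pfn(): undo the inversion again */
+  pfn = pte_val(pte);
+  pfn ^= __pte_needs_invert(pfn) ? ~0ull : 0;      /* protnone_mask() */
+  pfn = (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;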
+
+This doesn't handle the case that MMIO is on the top of the CPU physical
+memory. If such an MMIO region was exposed by an unprivileged driver for
+mmap it would be possible to attack some real memory. However this
+situation is all rather unlikely.
+
+For 32bit non PAE the inversion is not done because there are really not
+enough bits to protect anything.
+
+Q: Why does the guest need to be protected when the HyperVisor already has
+ L1TF mitigations?
+
+A: Here's an example:
+
+ Physical pages 1 2 get mapped into a guest as
+ GPA 1 -> PA 2
+ GPA 2 -> PA 1
+ through EPT.
+
+ The L1TF speculation ignores the EPT remapping.
+
+ Now the guest kernel maps GPA 1 to process A and GPA 2 to process B, and
+ they belong to different users and should be isolated.
+
+ A sets the GPA 1 PA 2 PTE to PROT_NONE to bypass the EPT remapping and
+ gets read access to the underlying physical page. Which in this case
+ points to PA 2, so it can read process B's data, if it happened to be in
+ L1, so isolation inside the guest is broken.
+
+ There's nothing the hypervisor can do about this. This mitigation has to
+ be done in the guest itself.
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable-2level.h | 17 +++++++++++++
+ arch/x86/include/asm/pgtable-3level.h | 2 +
+ arch/x86/include/asm/pgtable-invert.h | 32 ++++++++++++++++++++++++
+ arch/x86/include/asm/pgtable.h | 44 +++++++++++++++++++++++-----------
+ arch/x86/include/asm/pgtable_64.h | 2 +
+ 5 files changed, 84 insertions(+), 13 deletions(-)
+ create mode 100644 arch/x86/include/asm/pgtable-invert.h
+
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -95,4 +95,21 @@ static inline unsigned long pte_bitop(un
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
++/* No inverted PFNs on 2 level page tables */
++
++static inline u64 protnone_mask(u64 val)
++{
++ return 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ return val;
++}
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return false;
++}
++
+ #endif /* _ASM_X86_PGTABLE_2LEVEL_H */
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -295,4 +295,6 @@ static inline pte_t gup_get_pte(pte_t *p
+ return pte;
+ }
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
+--- /dev/null
++++ b/arch/x86/include/asm/pgtable-invert.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_PGTABLE_INVERT_H
++#define _ASM_PGTABLE_INVERT_H 1
++
++#ifndef __ASSEMBLY__
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
++}
++
++/* Get a mask to xor with the page table entry to get the correct pfn. */
++static inline u64 protnone_mask(u64 val)
++{
++ return __pte_needs_invert(val) ? ~0ull : 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ /*
++ * When a PTE transitions from NONE to !NONE or vice-versa
++ * invert the PFN part to stop speculation.
++ * pte_pfn undoes this when needed.
++ */
++ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
++ val = (val & ~mask) | (~val & mask);
++ return val;
++}
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -185,19 +185,29 @@ static inline int pte_special(pte_t pte)
+ return pte_flags(pte) & _PAGE_SPECIAL;
+ }
+
++/* Entries that were set to PROT_NONE are inverted */
++
++static inline u64 protnone_mask(u64 val);
++
+ static inline unsigned long pte_pfn(pte_t pte)
+ {
+- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
++ unsigned long pfn = pte_val(pte);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
++ unsigned long pfn = pmd_val(pmd);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pud_pfn(pud_t pud)
+ {
+- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
++ unsigned long pfn = pud_val(pud);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long p4d_pfn(p4d_t p4d)
+@@ -545,25 +555,33 @@ static inline pgprotval_t check_pgprot(p
+
+ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- check_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PTE_PFN_MASK;
++ return __pte(pfn | check_pgprot(pgprot));
+ }
+
+ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- check_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PHYSICAL_PMD_PAGE_MASK;
++ return __pmd(pfn | check_pgprot(pgprot));
+ }
+
+ static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- check_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PHYSICAL_PUD_PAGE_MASK;
++ return __pud(pfn | check_pgprot(pgprot));
+ }
+
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
++
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+- pteval_t val = pte_val(pte);
++ pteval_t val = pte_val(pte), oldval = val;
+
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+@@ -571,17 +589,17 @@ static inline pte_t pte_modify(pte_t pte
+ */
+ val &= _PAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
+ return __pte(val);
+ }
+
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+- pmdval_t val = pmd_val(pmd);
++ pmdval_t val = pmd_val(pmd), oldval = val;
+
+ val &= _HPAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
+ return __pmd(val);
+ }
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -357,5 +357,7 @@ static inline bool gup_fast_permitted(un
+ return true;
+ }
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_X86_PGTABLE_64_H */
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:25 -0700
+Subject: x86/speculation/l1tf: Make sure the first page is always reserved
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 10a70416e1f067f6c4efda6ffd8ea96002ac4223 upstream
+
+The L1TF workaround doesn't make any attempt to mitigate speculative accesses
+to the first physical page for zeroed PTEs. Normally it only contains some
+data from the early real mode BIOS.
+
+It's not entirely clear that the first page is reserved in all
+configurations, so add an extra reservation call to make sure it is really
+reserved. In most configurations (e.g. with the standard reservations)
+it's likely a nop.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/setup.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
+ memblock_reserve(__pa_symbol(_text),
+ (unsigned long)__bss_stop - (unsigned long)_text);
+
++ /*
++ * Make sure page 0 is always reserved because on systems with
++ * L1TF its contents can be leaked to user processes.
++ */
++ memblock_reserve(0, PAGE_SIZE);
++
+ early_reserve_initrd();
+
+ /*
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:26 -0700
+Subject: x86/speculation/l1tf: Add sysfs reporting for l1tf
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 17dbca119312b4e8173d4e25ff64262119fcef38 upstream
+
+L1TF core kernel workarounds are cheap and normally always enabled. However,
+they should still be reported in sysfs if the system is vulnerable or
+mitigated. Add the necessary CPU feature/bug bits.
+
+- Extend the existing checks for Meltdown to determine if the system is
+ vulnerable. All CPUs which are not vulnerable to Meltdown are also not
+ vulnerable to L1TF
+
+- Check for 32bit non PAE and emit a warning as there is no practical way
+ for mitigation due to the limited physical address bits
+
+- If the system has more than MAX_PA/2 physical memory the page inversion
+ workarounds don't protect the system against the L1TF attack anymore,
+ because an inverted physical address will also point to valid
+ memory. Print a warning in this case and report that the system is
+ vulnerable.
+
+Add a function which returns the PFN limit for the L1TF mitigation, which
+will be used in follow up patches for sanity and range checks.
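+
+A worked sketch of the limit (editorial note, not part of the patch):
+
+  /* l1tf_pfn_limit() = BIT(x86_phys_bits - 1 - PAGE_SHIFT) - 1  */
+  /* e.g. x86_phys_bits = 46: BIT(46 - 1 - 12) - 1 = 2^33 - 1,   */
+  /* i.e. the last page frame below MAX_PA/2 (32 TB)             */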
+
+[ tglx: Renamed the CPU feature bit to L1TF_PTEINV ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +
+ arch/x86/include/asm/processor.h | 5 ++++
+ arch/x86/kernel/cpu/bugs.c | 40 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++++++++
+ drivers/base/cpu.c | 8 +++++++
+ include/linux/cpu.h | 2 +
+ 6 files changed, 77 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -219,6 +219,7 @@
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
++#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -371,5 +372,6 @@
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -181,6 +181,11 @@ extern const struct seq_operations cpuin
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
++static inline unsigned long l1tf_pfn_limit(void)
++{
++ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++}
++
+ extern void early_cpu_init(void);
+ extern void identify_boot_cpu(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,9 +27,11 @@
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
+ #include <asm/intel-family.h>
++#include <asm/e820/api.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
++static void __init l1tf_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -81,6 +83,8 @@ void __init check_bugs(void)
+ */
+ ssb_select_mitigation();
+
++ l1tf_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -205,6 +209,32 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++static void __init l1tf_select_mitigation(void)
++{
++ u64 half_pa;
++
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++#if CONFIG_PGTABLE_LEVELS == 2
++ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
++ return;
++#endif
++
++ /*
++ * This is extremely unlikely to happen because almost all
++ * systems have far more MAX_PA/2 than RAM can be fit into
++ * DIMM slots.
++ */
++ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
++ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
++ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ return;
++ }
++
++ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -657,6 +687,11 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
++ case X86_BUG_L1TF:
++ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
++ return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ break;
++
+ default:
+ break;
+ }
+@@ -683,4 +718,9 @@ ssize_t cpu_show_spec_store_bypass(struc
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+ }
++
++ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -958,6 +958,21 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
++ /* in addition to cpu_no_speculation */
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+@@ -983,6 +998,11 @@ static void __init cpu_set_bug_bits(stru
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
++
++ if (x86_match_cpu(cpu_no_l1tf))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_L1TF);
+ }
+
+ /*
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -540,16 +540,24 @@ ssize_t __weak cpu_show_spec_store_bypas
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+ static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
++static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
++ &dev_attr_l1tf.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struc
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:27 -0700
+Subject: x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 42e4089c7890725fcd329999252dc489b72f2921 upstream
+
+For L1TF PROT_NONE mappings are protected by inverting the PFN in the page
+table entry. This sets the high bits in the CPU's address space, thus
+making sure an unmapped entry does not point to valid cached memory.
+
+Some server system BIOSes put the MMIO mappings high up in the physical
+address space. If such a high mapping was mapped to unprivileged users
+they could attack low memory by setting such a mapping to PROT_NONE. This
+could happen through a special device driver which is not access
+protected. Normal /dev/mem is of course access protected.
+
+To avoid this forbid PROT_NONE mappings or mprotect for high MMIO mappings.
+
+Valid page mappings are allowed because the system is then unsafe anyway.
+
+It's not expected that users commonly use PROT_NONE on MMIO. But to
+minimize any impact this is only enforced if the mapping actually refers to
+a high MMIO address (defined as the MAX_PA-1 bit being set), and the check
+is also skipped for root.
+
+For mmaps this is straightforward and can be handled in vm_insert_pfn and
+in remap_pfn_range().
+
+For mprotect it's a bit trickier. At the point where the actual PTEs are
+accessed a lot of state has been changed and it would be difficult to undo
+on an error. Since this is an uncommon case, use a separate early page
+table walk pass for MMIO PROT_NONE mappings that checks for this condition
+early. For non MMIO and non PROT_NONE there are no changes.
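+
+A condensed sketch of the resulting check (simplified from the hunk below):
+
+  bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+  {
+          if (!boot_cpu_has_bug(X86_BUG_L1TF))
+                  return true;                     /* CPU not affected        */
+          if (!__pte_needs_invert(pgprot_val(prot)))
+                  return true;                     /* not a PROT_NONE mapping */
+          if (pfn_valid(pfn))
+                  return true;                     /* real memory, allow      */
+          /* high MMIO PFN: only root may turn it into PROT_NONE */
+          return pfn <= l1tf_pfn_limit() || capable(CAP_SYS_ADMIN);
+  }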
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable.h | 8 ++++++
+ arch/x86/mm/mmap.c | 21 +++++++++++++++++
+ include/asm-generic/pgtable.h | 12 ++++++++++
+ mm/memory.c | 37 ++++++++++++++++++++++--------
+ mm/mprotect.c | 49 +++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 117 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -1338,6 +1338,14 @@ static inline bool pud_access_permitted(
+ return __pte_access_permitted(pud_val(pud), write);
+ }
+
++#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
++extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return boot_cpu_has_bug(X86_BUG_L1TF);
++}
++
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -240,3 +240,24 @@ int valid_mmap_phys_addr_range(unsigned
+
+ return phys_addr_valid(addr + count - 1);
+ }
++
++/*
++ * Only allow root to set high MMIO mappings to PROT_NONE.
++ * This prevents an unpriv. user to set them to PROT_NONE and invert
++ * them, then pointing to valid memory for L1TF speculation.
++ *
++ * Note: for locked down kernels may want to disable the root override.
++ */
++bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return true;
++ if (!__pte_needs_invert(pgprot_val(prot)))
++ return true;
++ /* If it's real memory always allow */
++ if (pfn_valid(pfn))
++ return true;
++ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
++ return false;
++ return true;
++}
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1097,4 +1097,16 @@ static inline void init_espfix_bsp(void)
+ #endif
+ #endif
+
++#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
++static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ return true;
++}
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return false;
++}
++#endif
++
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1891,6 +1891,9 @@ int vm_insert_pfn_prot(struct vm_area_st
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+
++ if (!pfn_modify_allowed(pfn, pgprot))
++ return -EACCES;
++
+ track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
+
+ ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+@@ -1926,6 +1929,9 @@ static int __vm_insert_mixed(struct vm_a
+
+ track_pfn_insert(vma, &pgprot, pfn);
+
++ if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
++ return -EACCES;
++
+ /*
+ * If we don't have pte special, then we have to use the pfn_valid()
+ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+@@ -1973,6 +1979,7 @@ static int remap_pte_range(struct mm_str
+ {
+ pte_t *pte;
+ spinlock_t *ptl;
++ int err = 0;
+
+ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+@@ -1980,12 +1987,16 @@ static int remap_pte_range(struct mm_str
+ arch_enter_lazy_mmu_mode();
+ do {
+ BUG_ON(!pte_none(*pte));
++ if (!pfn_modify_allowed(pfn, prot)) {
++ err = -EACCES;
++ break;
++ }
+ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(pte - 1, ptl);
+- return 0;
++ return err;
+ }
+
+ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1994,6 +2005,7 @@ static inline int remap_pmd_range(struct
+ {
+ pmd_t *pmd;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pmd = pmd_alloc(mm, pud, addr);
+@@ -2002,9 +2014,10 @@ static inline int remap_pmd_range(struct
+ VM_BUG_ON(pmd_trans_huge(*pmd));
+ do {
+ next = pmd_addr_end(addr, end);
+- if (remap_pte_range(mm, pmd, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pte_range(mm, pmd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pmd++, addr = next, addr != end);
+ return 0;
+ }
+@@ -2015,6 +2028,7 @@ static inline int remap_pud_range(struct
+ {
+ pud_t *pud;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pud = pud_alloc(mm, p4d, addr);
+@@ -2022,9 +2036,10 @@ static inline int remap_pud_range(struct
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+- if (remap_pmd_range(mm, pud, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pmd_range(mm, pud, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pud++, addr = next, addr != end);
+ return 0;
+ }
+@@ -2035,6 +2050,7 @@ static inline int remap_p4d_range(struct
+ {
+ p4d_t *p4d;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ p4d = p4d_alloc(mm, pgd, addr);
+@@ -2042,9 +2058,10 @@ static inline int remap_p4d_range(struct
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+- if (remap_pud_range(mm, p4d, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pud_range(mm, p4d, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+ }
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -306,6 +306,42 @@ unsigned long change_protection(struct v
+ return pages;
+ }
+
++static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
++ unsigned long next, struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
++ unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_test(unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return 0;
++}
++
++static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
++ unsigned long end, unsigned long newflags)
++{
++ pgprot_t new_pgprot = vm_get_page_prot(newflags);
++ struct mm_walk prot_none_walk = {
++ .pte_entry = prot_none_pte_entry,
++ .hugetlb_entry = prot_none_hugetlb_entry,
++ .test_walk = prot_none_test,
++ .mm = current->mm,
++ .private = &new_pgprot,
++ };
++
++ return walk_page_range(start, end, &prot_none_walk);
++}
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -324,6 +360,19 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
+ /*
++ * Do PROT_NONE PFN permission checks here when we can still
++ * bail out without undoing a lot of state. This is a rather
++ * uncommon case, so doesn't need to be very optimized.
++ */
++ if (arch_has_pfn_modify_check() &&
++ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
++ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
++ error = prot_none_walk(vma, start, end, newflags);
++ if (error)
++ return error;
++ }
++
++ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+ * make it unwritable again. hugetlb mapping were accounted for
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 13 Jun 2018 15:48:28 -0700
+Subject: x86/speculation/l1tf: Limit swap file size to MAX_PA/2
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 377eeaa8e11fe815b1d07c81c4a0e2843a8c15eb upstream
+
+For the L1TF workaround it's necessary to limit the swap file size to below
+MAX_PA/2, so that the higher bits of the inverted swap offset never point
+to valid memory.
+
+Add a mechanism for the architecture to override the swap file size check
+in swapfile.c and add an x86-specific max swapfile check function that
+enforces that limit.
+
+The check is only enabled if the CPU is vulnerable to L1TF.
+
+In VMs with 42bit MAX_PA the typical limit is 2TB now; on a native system
+with 46bit PA it is 32TB. The limit is only per individual swap file, so
+it's always possible to exceed these limits with multiple swap files or
+partitions.
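+
+A worked sketch of the two figures above (editorial note, not part of the
+patch):
+
+  /* per-swapfile limit = MAX_PA/2, expressed in pages               */
+  /* 42bit MAX_PA: 2^41 bytes = 2 TB  -> 2^29 4K pages per swap file */
+  /* 46bit MAX_PA: 2^45 bytes = 32 TB -> 2^33 4K pages per swap file */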
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/init.c | 15 +++++++++++++++
+ include/linux/swapfile.h | 2 ++
+ mm/swapfile.c | 46 ++++++++++++++++++++++++++++++----------------
+ 3 files changed, 47 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -4,6 +4,8 @@
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h> /* for max_low_pfn */
++#include <linux/swapfile.h>
++#include <linux/swapops.h>
+
+ #include <asm/set_memory.h>
+ #include <asm/e820/api.h>
+@@ -880,3 +882,16 @@ void update_cache_mode_entry(unsigned en
+ __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
+ __pte2cachemode_tbl[entry] = cache;
+ }
++
++unsigned long max_swapfile_size(void)
++{
++ unsigned long pages;
++
++ pages = generic_max_swapfile_size();
++
++ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
++ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
++ pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
++ }
++ return pages;
++}
+--- a/include/linux/swapfile.h
++++ b/include/linux/swapfile.h
+@@ -10,5 +10,7 @@ extern spinlock_t swap_lock;
+ extern struct plist_head swap_active_head;
+ extern struct swap_info_struct *swap_info[];
+ extern int try_to_unuse(unsigned int, bool, unsigned long);
++extern unsigned long generic_max_swapfile_size(void);
++extern unsigned long max_swapfile_size(void);
+
+ #endif /* _LINUX_SWAPFILE_H */
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2909,6 +2909,35 @@ static int claim_swapfile(struct swap_in
+ return 0;
+ }
+
++
++/*
++ * Find out how many pages are allowed for a single swap device. There
++ * are two limiting factors:
++ * 1) the number of bits for the swap offset in the swp_entry_t type, and
++ * 2) the number of bits in the swap pte, as defined by the different
++ * architectures.
++ *
++ * In order to find the largest possible bit mask, a swap entry with
++ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
++ * decoded to a swp_entry_t again, and finally the swap offset is
++ * extracted.
++ *
++ * This will mask all the bits from the initial ~0UL mask that can't
++ * be encoded in either the swp_entry_t or the architecture definition
++ * of a swap pte.
++ */
++unsigned long generic_max_swapfile_size(void)
++{
++ return swp_offset(pte_to_swp_entry(
++ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++}
++
++/* Can be overridden by an architecture for additional checks. */
++__weak unsigned long max_swapfile_size(void)
++{
++ return generic_max_swapfile_size();
++}
++
+ static unsigned long read_swap_header(struct swap_info_struct *p,
+ union swap_header *swap_header,
+ struct inode *inode)
+@@ -2944,22 +2973,7 @@ static unsigned long read_swap_header(st
+ p->cluster_next = 1;
+ p->cluster_nr = 0;
+
+- /*
+- * Find out how many pages are allowed for a single swap
+- * device. There are two limiting factors: 1) the number
+- * of bits for the swap offset in the swp_entry_t type, and
+- * 2) the number of bits in the swap pte as defined by the
+- * different architectures. In order to find the
+- * largest possible bit mask, a swap entry with swap type 0
+- * and swap offset ~0UL is created, encoded to a swap pte,
+- * decoded to a swp_entry_t again, and finally the swap
+- * offset is extracted. This will mask all the bits from
+- * the initial ~0UL mask that can't be encoded in either
+- * the swp_entry_t or the architecture definition of a
+- * swap pte.
+- */
+- maxpages = swp_offset(pte_to_swp_entry(
+- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++ maxpages = max_swapfile_size();
+ last_page = swap_header->info.last_page;
+ if (!last_page) {
+ pr_warn("Empty swap-file\n");
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 16:42:57 -0400
+Subject: x86/bugs: Move the l1tf function and define pr_fmt properly
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 56563f53d3066afa9e63d6c997bf67e76a8b05c0 upstream
+
+The pr_warn in l1tf_select_mitigation would have used the prior pr_fmt
+which was defined as "Spectre V2 : ".
+
+Move the function to be past SSBD and also define the pr_fmt.
+
+Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 55 +++++++++++++++++++++++----------------------
+ 1 file changed, 29 insertions(+), 26 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -209,32 +209,6 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+-static void __init l1tf_select_mitigation(void)
+-{
+- u64 half_pa;
+-
+- if (!boot_cpu_has_bug(X86_BUG_L1TF))
+- return;
+-
+-#if CONFIG_PGTABLE_LEVELS == 2
+- pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+- return;
+-#endif
+-
+- /*
+- * This is extremely unlikely to happen because almost all
+- * systems have far more MAX_PA/2 than RAM can be fit into
+- * DIMM slots.
+- */
+- half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+- if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+- pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+- return;
+- }
+-
+- setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+-}
+-
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -660,6 +634,35 @@ void x86_spec_ctrl_setup_ap(void)
+ x86_amd_ssb_disable();
+ }
+
++#undef pr_fmt
++#define pr_fmt(fmt) "L1TF: " fmt
++static void __init l1tf_select_mitigation(void)
++{
++ u64 half_pa;
++
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++#if CONFIG_PGTABLE_LEVELS == 2
++ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
++ return;
++#endif
++
++ /*
++ * This is extremely unlikely to happen because almost all
++ * systems have far more MAX_PA/2 than RAM can be fit into
++ * DIMM slots.
++ */
++ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
++ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
++ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ return;
++ }
++
++ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
++}
++#undef pr_fmt
++
+ #ifdef CONFIG_SYSFS
+
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 29 May 2018 16:43:46 +0200
+Subject: sched/smt: Update sched_smt_present at runtime
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit ba2591a5993eabcc8e874e30f361d8ffbb10d6d4 upstream
+
+The static key sched_smt_present is only updated at boot time when SMT
+siblings have been detected. Booting with maxcpus=1 and bringing the
+siblings online after boot rebuilds the scheduling domains correctly but
+does not update the static key, so the SMT code is not enabled.
+
+Let the key be updated in the scheduler CPU hotplug code to fix this.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 30 ++++++++++++------------------
+ kernel/sched/fair.c | 1 +
+ 2 files changed, 13 insertions(+), 18 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5788,6 +5788,18 @@ int sched_cpu_activate(unsigned int cpu)
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * The sched_smt_present static key needs to be evaluated on every
++ * hotplug event because at boot time SMT might be disabled when
++ * the number of booted CPUs is limited.
++ *
++ * If then later a sibling gets hotplugged, then the key would stay
++ * off and SMT scheduling would never be functional.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
++ static_branch_enable_cpuslocked(&sched_smt_present);
++#endif
+ set_cpu_active(cpu, true);
+
+ if (sched_smp_initialized) {
+@@ -5885,22 +5897,6 @@ int sched_cpu_dying(unsigned int cpu)
+ }
+ #endif
+
+-#ifdef CONFIG_SCHED_SMT
+-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+-
+-static void sched_init_smt(void)
+-{
+- /*
+- * We've enumerated all CPUs and will assume that if any CPU
+- * has SMT siblings, CPU0 will too.
+- */
+- if (cpumask_weight(cpu_smt_mask(0)) > 1)
+- static_branch_enable(&sched_smt_present);
+-}
+-#else
+-static inline void sched_init_smt(void) { }
+-#endif
+-
+ void __init sched_init_smp(void)
+ {
+ sched_init_numa();
+@@ -5922,8 +5918,6 @@ void __init sched_init_smp(void)
+ init_sched_rt_class();
+ init_sched_dl_class();
+
+- sched_init_smt();
+-
+ sched_smp_initialized = true;
+ }
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6183,6 +6183,7 @@ static inline int find_idlest_cpu(struct
+ }
+
+ #ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+ static inline void set_idle_cores(int cpu, int val)
+ {
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:50:22 +0200
+Subject: x86/smp: Provide topology_is_primary_thread()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6a4d2657e048f096c7ffcad254010bd94891c8c0 upstream
+
+If the CPU is supporting SMT then the primary thread can be found by
+checking the lower APIC ID bits for zero. smp_num_siblings is used to build
+the mask for the APIC ID bits which need to be taken into account.
+
+This uses the MPTABLE or ACPI/MADT supplied APIC ID, which can be different
+than the initial APIC ID in CPUID. But according to AMD the lower bits have
+to be consistent. Intel gave a tentative confirmation as well.
+
+Preparatory patch to support disabling SMT at boot/runtime.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/apic.h | 6 ++++++
+ arch/x86/include/asm/topology.h | 4 +++-
+ arch/x86/kernel/apic/apic.c | 15 +++++++++++++++
+ arch/x86/kernel/smpboot.c | 9 +++++++++
+ 4 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -502,6 +502,12 @@ extern int default_check_phys_apicid_pre
+
+ #endif /* CONFIG_X86_LOCAL_APIC */
+
++#ifdef CONFIG_SMP
++bool apic_id_is_primary_thread(unsigned int id);
++#else
++static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
++#endif
++
+ extern void irq_enter(void);
+ extern void irq_exit(void);
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -123,13 +123,15 @@ static inline int topology_max_smt_threa
+ }
+
+ int topology_update_package_map(unsigned int apicid, unsigned int cpu);
+-extern int topology_phys_to_logical_pkg(unsigned int pkg);
++int topology_phys_to_logical_pkg(unsigned int pkg);
++bool topology_is_primary_thread(unsigned int cpu);
+ #else
+ #define topology_max_packages() (1)
+ static inline int
+ topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
+ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+ static inline int topology_max_smt_threads(void) { return 1; }
++static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2192,6 +2192,21 @@ static int cpuid_to_apicid[] = {
+ [0 ... NR_CPUS - 1] = -1,
+ };
+
++/**
++ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
++ * @id: APIC ID to check
++ */
++bool apic_id_is_primary_thread(unsigned int apicid)
++{
++ u32 mask;
++
++ if (smp_num_siblings == 1)
++ return true;
++ /* Isolate the SMT bit(s) in the APICID and check for 0 */
++ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
++ return !(apicid & mask);
++}
++
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -273,6 +273,15 @@ static void notrace start_secondary(void
+ }
+
+ /**
++ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
++ * @cpu: CPU to check
++ */
++bool topology_is_primary_thread(unsigned int cpu)
++{
++ return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
++}
++
++/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical
+ *
+ * Returns logical package id or -1 if not found
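
A rough userspace model of the mask check in apic_id_is_primary_thread() above, useful for convincing yourself which APIC IDs count as primary threads. fls_u32() stands in for the kernel's fls(), and the sibling count is a made-up example:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static unsigned int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int is_primary_thread(unsigned int apicid, unsigned int siblings)
{
	unsigned int mask;

	if (siblings == 1)
		return 1;
	/* Isolate the SMT bit(s) in the APIC ID and check for 0 */
	mask = (1U << (fls_u32(siblings) - 1)) - 1;
	return !(apicid & mask);
}

int main(void)
{
	for (unsigned int id = 0; id < 8; id++)
		printf("APIC ID %u with 2 siblings per core: %s\n", id,
		       is_primary_thread(id, 2) ? "primary" : "secondary");
	return 0;
}

With two siblings per core the mask is 1, so even APIC IDs are primary threads and the odd ones are the HT siblings.
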
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jun 2018 10:37:20 +0200
+Subject: x86/topology: Provide topology_smt_supported()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit f048c399e0f7490ab7296bc2c255d37eb14a9675 upstream
+
+Provide information on whether SMT is supported by the CPUs. This is a
+preparatory patch for the SMT control mechanism.
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/topology.h | 2 ++
+ arch/x86/kernel/smpboot.c | 8 ++++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -125,6 +125,7 @@ static inline int topology_max_smt_threa
+ int topology_update_package_map(unsigned int apicid, unsigned int cpu);
+ int topology_phys_to_logical_pkg(unsigned int pkg);
+ bool topology_is_primary_thread(unsigned int cpu);
++bool topology_smt_supported(void);
+ #else
+ #define topology_max_packages() (1)
+ static inline int
+@@ -132,6 +133,7 @@ topology_update_package_map(unsigned int
+ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+ static inline int topology_max_smt_threads(void) { return 1; }
+ static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
++static inline bool topology_smt_supported(void) { return false; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -282,6 +282,14 @@ bool topology_is_primary_thread(unsigned
+ }
+
+ /**
++ * topology_smt_supported - Check whether SMT is supported by the CPUs
++ */
++bool topology_smt_supported(void)
++{
++ return smp_num_siblings > 1;
++}
++
++/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical
+ *
+ * Returns logical package id or -1 if not found
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 19:05:25 +0200
+Subject: cpu/hotplug: Make bringup/teardown of smp threads symmetric
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit c4de65696d865c225fda3b9913b31284ea65ea96 upstream
+
+The asymmetry caused a warning to trigger if the bootup was stopped in state
+CPUHP_AP_ONLINE_IDLE. The warning no longer triggers as kthread_park() can
+now be invoked on already or still parked threads. But there is still no
+reason to have this be asymmetric.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -754,7 +754,6 @@ static int takedown_cpu(unsigned int cpu
+
+ /* Park the smpboot threads */
+ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
+- smpboot_park_threads(cpu);
+
+ /*
+ * Prevent irq alloc/free while the dying cpu reorganizes the
+@@ -1332,7 +1331,7 @@ static struct cpuhp_step cpuhp_hp_states
+ [CPUHP_AP_SMPBOOT_THREADS] = {
+ .name = "smpboot/threads:online",
+ .startup.single = smpboot_unpark_threads,
+- .teardown.single = NULL,
++ .teardown.single = smpboot_park_threads,
+ },
+ [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
+ .name = "irq/affinity:online",
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:49:05 +0200
+Subject: cpu/hotplug: Split do_cpu_down()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit cc1fe215e1efa406b03aa4389e6269b61342dec5 upstream
+
+Split out the inner workings of do_cpu_down() to allow reuse of that
+function for the upcoming SMT disabling mechanism.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -906,20 +906,19 @@ out:
+ return ret;
+ }
+
++static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
++{
++ if (cpu_hotplug_disabled)
++ return -EBUSY;
++ return _cpu_down(cpu, 0, target);
++}
++
+ static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
+ {
+ int err;
+
+ cpu_maps_update_begin();
+-
+- if (cpu_hotplug_disabled) {
+- err = -EBUSY;
+- goto out;
+- }
+-
+- err = _cpu_down(cpu, 0, target);
+-
+-out:
++ err = cpu_down_maps_locked(cpu, target);
+ cpu_maps_update_done();
+ return err;
+ }
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:48:27 +0200
+Subject: cpu/hotplug: Provide knobs to control SMT
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 05736e4ac13c08a4a9b1ef2de26dd31a32cbee57 upstream
+
+Provide a command line and a sysfs knob to control SMT.
+
+The command line options are:
+
+ 'nosmt': Enumerate secondary threads, but do not online them
+
+ 'nosmt=force': Ignore secondary threads completely during enumeration
+ via MP table and ACPI/MADT.
+
+The sysfs control file has the following states (read/write):
+
+ 'on': SMT is enabled. Secondary threads can be freely onlined
+ 'off': SMT is disabled. Secondary threads, even if enumerated
+ cannot be onlined
+ 'forceoff': SMT is permanently disabled. Writes to the control
+ file are rejected.
+ 'notsupported': SMT is not supported by the CPU
+
+The command line option 'nosmt' sets the sysfs control to 'off'. This
+can be changed to 'on' to reenable SMT during runtime.
+
+The command line option 'nosmt=force' sets the sysfs control to
+'forceoff'. This cannot be changed during runtime.
+
+When SMT is 'on' and the control file is changed to 'off' then all online
+secondary threads are offlined and attempts to online a secondary thread
+later on are rejected.
+
+When SMT is 'off' and the control file is changed to 'on' then secondary
+threads can be onlined again. The 'off' -> 'on' transition does not
+automatically online the secondary threads.
+
+When the control file is set to 'forceoff', the behaviour is the same as
+setting it to 'off', but the operation is irreversible and later writes to
+the control file are rejected.
+
+When the control status is 'notsupported' then writes to the control file
+are rejected.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 20 ++
+ Documentation/admin-guide/kernel-parameters.txt | 8
+ arch/Kconfig | 3
+ arch/x86/Kconfig | 1
+ include/linux/cpu.h | 13 +
+ kernel/cpu.c | 170 +++++++++++++++++++++
+ 6 files changed, 215 insertions(+)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -490,3 +490,23 @@ Description: Information about CPU vulne
+ "Not affected" CPU is not affected by the vulnerability
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
++
++What: /sys/devices/system/cpu/smt
++ /sys/devices/system/cpu/smt/active
++ /sys/devices/system/cpu/smt/control
++Date: June 2018
++Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
++Description: Control Symmetric Multi Threading (SMT)
++
++ active: Tells whether SMT is active (enabled and siblings online)
++
++ control: Read/write interface to control SMT. Possible
++ values:
++
++ "on" SMT is enabled
++ "off" SMT is disabled
++ "forceoff" SMT is force disabled. Cannot be changed.
++ "notsupported" SMT is not supported by the CPU
++
++ If control status is "forceoff" or "notsupported" writes
++ are rejected.
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2675,6 +2675,14 @@
+ nosmt [KNL,S390] Disable symmetric multithreading (SMT).
+ Equivalent to smt=1.
+
++ [KNL,x86] Disable symmetric multithreading (SMT).
++ nosmt=force: Force disable SMT, similar to disabling
++ it in the BIOS except that some of the
++ resource partitioning effects which are
++ caused by having SMT enabled in the BIOS
++ cannot be undone. Depending on the CPU
++ type this might have a performance impact.
++
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+ allow data leaks with this option, which is equivalent
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -13,6 +13,9 @@ config KEXEC_CORE
+ config HAVE_IMA_KEXEC
+ bool
+
++config HOTPLUG_SMT
++ bool
++
+ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -183,6 +183,7 @@ config X86
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UNSTABLE_SCHED_CLOCK
+ select HAVE_USER_RETURN_NOTIFIER
++ select HOTPLUG_SMT if SMP
+ select IRQ_FORCED_THREADING
+ select PCI_LOCKLESS_CONFIG
+ select PERF_EVENTS
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -168,4 +168,17 @@ void cpuhp_report_idle_dead(void);
+ static inline void cpuhp_report_idle_dead(void) { }
+ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
++enum cpuhp_smt_control {
++ CPU_SMT_ENABLED,
++ CPU_SMT_DISABLED,
++ CPU_SMT_FORCE_DISABLED,
++ CPU_SMT_NOT_SUPPORTED,
++};
++
++#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
++extern enum cpuhp_smt_control cpu_smt_control;
++#else
++# define cpu_smt_control (CPU_SMT_ENABLED)
++#endif
++
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -933,6 +933,29 @@ EXPORT_SYMBOL(cpu_down);
+ #define takedown_cpu NULL
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
++#ifdef CONFIG_HOTPLUG_SMT
++enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_control = CPU_SMT_DISABLED;
++ if (str && !strcmp(str, "force")) {
++ pr_info("SMT: Force disabled\n");
++ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ }
++ return 0;
++}
++early_param("nosmt", smt_cmdline_disable);
++
++static inline bool cpu_smt_allowed(unsigned int cpu)
++{
++ return cpu_smt_control == CPU_SMT_ENABLED ||
++ topology_is_primary_thread(cpu);
++}
++#else
++static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++#endif
++
+ /**
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
+ * @cpu: cpu that just started
+@@ -1056,6 +1079,10 @@ static int do_cpu_up(unsigned int cpu, e
+ err = -EBUSY;
+ goto out;
+ }
++ if (!cpu_smt_allowed(cpu)) {
++ err = -EPERM;
++ goto out;
++ }
+
+ err = _cpu_up(cpu, 0, target);
+ out:
+@@ -1904,10 +1931,153 @@ static const struct attribute_group cpuh
+ NULL
+ };
+
++#ifdef CONFIG_HOTPLUG_SMT
++
++static const char *smt_states[] = {
++ [CPU_SMT_ENABLED] = "on",
++ [CPU_SMT_DISABLED] = "off",
++ [CPU_SMT_FORCE_DISABLED] = "forceoff",
++ [CPU_SMT_NOT_SUPPORTED] = "notsupported",
++};
++
++static ssize_t
++show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
++}
++
++static void cpuhp_offline_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = true;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
++}
++
++static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
++{
++ int cpu, ret = 0;
++
++ cpu_maps_update_begin();
++ for_each_online_cpu(cpu) {
++ if (topology_is_primary_thread(cpu))
++ continue;
++ ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
++ if (ret)
++ break;
++ /*
++ * As this needs to hold the cpu maps lock it's impossible
++ * to call device_offline() because that ends up calling
++ * cpu_down() which takes cpu maps lock. cpu maps lock
++ * needs to be held as this might race against in kernel
++ * abusers of the hotplug machinery (thermal management).
++ *
++ * So nothing would update device:offline state. That would
++ * leave the sysfs entry stale and prevent onlining after
++ * smt control has been changed to 'off' again. This is
++ * called under the sysfs hotplug lock, so it is properly
++ * serialized against the regular offline usage.
++ */
++ cpuhp_offline_cpu_device(cpu);
++ }
++ if (!ret)
++ cpu_smt_control = ctrlval;
++ cpu_maps_update_done();
++ return ret;
++}
++
++static void cpuhp_smt_enable(void)
++{
++ cpu_maps_update_begin();
++ cpu_smt_control = CPU_SMT_ENABLED;
++ cpu_maps_update_done();
++}
++
++static ssize_t
++store_smt_control(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ctrlval, ret;
++
++ if (sysfs_streq(buf, "on"))
++ ctrlval = CPU_SMT_ENABLED;
++ else if (sysfs_streq(buf, "off"))
++ ctrlval = CPU_SMT_DISABLED;
++ else if (sysfs_streq(buf, "forceoff"))
++ ctrlval = CPU_SMT_FORCE_DISABLED;
++ else
++ return -EINVAL;
++
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
++ return -EPERM;
++
++ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return -ENODEV;
++
++ ret = lock_device_hotplug_sysfs();
++ if (ret)
++ return ret;
++
++ if (ctrlval != cpu_smt_control) {
++ switch (ctrlval) {
++ case CPU_SMT_ENABLED:
++ cpuhp_smt_enable();
++ break;
++ case CPU_SMT_DISABLED:
++ case CPU_SMT_FORCE_DISABLED:
++ ret = cpuhp_smt_disable(ctrlval);
++ break;
++ }
++ }
++
++ unlock_device_hotplug();
++ return ret ? ret : count;
++}
++static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
++
++static ssize_t
++show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool active = topology_max_smt_threads() > 1;
++
++ return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
++}
++static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
++
++static struct attribute *cpuhp_smt_attrs[] = {
++ &dev_attr_control.attr,
++ &dev_attr_active.attr,
++ NULL
++};
++
++static const struct attribute_group cpuhp_smt_attr_group = {
++ .attrs = cpuhp_smt_attrs,
++ .name = "smt",
++ NULL
++};
++
++static int __init cpu_smt_state_init(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
++ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
++ &cpuhp_smt_attr_group);
++}
++
++#else
++static inline int cpu_smt_state_init(void) { return 0; }
++#endif
++
+ static int __init cpuhp_sysfs_init(void)
+ {
+ int cpu, ret;
+
++ ret = cpu_smt_state_init();
++ if (ret)
++ return ret;
++
+ ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_cpu_root_attr_group);
+ if (ret)
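
For completeness, a hedged userspace sketch that reads the two sysfs files introduced above. The paths come from the ABI documentation in this patch and exist only on kernels built with CONFIG_HOTPLUG_SMT; actually switching SMT off is then a matter of writing "off" (or "forceoff") to the control file as root.

#include <stdio.h>

static void show(const char *path)
{
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	/* "on", "off", "forceoff" or "notsupported" */
	show("/sys/devices/system/cpu/smt/control");
	/* "1" while sibling threads are online */
	show("/sys/devices/system/cpu/smt/active");
	return 0;
}
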
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:36:15 +0200
+Subject: x86/cpu: Remove the pointless CPU printout
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 55e6d279abd92cfd7576bba031e7589be8475edb upstream
+
+The value of this printout is dubious at best and there is no point in
+having it in two different places along with convoluted ways to reach it.
+
+Remove it completely.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 20 +++++---------------
+ arch/x86/kernel/cpu/topology.c | 11 -----------
+ 2 files changed, 5 insertions(+), 26 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -643,13 +643,12 @@ void detect_ht(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+ int index_msb, core_bits;
+- static bool printed;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- goto out;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+ return;
+@@ -658,14 +657,14 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
++ if (!smp_num_siblings)
++ smp_num_siblings = 1;
++
+ if (smp_num_siblings == 1) {
+ pr_info_once("CPU0: Hyper-Threading is disabled\n");
+- goto out;
++ return;
+ }
+
+- if (smp_num_siblings <= 1)
+- goto out;
+-
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+
+@@ -677,15 +676,6 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+ ((1 << core_bits) - 1);
+-
+-out:
+- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
+- pr_info("CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- pr_info("CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+ #endif
+ }
+
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -33,7 +33,6 @@ void detect_extended_topology(struct cpu
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+- static bool printed;
+
+ if (c->cpuid_level < 0xb)
+ return;
+@@ -86,15 +85,5 @@ void detect_extended_topology(struct cpu
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+-
+- if (!printed) {
+- pr_info("CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- if (c->x86_max_cores > 1)
+- pr_info("CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+- return;
+ #endif
+ }
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:47:10 +0200
+Subject: x86/cpu/AMD: Remove the pointless detect_ht() call
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 44ca36de56d1bf196dca2eb67cd753a46961ffe6 upstream
+
+Real 32bit AMD CPUs do not have SMT and the only value of the call was to
+reach the magic printout which got removed.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -867,10 +867,6 @@ static void init_amd(struct cpuinfo_x86
+ srat_detect_node(c);
+ }
+
+-#ifdef CONFIG_X86_32
+- detect_ht(c);
+-#endif
+-
+ init_amd_cacheinfo(c);
+
+ if (c->x86 >= 0xf)
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:53:57 +0200
+Subject: x86/cpu/common: Provide detect_ht_early()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 545401f4448a807b963ff17b575e0a393e68b523 upstream
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_ht() cannot be called before the APIC driver
+is selected, so split out the part which initializes smp_num_siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 24 ++++++++++++++----------
+ arch/x86/kernel/cpu/cpu.h | 1 +
+ 2 files changed, 15 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -638,32 +638,36 @@ static void cpu_detect_tlb(struct cpuinf
+ tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
+ }
+
+-void detect_ht(struct cpuinfo_x86 *c)
++int detect_ht_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+- int index_msb, core_bits;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+- return;
++ return -1;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
++ if (smp_num_siblings == 1)
++ pr_info_once("CPU0: Hyper-Threading is disabled\n");
++#endif
++ return 0;
++}
+
+- if (!smp_num_siblings)
+- smp_num_siblings = 1;
++void detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ int index_msb, core_bits;
+
+- if (smp_num_siblings == 1) {
+- pr_info_once("CPU0: Hyper-Threading is disabled\n");
++ if (detect_ht_early(c) < 0)
+ return;
+- }
+
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,6 +47,7 @@ extern const struct cpu_dev *const __x86
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++extern int detect_ht_early(struct cpuinfo_x86 *c);
+
+ unsigned int aperfmperf_get_khz(int cpu);
+
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:55:39 +0200
+Subject: x86/cpu/topology: Provide detect_extended_topology_early()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 95f3d39ccf7aaea79d1ffdac1c887c2e100ec1b6 upstream
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_extended_topology() cannot be called before
+the APIC driver is selected, so split out the part which initializes
+smp_num_siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/cpu.h | 1 +
+ arch/x86/kernel/cpu/topology.c | 30 ++++++++++++++++++++++++------
+ 2 files changed, 25 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,6 +47,7 @@ extern const struct cpu_dev *const __x86
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
+ extern int detect_ht_early(struct cpuinfo_x86 *c);
+
+ unsigned int aperfmperf_get_khz(int cpu);
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -27,15 +27,13 @@
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
+ */
+-void detect_extended_topology(struct cpuinfo_x86 *c)
++int detect_extended_topology_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+- unsigned int eax, ebx, ecx, edx, sub_index;
+- unsigned int ht_mask_width, core_plus_mask_width;
+- unsigned int core_select_mask, core_level_siblings;
++ unsigned int eax, ebx, ecx, edx;
+
+ if (c->cpuid_level < 0xb)
+- return;
++ return -1;
+
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+
+@@ -43,7 +41,7 @@ void detect_extended_topology(struct cpu
+ * check if the cpuid leaf 0xb is actually implemented.
+ */
+ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
+- return;
++ return -1;
+
+ set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
+
+@@ -51,10 +49,30 @@ void detect_extended_topology(struct cpu
+ * initial apic id, which also represents 32-bit extended x2apic id.
+ */
+ c->initial_apicid = edx;
++ smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++#endif
++ return 0;
++}
++
++/*
++ * Check for extended topology enumeration cpuid leaf 0xb and if it
++ * exists, use it for populating initial_apicid and cpu topology
++ * detection.
++ */
++void detect_extended_topology(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned int eax, ebx, ecx, edx, sub_index;
++ unsigned int ht_mask_width, core_plus_mask_width;
++ unsigned int core_select_mask, core_level_siblings;
++
++ if (detect_extended_topology_early(c) < 0)
++ return;
+
+ /*
+ * Populate HT related information from sub-leaf level 0.
+ */
++ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 01:00:55 +0200
+Subject: x86/cpu/intel: Evaluate smp_num_siblings early
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1910ad5624968f93be48e8e265513c54d66b897c upstream
+
+Make use of the new early detection function to initialize smp_num_siblings
+on the boot cpu before the MP-Table or ACPI/MADT scan happens. That's
+required for force disabling SMT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -301,6 +301,13 @@ static void early_init_intel(struct cpui
+ }
+
+ check_mpx_erratum(c);
++
++ /*
++ * Get the number of SMT siblings early from the extended topology
++ * leaf, if available. Otherwise try the legacy SMT detection.
++ */
++ if (detect_extended_topology_early(c) < 0)
++ detect_ht_early(c);
+ }
+
+ #ifdef CONFIG_X86_32
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 15 Jun 2018 20:48:39 +0200
+Subject: x86/CPU/AMD: Do not check CPUID max ext level before parsing SMP info
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 119bff8a9c9bb00116a844ec68be7bc4b1c768f5 upstream
+
+Old code used to check whether CPUID ext max level is >= 0x80000008 because
+that last leaf contains the number of cores of the physical CPU. The three
+functions called there now do not depend on that leaf anymore so the check
+can go.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -861,11 +861,8 @@ static void init_amd(struct cpuinfo_x86
+
+ cpu_detect_cache_sizes(c);
+
+- /* Multi core CPU? */
+- if (c->extended_cpuid_level >= 0x80000008) {
+- amd_detect_cmp(c);
+- srat_detect_node(c);
+- }
++ amd_detect_cmp(c);
++ srat_detect_node(c);
+
+ init_amd_cacheinfo(c);
+
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:57:38 +0200
+Subject: x86/cpu/AMD: Evaluate smp_num_siblings early
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1e1d7e25fd759eddf96d8ab39d0a90a1979b2d8c upstream
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. amd_get_topology() cannot be called before the APIC
+driver is selected, so split out the part which initializes
+smp_num_siblings and invoke it from early_init_amd().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -315,6 +315,17 @@ static void legacy_fixup_core_id(struct
+ c->cpu_core_id %= cus_per_node;
+ }
+
++
++static void amd_get_topology_early(struct cpuinfo_x86 *c)
++{
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
++ u32 eax, ebx, ecx, edx;
++
++ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
++ smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
++ }
++}
++
+ /*
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+@@ -694,6 +705,8 @@ static void early_init_amd(struct cpuinf
+ set_cpu_bug(c, X86_BUG_AMD_E400);
+
+ early_detect_mem_encrypt(c);
++
++ amd_get_topology_early(c);
+ }
+
+ static void init_amd_k8(struct cpuinfo_x86 *c)
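
A hedged userspace illustration of the CPUID access that amd_get_topology_early() performs above; __get_cpuid_count() is the helper from GCC/clang's <cpuid.h>, and the result is only meaningful on AMD CPUs that actually expose leaf 0x8000001e (X86_FEATURE_TOPOEXT).

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x8000001e, 0, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x8000001e not available\n");
		return 1;
	}
	/* EBX[15:8] holds the sibling count minus one */
	printf("smp_num_siblings would be %u\n", ((ebx >> 8) & 0xff) + 1);
	return 0;
}
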
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 5 Jun 2018 14:00:11 +0200
+Subject: x86/apic: Ignore secondary threads if nosmt=force
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2207def700f902f169fc237b717252c326f9e464 upstream
+
+nosmt on the kernel command line merely prevents the onlining of the
+secondary SMT siblings.
+
+nosmt=force makes the APIC detection code ignore the secondary SMT siblings
+completely, so they do not even show up as possible CPUs. That reduces the
+number of memory allocations for per-CPU variables and keeps other resources
+from being sized larger than necessary.
+
+This is not fully equivalent to disabling SMT in the BIOS because the low
+level SMT enabling in the BIOS can result in partitioning of resources
+between the siblings, which is not undone by just ignoring them. Some CPUs
+can use the full resources when their sibling is not onlined, but this
+depends on the CPU family and model, and it is not well documented whether
+it applies to all partitioned resources. That means that, depending on the
+workload, disabling SMT in the BIOS might result in better performance.
+
+Linus analysis of the Intel manual:
+
+ The intel optimization manual is not very clear on what the partitioning
+ rules are.
+
+ I find:
+
+ "In general, the buffers for staging instructions between major pipe
+ stages are partitioned. These buffers include µop queues after the
+ execution trace cache, the queues after the register rename stage, the
+ reorder buffer which stages instructions for retirement, and the load
+ and store buffers.
+
+ In the case of load and store buffers, partitioning also provided an
+ easier implementation to maintain memory ordering for each logical
+ processor and detect memory ordering violations"
+
+ but some of that partitioning may be relaxed if the HT thread is "not
+ active":
+
+ "In Intel microarchitecture code name Sandy Bridge, the micro-op queue
+ is statically partitioned to provide 28 entries for each logical
+ processor, irrespective of software executing in single thread or
+ multiple threads. If one logical processor is not active in Intel
+ microarchitecture code name Ivy Bridge, then a single thread executing
+ on that processor core can use the 56 entries in the micro-op queue"
+
+ but I do not know what "not active" means, and how dynamic it is. Some of
+ that partitioning may be entirely static and depend on the early BIOS
+ disabling of HT, and even if we park the cores, the resources will just be
+ wasted.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/apic.h | 2 ++
+ arch/x86/kernel/acpi/boot.c | 3 ++-
+ arch/x86/kernel/apic/apic.c | 19 +++++++++++++++++++
+ 3 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -504,8 +504,10 @@ extern int default_check_phys_apicid_pre
+
+ #ifdef CONFIG_SMP
+ bool apic_id_is_primary_thread(unsigned int id);
++bool apic_id_disabled(unsigned int id);
+ #else
+ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
++static inline bool apic_id_disabled(unsigned int id) { return false; }
+ #endif
+
+ extern void irq_enter(void);
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -181,7 +181,8 @@ static int acpi_register_lapic(int id, u
+ }
+
+ if (!enabled) {
+- ++disabled_cpus;
++ if (!apic_id_disabled(id))
++ ++disabled_cpus;
+ return -EINVAL;
+ }
+
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2207,6 +2207,16 @@ bool apic_id_is_primary_thread(unsigned
+ return !(apicid & mask);
+ }
+
++/**
++ * apic_id_disabled - Check whether APIC ID is disabled via SMT control
++ * @id: APIC ID to check
++ */
++bool apic_id_disabled(unsigned int id)
++{
++ return (cpu_smt_control == CPU_SMT_FORCE_DISABLED &&
++ !apic_id_is_primary_thread(id));
++}
++
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+@@ -2302,6 +2312,15 @@ int generic_processor_info(int apicid, i
+ return -EINVAL;
+ }
+
++ /*
++ * If SMT is force disabled and the APIC ID belongs to
++ * a secondary thread, ignore it.
++ */
++ if (apic_id_disabled(apicid)) {
++ pr_info_once("Ignoring secondary SMT threads\n");
++ return -EINVAL;
++ }
++
+ if (apicid == boot_cpu_physical_apicid) {
+ /*
+ * x86_bios_cpu_apicid is required to have processors listed
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Thu, 21 Jun 2018 12:36:29 +0200
+Subject: x86/speculation/l1tf: Extend 64bit swap file size limit
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 1a7ed1ba4bba6c075d5ad61bb75e3fbc870840d6 upstream
+
+The previous patch has limited swap file size so that large offsets cannot
+clear bits above MAX_PA/2 in the pte and interfere with L1TF mitigation.
+
+It assumed that offsets are encoded starting with bit 12, same as pfn. But
+on x86_64, offsets are encoded starting with bit 9.
+
+Thus the limit can be raised by 3 bits. That means 16TB with 42bit MAX_PA
+and 256TB with 46bit MAX_PA.
+
+Fixes: 377eeaa8e11f ("x86/speculation/l1tf: Limit swap file size to MAX_PA/2")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/init.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -891,7 +891,15 @@ unsigned long max_swapfile_size(void)
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+- pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
++ unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
++ /*
++ * We encode swap offsets also with 3 bits below those for pfn
++ * which makes the usable limit higher.
++ */
++#ifdef CONFIG_X86_64
++ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
++#endif
++ pages = min_t(unsigned long, l1tf_limit, pages);
+ }
+ return pages;
+ }
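
Plugging the changelog's example MAX_PA widths into the arithmetic above reproduces the quoted figures. This sketch just takes the old MAX_PA/2 byte limit and shifts it up by the three extra offset bits gained on x86_64:

#include <stdio.h>

int main(void)
{
	unsigned int max_pa_bits[] = { 42, 46 };

	for (int i = 0; i < 2; i++) {
		/* old clamp: MAX_PA/2 bytes of swap, expressed in TB */
		unsigned long long old_tb = (1ULL << (max_pa_bits[i] - 1)) >> 40;

		printf("MAX_PA = %u bits: old limit %llu TB, new limit %llu TB\n",
		       max_pa_bits[i], old_tb, old_tb << 3);
	}
	return 0;
}
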
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 16:42:58 -0400
+Subject: x86/cpufeatures: Add detection of L1D cache flush support.
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 11e34e64e4103955fc4568750914c75d65ea87ee upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD) whose availability is indicated by the CPUID.7.EDX[28] bit being set.
+
+This new MSR "gives software a way to invalidate structures with finer
+granularity than other architectural methods like WBINVD."
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -340,6 +340,7 @@
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
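
A quick userspace check of the same enumeration bit, using the <cpuid.h> helper shipped with GCC/clang; the bit position matches the X86_FEATURE_FLUSH_L1D definition added above (CPUID.(EAX=7,ECX=0):EDX bit 28).

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("IA32_FLUSH_CMD (L1D flush) %s\n",
	       (edx & (1U << 28)) ? "supported" : "not supported");
	return 0;
}
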
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 22 Jun 2018 11:34:11 +0200
+Subject: x86/CPU/AMD: Move TOPOEXT reenablement before reading smp_num_siblings
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 7ce2f0393ea2396142b7faf6ee9b1f3676d08a5f upstream
+
+The TOPOEXT reenablement is a workaround for broken BIOSen which didn't
+enable the CPUID bit. amd_get_topology_early(), however, relies on
+that bit being set so that it can read out the CPUID leaf and set
+smp_num_siblings properly.
+
+Move the reenablement up to early_init_amd(). While at it, simplify
+amd_get_topology_early().
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 37 +++++++++++++++++--------------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -318,12 +318,8 @@ static void legacy_fixup_core_id(struct
+
+ static void amd_get_topology_early(struct cpuinfo_x86 *c)
+ {
+- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+- u32 eax, ebx, ecx, edx;
+-
+- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+- }
++ if (cpu_has(c, X86_FEATURE_TOPOEXT))
++ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+ }
+
+ /*
+@@ -344,7 +340,6 @@ static void amd_get_topology(struct cpui
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+ node_id = ecx & 0xff;
+- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+ if (c->x86 == 0x15)
+ c->cu_id = ebx & 0xff;
+@@ -635,6 +630,7 @@ clear_sev:
+
+ static void early_init_amd(struct cpuinfo_x86 *c)
+ {
++ u64 value;
+ u32 dummy;
+
+ early_init_amd_mc(c);
+@@ -706,6 +702,20 @@ static void early_init_amd(struct cpuinf
+
+ early_detect_mem_encrypt(c);
+
++ /* Re-enable TopologyExtensions if switched off by BIOS */
++ if (c->x86 == 0x15 &&
++ (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
++ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
++
++ if (msr_set_bit(0xc0011005, 54) > 0) {
++ rdmsrl(0xc0011005, value);
++ if (value & BIT_64(54)) {
++ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
++ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
++ }
++ }
++ }
++
+ amd_get_topology_early(c);
+ }
+
+@@ -798,19 +808,6 @@ static void init_amd_bd(struct cpuinfo_x
+ {
+ u64 value;
+
+- /* re-enable TopologyExtensions if switched off by BIOS */
+- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
+- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+-
+- if (msr_set_bit(0xc0011005, 54) > 0) {
+- rdmsrl(0xc0011005, value);
+- if (value & BIT_64(54)) {
+- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+- }
+- }
+- }
+-
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Fri, 22 Jun 2018 17:39:33 +0200
+Subject: x86/speculation/l1tf: Protect PAE swap entries against L1TF
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 0d0f6249058834ffe1ceaad0bb31464af66f6e7a upstream
+
+The PAE 3-level paging code currently doesn't mitigate L1TF by flipping the
+offset bits, and uses the high PTE word, thus bits 32-36 for type, 37-63 for
+offset. The lower word is zeroed, thus systems with less than 4GB memory are
+safe. With 4GB to 128GB the swap type selects the memory locations vulnerable
+to L1TF; with even more memory, the swap offset also influences the address.
+This might be a problem with 32bit PAE guests running on large 64bit hosts.
+
+By continuing to keep the whole swap entry in either high or low 32bit word of
+PTE we would limit the swap size too much. Thus this patch uses the whole PAE
+PTE with the same layout as the 64bit version does. The macros just become a
+bit tricky since they assume the arch-dependent swp_entry_t to be 32bit.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable-3level.h | 35 ++++++++++++++++++++++++++++++++--
+ arch/x86/mm/init.c | 2 -
+ 2 files changed, 34 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -241,12 +241,43 @@ static inline pud_t native_pudp_get_and_
+ #endif
+
+ /* Encode and de-code a swap entry */
++#define SWP_TYPE_BITS 5
++
++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
++
++/* We always extract/encode the offset by shifting it all the way up, and then down again */
++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
++
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
+ #define __swp_type(x) (((x).val) & 0x1f)
+ #define __swp_offset(x) ((x).val >> 5)
+ #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
+-#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+-#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
++
++/*
++ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
++ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
++ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
++ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
++ * __swp_entry_to_pte() through the following helper macro based on 64bit
++ * __swp_entry().
++ */
++#define __swp_pteval_entry(type, offset) ((pteval_t) { \
++ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
++
++#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
++ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
++/*
++ * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
++ * swp_entry_t, but also has to convert it from 64bit to the 32bit
++ * intermediate representation, using the following macros based on 64bit
++ * __swp_type() and __swp_offset().
++ */
++#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
++#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
++
++#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
++ __pteval_swp_offset(pte)))
+
+ #define gup_get_pte gup_get_pte
+ /*
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -896,7 +896,7 @@ unsigned long max_swapfile_size(void)
+ * We encode swap offsets also with 3 bits below those for pfn
+ * which makes the usable limit higher.
+ */
+-#ifdef CONFIG_X86_64
++#if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+ #endif
+ pages = min_t(unsigned long, l1tf_limit, pages);
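
Because the PAE swap macros above are easy to misread, here is a hedged userspace model of the new encoding. It assumes _PAGE_BIT_PROTNONE is bit 8 (so offsets start at bit 9) and SWP_TYPE_BITS is 5, matching the definitions in the hunk, and it only demonstrates that encode and decode round-trip; the sample type and offset are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9	/* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* Mirror of __swp_pteval_entry(): type in the top bits, inverted offset below. */
static uint64_t swp_pteval(uint64_t type, uint64_t offset)
{
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       (type << (64 - SWP_TYPE_BITS));
}

/* Mirrors of __pteval_swp_type() and __pteval_swp_offset(). */
static uint64_t pteval_swp_type(uint64_t pte)
{
	return pte >> (64 - SWP_TYPE_BITS);
}

static uint64_t pteval_swp_offset(uint64_t pte)
{
	return ~pte << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
	uint64_t pte = swp_pteval(3, 0x12345);

	assert(pteval_swp_type(pte) == 3);
	assert(pteval_swp_offset(pte) == 0x12345);
	assert(!(pte & 1));	/* the present bit stays clear */
	printf("pte = %#llx, type = %llu, offset = %#llx\n",
	       (unsigned long long)pte,
	       (unsigned long long)pteval_swp_type(pte),
	       (unsigned long long)pteval_swp_offset(pte));
	return 0;
}

Storing the offset inverted is what keeps the physical-address bits of a swapped-out PTE pointing at non-existent memory, which is the L1TF trick used throughout this series.
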
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Michal Hocko <mhocko@suse.cz>
+Date: Wed, 27 Jun 2018 17:46:50 +0200
+Subject: x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
+
+From: Michal Hocko <mhocko@suse.cz>
+
+commit e14d7dfb41f5807a0c1c26a13f2b8ef16af24935 upstream
+
+Jan has noticed that pte_pfn and co. resp. pfn_pte are incorrect for
+CONFIG_PAE because phys_addr_t is wider than unsigned long and so the
+pte_val resp. shift left would get truncated. Fix this up by using proper
+types.
+
+Fixes: 6b28baca9b1f ("x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation")
+Reported-by: Jan Beulich <JBeulich@suse.com>
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -191,21 +191,21 @@ static inline u64 protnone_mask(u64 val)
+
+ static inline unsigned long pte_pfn(pte_t pte)
+ {
+- unsigned long pfn = pte_val(pte);
++ phys_addr_t pfn = pte_val(pte);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+- unsigned long pfn = pmd_val(pmd);
++ phys_addr_t pfn = pmd_val(pmd);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pud_pfn(pud_t pud)
+ {
+- unsigned long pfn = pud_val(pud);
++ phys_addr_t pfn = pud_val(pud);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+@@ -555,7 +555,7 @@ static inline pgprotval_t check_pgprot(p
+
+ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+ {
+- phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PTE_PFN_MASK;
+ return __pte(pfn | check_pgprot(pgprot));
+@@ -563,7 +563,7 @@ static inline pte_t pfn_pte(unsigned lon
+
+ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+- phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PMD_PAGE_MASK;
+ return __pmd(pfn | check_pgprot(pgprot));
+@@ -571,7 +571,7 @@ static inline pmd_t pfn_pmd(unsigned lon
+
+ static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+ {
+- phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PUD_PAGE_MASK;
+ return __pud(pfn | check_pgprot(pgprot));
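
A hedged illustration of the truncation this patch fixes: on 32-bit PAE, page_nr has the width of the 32-bit unsigned long, so shifting it left by PAGE_SHIFT before widening to the 64-bit phys_addr_t silently drops the high bits. The pfn below is an arbitrary example above the 4 GiB boundary.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t page_nr = 0x00500000;	/* pfn of a page at 20 GiB */

	/* Shift happens in 32-bit arithmetic, then the result is widened: high bits lost. */
	uint64_t truncated = page_nr << PAGE_SHIFT;
	/* The fix widens first, as pfn_pte()/pfn_pmd()/pfn_pud() now do. */
	uint64_t correct = (uint64_t)page_nr << PAGE_SHIFT;

	printf("truncated = %#llx, correct = %#llx\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}
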
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 29 Jun 2018 16:05:47 +0200
+Subject: Revert "x86/apic: Ignore secondary threads if nosmt=force"
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 506a66f374891ff08e064a058c446b336c5ac760 upstream
+
+Dave Hansen reported that it's outright dangerous to keep SMT siblings
+disabled completely so they are stuck in the BIOS and wait for SIPI.
+
+The reason is that Machine Check Exceptions are broadcasted to siblings and
+the soft disabled sibling has CR4.MCE = 0. If a MCE is delivered to a
+logical core with CR4.MCE = 0, it asserts IERR#, which shuts down or
+reboots the machine. The MCE chapter in the SDM contains the following
+blurb:
+
+ Because the logical processors within a physical package are tightly
+ coupled with respect to shared hardware resources, both logical
+ processors are notified of machine check errors that occur within a
+ given physical processor. If machine-check exceptions are enabled when
+ a fatal error is reported, all the logical processors within a physical
+ package are dispatched to the machine-check exception handler. If
+ machine-check exceptions are disabled, the logical processors enter the
+ shutdown state and assert the IERR# signal. When enabling machine-check
+ exceptions, the MCE flag in control register CR4 should be set for each
+ logical processor.
+
+Reverting the commit which ignores siblings at enumeration time solves only
+half of the problem. The core cpuhotplug logic needs to be adjusted as
+well.
+
+This thoughtfully engineered mechanism also turns the boot process on all
+Intel HT enabled systems into an MCE lottery. MCE is enabled on the boot CPU
+before the secondary CPUs are brought up. Depending on the number of
+physical cores the window in which this situation can happen is smaller or
+larger. On a HSW-EX it's about 750ms:
+
+MCE is enabled on the boot CPU:
+
+[ 0.244017] mce: CPU supports 22 MCE banks
+
+The corresponding sibling #72 boots:
+
+[ 1.008005] .... node #0, CPUs: #72
+
+That means if an MCE hits on physical core 0 (logical CPUs 0 and 72)
+between these two points the machine is going to shutdown. At least it's a
+known safe state.
+
+It's obvious that the early boot can be hit by an MCE as well and then runs
+into the same situation because MCEs are not yet enabled on the boot CPU.
+But after enabling them on the boot CPU, it does not make any sense to
+prevent the kernel from recovering.
+
+Adjust the nosmt kernel parameter documentation as well.
+
+Reverts: 2207def700f9 ("x86/apic: Ignore secondary threads if nosmt=force")
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 8 ++------
+ arch/x86/include/asm/apic.h | 2 --
+ arch/x86/kernel/acpi/boot.c | 3 +--
+ arch/x86/kernel/apic/apic.c | 19 -------------------
+ 4 files changed, 3 insertions(+), 29 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2676,12 +2676,8 @@
+ Equivalent to smt=1.
+
+ [KNL,x86] Disable symmetric multithreading (SMT).
+- nosmt=force: Force disable SMT, similar to disabling
+- it in the BIOS except that some of the
+- resource partitioning effects which are
+- caused by having SMT enabled in the BIOS
+- cannot be undone. Depending on the CPU
+- type this might have a performance impact.
++ nosmt=force: Force disable SMT, cannot be undone
++ via the sysfs control file.
+
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -504,10 +504,8 @@ extern int default_check_phys_apicid_pre
+
+ #ifdef CONFIG_SMP
+ bool apic_id_is_primary_thread(unsigned int id);
+-bool apic_id_disabled(unsigned int id);
+ #else
+ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
+-static inline bool apic_id_disabled(unsigned int id) { return false; }
+ #endif
+
+ extern void irq_enter(void);
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -181,8 +181,7 @@ static int acpi_register_lapic(int id, u
+ }
+
+ if (!enabled) {
+- if (!apic_id_disabled(id))
+- ++disabled_cpus;
++ ++disabled_cpus;
+ return -EINVAL;
+ }
+
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2207,16 +2207,6 @@ bool apic_id_is_primary_thread(unsigned
+ return !(apicid & mask);
+ }
+
+-/**
+- * apic_id_disabled - Check whether APIC ID is disabled via SMT control
+- * @id: APIC ID to check
+- */
+-bool apic_id_disabled(unsigned int id)
+-{
+- return (cpu_smt_control == CPU_SMT_FORCE_DISABLED &&
+- !apic_id_is_primary_thread(id));
+-}
+-
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+@@ -2312,15 +2302,6 @@ int generic_processor_info(int apicid, i
+ return -EINVAL;
+ }
+
+- /*
+- * If SMT is force disabled and the APIC ID belongs to
+- * a secondary thread, ignore it.
+- */
+- if (apic_id_disabled(apicid)) {
+- pr_info_once("Ignoring secondary SMT threads\n");
+- return -EINVAL;
+- }
+-
+ if (apicid == boot_cpu_physical_apicid) {
+ /*
+ * x86_bios_cpu_apicid is required to have processors listed
+From foo@baz Tue Aug 14 13:43:10 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 29 Jun 2018 16:05:48 +0200
+Subject: cpu/hotplug: Boot HT siblings at least once
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 0cc3cd21657be04cb0559fe8063f2130493f92cf upstream
+
+Due to the way Machine Check Exceptions work on X86 hyperthreads it's
+required to boot up _all_ logical cores at least once in order to set the
+CR4.MCE bit.
+
+So instead of ignoring the sibling threads right away, let them boot up
+once so they can configure themselves. After they come out of the initial
+boot stage, check whether the CPU is a "secondary" sibling and, if so, cancel
+the operation, which puts the CPU back into the offline state.
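+
+For illustration only, the resulting policy can be modelled as a tiny
+stand-alone C function (names and types invented for this sketch; the real
+check is cpu_smt_allowed() in the hunk below):
+
+  #include <stdbool.h>
+
+  enum smt_control { SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED };
+
+  /* Decision taken once the sibling has reached the AP synchronization point. */
+  static bool smt_allowed(enum smt_control ctrl, bool primary_thread,
+                          bool booted_once)
+  {
+      if (ctrl == SMT_ENABLED)
+          return true;        /* SMT is not restricted at all */
+      if (primary_thread)
+          return true;        /* primary siblings are always allowed */
+      /*
+       * Secondary sibling: it is allowed to boot exactly once so it can
+       * set CR4.MCE; after that the bringup is cancelled again.
+       */
+      return !booted_once;
+  }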
+
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 72 +++++++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 48 insertions(+), 24 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -60,6 +60,7 @@ struct cpuhp_cpu_state {
+ bool rollback;
+ bool single;
+ bool bringup;
++ bool booted_once;
+ struct hlist_node *node;
+ struct hlist_node *last;
+ enum cpuhp_state cb_state;
+@@ -342,6 +343,40 @@ void cpu_hotplug_enable(void)
+ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++#ifdef CONFIG_HOTPLUG_SMT
++enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_control = CPU_SMT_DISABLED;
++ if (str && !strcmp(str, "force")) {
++ pr_info("SMT: Force disabled\n");
++ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ }
++ return 0;
++}
++early_param("nosmt", smt_cmdline_disable);
++
++static inline bool cpu_smt_allowed(unsigned int cpu)
++{
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ return true;
++
++ if (topology_is_primary_thread(cpu))
++ return true;
++
++ /*
++ * On x86 it's required to boot all logical CPUs at least once so
++ * that the init code can get a chance to set CR4.MCE on each
++ * CPU. Otherwise, a broadacasted MCE observing CR4.MCE=0b on any
++ * core will shutdown the machine.
++ */
++ return !per_cpu(cpuhp_state, cpu).booted_once;
++}
++#else
++static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++#endif
++
+ static inline enum cpuhp_state
+ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+ {
+@@ -422,6 +457,16 @@ static int bringup_wait_for_ap(unsigned
+ stop_machine_unpark(cpu);
+ kthread_unpark(st->thread);
+
++ /*
++ * SMT soft disabling on X86 requires to bring the CPU out of the
++ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
++ * CPU marked itself as booted_once in cpu_notify_starting() so the
++ * cpu_smt_allowed() check will now return false if this is not the
++ * primary sibling.
++ */
++ if (!cpu_smt_allowed(cpu))
++ return -ECANCELED;
++
+ if (st->target <= CPUHP_AP_ONLINE_IDLE)
+ return 0;
+
+@@ -933,29 +978,6 @@ EXPORT_SYMBOL(cpu_down);
+ #define takedown_cpu NULL
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
+-#ifdef CONFIG_HOTPLUG_SMT
+-enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+-
+-static int __init smt_cmdline_disable(char *str)
+-{
+- cpu_smt_control = CPU_SMT_DISABLED;
+- if (str && !strcmp(str, "force")) {
+- pr_info("SMT: Force disabled\n");
+- cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+- }
+- return 0;
+-}
+-early_param("nosmt", smt_cmdline_disable);
+-
+-static inline bool cpu_smt_allowed(unsigned int cpu)
+-{
+- return cpu_smt_control == CPU_SMT_ENABLED ||
+- topology_is_primary_thread(cpu);
+-}
+-#else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+-#endif
+-
+ /**
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
+ * @cpu: cpu that just started
+@@ -970,6 +992,7 @@ void notify_cpu_starting(unsigned int cp
+ int ret;
+
+ rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
++ st->booted_once = true;
+ while (st->state < target) {
+ st->state++;
+ ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
+@@ -2180,5 +2203,6 @@ void __init boot_cpu_init(void)
+ */
+ void __init boot_cpu_hotplug_init(void)
+ {
+- per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
++ this_cpu_write(cpuhp_state.booted_once, true);
++ this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+ }
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 11:29:53 -0400
+Subject: x86/KVM: Warn user if KVM is loaded SMT and L1TF CPU bug being present
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 26acfb666a473d960f0fd971fe68f3e3ad16c70b upstream
+
+If the L1TF CPU bug is present we allow the KVM module to be loaded, as the
+majority of users that use Linux and KVM have trusted guests and do not want
+a broken setup.
+
+Cloud vendors are the ones that are uncomfortable with CVE-2018-3620 and as
+such they are the ones that should set 'nosmt' to one.
+
+Setting 'nosmt' means that the system administrator also needs to disable
+SMT (Hyper-threading) in the BIOS, or via the 'nosmt' command line
+parameter, or via the /sys/devices/system/cpu/smt/control. See commit
+05736e4ac13c ("cpu/hotplug: Provide knobs to control SMT").
+
+Other mitigations are to use task affinity, cpu sets, interrupt binding,
+etc - anything to make sure that _only_ the same guests vCPUs are running
+on sibling threads.
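+
+As a rough sketch, the resulting policy at VM creation time looks like this
+(stand-alone C with invented names; the real check is added to vmx_vm_init()
+in the diff below):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Returns 0 if the VM may be created, a negative error code otherwise. */
+  static int l1tf_vm_create_policy(bool cpu_has_l1tf, bool smt_enabled,
+                                   bool nosmt_param)
+  {
+      if (!cpu_has_l1tf || !smt_enabled)
+          return 0;                 /* nothing to warn about */
+      if (nosmt_param) {
+          fprintf(stderr, "SMT + L1TF: refusing to create guests\n");
+          return -1;                /* the kernel uses -EOPNOTSUPP */
+      }
+      fprintf(stderr, "SMT + L1TF: guests allowed, system may be vulnerable\n");
+      return 0;
+  }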
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 6 ++++++
+ arch/x86/kvm/vmx.c | 13 +++++++++++++
+ kernel/cpu.c | 1 +
+ 3 files changed, 20 insertions(+)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1937,6 +1937,12 @@
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
++ kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
++ and the system has SMT (aka Hyper-Threading) enabled then
++ don't allow guests to be created.
++
++ Default is 0 (allow guests to be created).
++
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -71,6 +71,9 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
++static bool __read_mostly nosmt;
++module_param(nosmt, bool, S_IRUGO);
++
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -10142,10 +10145,20 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
++#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++
+ static int vmx_vm_init(struct kvm *kvm)
+ {
+ if (!ple_gap)
+ kvm->arch.pause_in_guest = true;
++
++ if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
++ if (nosmt) {
++ pr_err(L1TF_MSG);
++ return -EOPNOTSUPP;
++ }
++ pr_warn(L1TF_MSG);
++ }
+ return 0;
+ }
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -345,6 +345,7 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+ static int __init smt_cmdline_disable(char *str)
+ {
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Mon, 2 Jul 2018 12:29:30 +0200
+Subject: x86/KVM/VMX: Add module argument for L1TF mitigation
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit a399477e52c17e148746d3ce9a483f681c2aa9a0 upstream
+
+Add a mitigation mode parameter "vmentry_l1d_flush" for CVE-2018-3620, aka
+L1 terminal fault. The valid arguments are:
+
+ - "always" L1D cache flush on every VMENTER.
+ - "cond" Conditional L1D cache flush, explained below
+ - "never" Disable the L1D cache flush mitigation
+
+"cond" is trying to avoid L1D cache flushes on VMENTER if the code executed
+between VMEXIT and VMENTER is considered safe, i.e. is not bringing any
+interesting information into L1D which might be exploited.
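+
+A minimal user-space sketch of how such an option string could be mapped onto
+the three states (illustration only; the actual parser is the
+module_param_cb() handler added in the diff below):
+
+  #include <string.h>
+
+  enum l1d_flush_mode { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };
+
+  static const struct {
+      const char *opt;
+      enum l1d_flush_mode mode;
+  } l1d_params[] = {
+      { "never",  FLUSH_NEVER  },
+      { "cond",   FLUSH_COND   },
+      { "always", FLUSH_ALWAYS },
+  };
+
+  /* Returns 0 and sets *mode on success, -1 for an unknown option. */
+  static int parse_l1d_flush(const char *s, enum l1d_flush_mode *mode)
+  {
+      for (unsigned int i = 0; i < sizeof(l1d_params) / sizeof(l1d_params[0]); i++) {
+          if (!strcmp(s, l1d_params[i].opt)) {
+              *mode = l1d_params[i].mode;
+              return 0;
+          }
+      }
+      return -1;
+  }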
+
+[ tglx: Split out from a larger patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 12 ++++
+ arch/x86/kvm/vmx.c | 59 ++++++++++++++++++++++++
+ 2 files changed, 71 insertions(+)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1964,6 +1964,18 @@
+ (virtualized real and unpaged mode) on capable
+ Intel chips. Default is 1 (enabled)
+
++ kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
++ CVE-2018-3620.
++
++ Valid arguments: never, cond, always
++
++ always: L1D cache flush on every VMENTER.
++ cond: Flush L1D on VMENTER only when the code between
++ VMEXIT and VMENTER can leak host memory.
++ never: Disables the mitigation
++
++ Default is cond (do L1 cache flush in specific instances)
++
+ kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
+ feature (tagged TLBs) on capable Intel chips.
+ Default is 1 (enabled)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -191,6 +191,54 @@ module_param(ple_window_max, uint, 0444)
+
+ extern const ulong vmx_return;
+
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++
++/* These MUST be in sync with vmentry_l1d_param order. */
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++
++static const struct {
++ const char *option;
++ enum vmx_l1d_flush_state cmd;
++} vmentry_l1d_param[] = {
++ {"never", VMENTER_L1D_FLUSH_NEVER},
++ {"cond", VMENTER_L1D_FLUSH_COND},
++ {"always", VMENTER_L1D_FLUSH_ALWAYS},
++};
++
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ unsigned int i;
++
++ if (!s)
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option)) {
++ vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
++static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
++{
++ return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++}
++
++static const struct kernel_param_ops vmentry_l1d_flush_ops = {
++ .set = vmentry_l1d_flush_set,
++ .get = vmentry_l1d_flush_get,
++};
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++
+ struct kvm_vmx {
+ struct kvm kvm;
+
+@@ -12881,6 +12929,15 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
++static void __init vmx_setup_l1d_flush(void)
++{
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
++ !boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++}
++
+ static int __init vmx_init(void)
+ {
+ int r;
+@@ -12914,6 +12971,8 @@ static int __init vmx_init(void)
+ }
+ #endif
+
++ vmx_setup_l1d_flush();
++
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 12:47:38 +0200
+Subject: x86/KVM/VMX: Add L1D flush algorithm
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit a47dd5f06714c844b33f3b5f517b6f3e81ce57b5 upstream
+
+To mitigate the L1 Terminal Fault vulnerability it's required to flush L1D
+on VMENTER to prevent rogue guests from snooping host memory.
+
+CPUs will have a new control MSR via a microcode update to flush L1D with a
+single MSR write, but in the absence of microcode a fallback to a software
+based flush algorithm is required.
+
+Add a software flush loop which is based on code from Intel.
+
+[ tglx: Split out from combo patch ]
+[ bpetkov: Polish the asm code ]
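+
+For illustration, the same two-pass pattern can be written as plain C over a
+64 KiB buffer (a rough user-space sketch; the patch below implements it in
+inline assembly and uses CPUID as a serializing barrier between the passes):
+
+  #include <stddef.h>
+
+  #define FLUSH_BUF_SIZE (64 * 1024)   /* PAGE_SIZE << L1D_CACHE_ORDER */
+
+  static void l1d_flush_sw(const volatile unsigned char *buf)
+  {
+      size_t i;
+
+      /* Pass 1: touch one byte per 4 KiB page so the TLB is populated. */
+      for (i = 0; i < FLUSH_BUF_SIZE; i += 4096)
+          (void)buf[i];
+
+      /* Pass 2: read one byte per 64-byte line to displace the current
+       * L1D contents with the buffer. */
+      for (i = 0; i < FLUSH_BUF_SIZE; i += 64)
+          (void)buf[i];
+  }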
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 66 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9333,6 +9333,46 @@ static int vmx_handle_exit(struct kvm_vc
+ }
+ }
+
++/*
++ * Software based L1D cache flush which is used when microcode providing
++ * the cache control MSR is not loaded.
++ *
++ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
++ * flush it is required to read in 64 KiB because the replacement algorithm
++ * is not exactly LRU. This could be sized at runtime via topology
++ * information but as all relevant affected CPUs have 32KiB L1D cache size
++ * there is no point in doing so.
++ */
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static void __maybe_unused vmx_l1d_flush(void)
++{
++ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++
++ asm volatile(
++ /* First ensure the pages are in the TLB */
++ "xorl %%eax, %%eax\n"
++ ".Lpopulate_tlb:\n\t"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $4096, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lpopulate_tlb\n\t"
++ "xorl %%eax, %%eax\n\t"
++ "cpuid\n\t"
++ /* Now fill the cache */
++ "xorl %%eax, %%eax\n"
++ ".Lfill_cache:\n"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $64, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lfill_cache\n\t"
++ "lfence\n"
++ :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ [size] "r" (size)
++ : "eax", "ebx", "ecx", "edx");
++}
++
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -12929,13 +12969,29 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
+-static void __init vmx_setup_l1d_flush(void)
++static int __init vmx_setup_l1d_flush(void)
+ {
++ struct page *page;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+- return;
++ return 0;
++
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
+
++ vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static void vmx_free_l1d_flush_pages(void)
++{
++ if (vmx_l1d_flush_pages) {
++ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
++ vmx_l1d_flush_pages = NULL;
++ }
+ }
+
+ static int __init vmx_init(void)
+@@ -12971,12 +13027,16 @@ static int __init vmx_init(void)
+ }
+ #endif
+
+- vmx_setup_l1d_flush();
++ r = vmx_setup_l1d_flush();
++ if (r)
++ return r;
+
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
+- if (r)
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
++ if (r) {
++ vmx_free_l1d_flush_pages();
+ return r;
++ }
+
+ #ifdef CONFIG_KEXEC_CORE
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+@@ -13017,6 +13077,7 @@ static void __exit vmx_exit(void)
+ static_branch_disable(&enable_evmcs);
+ }
+ #endif
++ vmx_free_l1d_flush_pages();
+ }
+
+ module_init(vmx_init)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:03:48 +0200
+Subject: x86/KVM/VMX: Add L1D MSR based flush
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 3fa045be4c720146b18a19cea7a767dc6ad5df94 upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
+MSRs defined in the document.
+
+The semantics of this MSR is to allow "finer granularity invalidation of
+caching structures than existing mechanisms like WBINVD. It will writeback
+and invalidate the L1 data cache, including all cachelines brought in by
+preceding instructions, without invalidating all caches (eg. L2 or
+LLC). Some processors may also invalidate the first level level instruction
+cache on a L1D_FLUSH command. The L1 data and instruction caches may be
+shared across the logical processors of a core."
+
+Use it instead of the loop based L1 flush algorithm.
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Avoid allocating pages when the MSR is available ]
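+
+The resulting selection is a simple two-way choice (stand-alone sketch; the
+stub functions below merely stand in for the real WRMSR and software loop):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static void msr_flush(void) { puts("wrmsr IA32_FLUSH_CMD (0x10b), bit 0"); }
+  static void sw_flush(void)  { puts("software read loop over 64 KiB buffer"); }
+
+  static void l1d_flush(bool cpu_has_flush_msr)
+  {
+      if (cpu_has_flush_msr)
+          msr_flush();   /* one MSR write, no flush pages needed */
+      else
+          sw_flush();    /* fallback when the microcode lacks the MSR */
+  }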
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kvm/vmx.c | 15 +++++++++++----
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -76,6 +76,12 @@
+ * control required.
+ */
+
++#define MSR_IA32_FLUSH_CMD 0x0000010b
++#define L1D_FLUSH (1 << 0) /*
++ * Writeback and invalidate the
++ * L1 data cache.
++ */
++
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9350,6 +9350,11 @@ static void __maybe_unused vmx_l1d_flush
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
++ return;
++ }
++
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+@@ -12977,11 +12982,13 @@ static int __init vmx_setup_l1d_flush(vo
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
++ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
+
+- vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
+ return 0;
+ }
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:07:14 +0200
+Subject: x86/KVM/VMX: Add L1D flush logic
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit c595ceee45707f00f64f61c54fb64ef0cc0b4e85 upstream
+
+Add the logic for flushing L1D on VMENTER. The flush depends on the static
+key being enabled and the new l1tf_flush_l1d flag being set.
+
+The flag is set:
+ - Always, if the flush module parameter is 'always'
+
+ - Conditionally at:
+ - Entry to vcpu_run(), i.e. after executing user space
+
+ - From the sched_in notifier, i.e. when switching to a vCPU thread.
+
+ - From vmexit handlers which are considered unsafe, i.e. where
+ sensitive data can be brought into L1D:
+
+ - The emulator, which could be a good target for other speculative
+ execution-based threats,
+
+ - The MMU, which can bring host page tables in the L1 cache.
+
+ - External interrupts
+
+ - Nested operations that require the MMU (see above). That is
+ vmptrld, vmptrst, vmclear,vmwrite,vmread.
+
+ - When handling invept,invvpid
+
+[ tglx: Split out from combo patch and reduced to a single flag ]
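+
+A compact stand-alone model of the flag protocol (names invented for the
+sketch; the real flag is vcpu->arch.l1tf_flush_l1d):
+
+  #include <stdbool.h>
+
+  struct vcpu_model {
+      bool flush_l1d;    /* armed at "unsafe" points, consumed before entry */
+  };
+
+  /* Called from vcpu_run(), sched_in and the unsafe VMEXIT handlers. */
+  static void mark_unsafe(struct vcpu_model *v)
+  {
+      v->flush_l1d = true;
+  }
+
+  /* Called right before VMENTER; returns whether to flush L1D now. */
+  static bool consume_flush_flag(struct vcpu_model *v, bool flush_always)
+  {
+      bool flush = v->flush_l1d;
+
+      /* 'always' keeps the flag armed; otherwise it is re-armed only by
+       * the next unsafe event. */
+      v->flush_l1d = flush_always;
+      return flush;
+  }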
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 4 ++++
+ arch/x86/kvm/mmu.c | 1 +
+ arch/x86/kvm/vmx.c | 22 +++++++++++++++++++++-
+ arch/x86/kvm/x86.c | 8 ++++++++
+ 4 files changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -711,6 +711,9 @@ struct kvm_vcpu_arch {
+
+ /* be preempted when it's in kernel-mode(cpl=0) */
+ bool preempted_in_kernel;
++
++ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
++ bool l1tf_flush_l1d;
+ };
+
+ struct kvm_lpage_info {
+@@ -879,6 +882,7 @@ struct kvm_vcpu_stat {
+ u64 signal_exits;
+ u64 irq_window_exits;
+ u64 nmi_window_exits;
++ u64 l1d_flush;
+ u64 halt_exits;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3836,6 +3836,7 @@ int kvm_handle_page_fault(struct kvm_vcp
+ {
+ int r = 1;
+
++ vcpu->arch.l1tf_flush_l1d = true;
+ switch (vcpu->arch.apf.host_apf_reason) {
+ default:
+ trace_kvm_page_fault(fault_address, error_code);
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9346,9 +9346,20 @@ static int vmx_handle_exit(struct kvm_vc
+ #define L1D_CACHE_ORDER 4
+ static void *vmx_l1d_flush_pages;
+
+-static void __maybe_unused vmx_l1d_flush(void)
++static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++ bool always;
++
++ /*
++ * If the mitigation mode is 'flush always', keep the flush bit
++ * set, otherwise clear it. It gets set again either from
++ * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ */
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ vcpu->arch.l1tf_flush_l1d = always;
++
++ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+@@ -9622,6 +9633,7 @@ static void vmx_handle_external_intr(str
+ [ss]"i"(__KERNEL_DS),
+ [cs]"i"(__KERNEL_CS)
+ );
++ vcpu->arch.l1tf_flush_l1d = true;
+ }
+ }
+ STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+@@ -9879,6 +9891,11 @@ static void __noclone vmx_vcpu_run(struc
+ evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
+ (unsigned long)&current_evmcs->host_rsp : 0;
+
++ if (static_branch_unlikely(&vmx_l1d_should_flush)) {
++ if (vcpu->arch.l1tf_flush_l1d)
++ vmx_l1d_flush(vcpu);
++ }
++
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -11748,6 +11765,9 @@ static int nested_vmx_run(struct kvm_vcp
+ if (ret)
+ return ret;
+
++ /* Hide L1D cache contents from the nested guest. */
++ vmx->vcpu.arch.l1tf_flush_l1d = true;
++
+ /*
+ * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+ * by event injection, halt vcpu.
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -194,6 +194,7 @@ struct kvm_stats_debugfs_item debugfs_en
+ { "irq_injections", VCPU_STAT(irq_injections) },
+ { "nmi_injections", VCPU_STAT(nmi_injections) },
+ { "req_event", VCPU_STAT(req_event) },
++ { "l1d_flush", VCPU_STAT(l1d_flush) },
+ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
+ { "mmu_pte_write", VM_STAT(mmu_pte_write) },
+ { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
+@@ -4870,6 +4871,9 @@ static int emulator_write_std(struct x86
+ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+ unsigned int bytes, struct x86_exception *exception)
+ {
++ /* kvm_write_guest_virt_system can pull in tons of pages. */
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ PFERR_WRITE_MASK, exception);
+ }
+@@ -6046,6 +6050,8 @@ int x86_emulate_instruction(struct kvm_v
+ bool writeback = true;
+ bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ /*
+ * Clear write_fault_to_shadow_pgtable here to ensure it is
+ * never reused.
+@@ -7575,6 +7581,7 @@ static int vcpu_run(struct kvm_vcpu *vcp
+ struct kvm *kvm = vcpu->kvm;
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++ vcpu->arch.l1tf_flush_l1d = true;
+
+ for (;;) {
+ if (kvm_vcpu_running(vcpu)) {
+@@ -8694,6 +8701,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
+
+ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+ {
++ vcpu->arch.l1tf_flush_l1d = true;
+ kvm_x86_ops->sched_in(vcpu, cpu);
+ }
+
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 13:58:37 -0400
+Subject: x86/KVM/VMX: Split the VMX MSR LOAD structures to have an host/guest numbers
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a upstream
+
+There is no semantic change, but this change allows an unbalanced number of
+MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
+restore on VMEXIT or VMENTER may be different.
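+
+Conceptually the change replaces one shared count with a count per direction;
+roughly (shapes only, the array size is a placeholder for this sketch):
+
+  #define NR_AUTOLOAD_MSRS 8   /* placeholder size */
+
+  struct msr_entry { unsigned int index; unsigned long long value; };
+
+  struct msr_list {
+      unsigned int nr;                      /* now tracked per list */
+      struct msr_entry val[NR_AUTOLOAD_MSRS];
+  };
+
+  struct msr_autoload_model {
+      struct msr_list guest;                /* loaded on VMENTER */
+      struct msr_list host;                 /* loaded on VMEXIT  */
+  };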
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 65 ++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -642,6 +642,11 @@ static inline int pi_test_sn(struct pi_d
+ (unsigned long *)&pi_desc->control);
+ }
+
++struct vmx_msrs {
++ unsigned int nr;
++ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
++};
++
+ struct vcpu_vmx {
+ struct kvm_vcpu vcpu;
+ unsigned long host_rsp;
+@@ -675,9 +680,8 @@ struct vcpu_vmx {
+ struct loaded_vmcs *loaded_vmcs;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
+ struct msr_autoload {
+- unsigned nr;
+- struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+- struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
++ struct vmx_msrs guest;
++ struct vmx_msrs host;
+ } msr_autoload;
+ struct {
+ int loaded;
+@@ -2256,18 +2260,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+-
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+- if (i == m->nr)
++ if (i == m->guest.nr)
+ return;
+- --m->nr;
+- m->guest[i] = m->guest[m->nr];
+- m->host[i] = m->host[m->nr];
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ --m->guest.nr;
++ --m->host.nr;
++ m->guest.val[i] = m->guest.val[m->guest.nr];
++ m->host.val[i] = m->host.val[m->host.nr];
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+@@ -2319,24 +2323,25 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->nr) {
+- ++m->nr;
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ } else if (i == m->guest.nr) {
++ ++m->guest.nr;
++ ++m->host.nr;
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+- m->guest[i].index = msr;
+- m->guest[i].value = guest_val;
+- m->host[i].index = msr;
+- m->host[i].value = host_val;
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++ m->host.val[i].index = msr;
++ m->host.val[i].value = host_val;
+ }
+
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+@@ -6092,9 +6097,9 @@ static void vmx_vcpu_setup(struct vcpu_v
+
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+@@ -11128,10 +11133,10 @@ static void prepare_vmcs02_full(struct k
+ * Set the MSR load/store lists to match L0's settings.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ set_cr4_guest_host_mask(vmx);
+
+@@ -12281,8 +12286,8 @@ static void nested_vmx_vmexit(struct kvm
+ vmx_segment_cache_clear(vmx);
+
+ /* Update any VMCS fields that might have changed while L2 ran */
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (vmx->hv_deadline_tsc == -1)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 20:11:39 -0400
+Subject: x86/KVM/VMX: Add find_msr() helper function
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit ca83b4a7f2d068da79a029d323024aa45decb250 upstream
+
+.. to help find the MSR on either the guest or host MSR list.
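+
+A self-contained miniature of the lookup (names invented; the kernel helper
+is find_msr() and returns -ENOENT rather than -1):
+
+  struct msr_entry { unsigned int index; unsigned long long value; };
+  struct msr_list  { unsigned int nr; struct msr_entry val[8]; };
+
+  /* Returns the slot of @msr in @m, or -1 if it is not present. */
+  static int find_msr_slot(const struct msr_list *m, unsigned int msr)
+  {
+      for (unsigned int i = 0; i < m->nr; i++) {
+          if (m->val[i].index == msr)
+              return (int)i;
+      }
+      return -1;
+  }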
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2237,9 +2237,20 @@ static void clear_atomic_switch_msr_spec
+ vm_exit_controls_clearbit(vmx, exit);
+ }
+
++static int find_msr(struct vmx_msrs *m, unsigned int msr)
++{
++ unsigned int i;
++
++ for (i = 0; i < m->nr; ++i) {
++ if (m->val[i].index == msr)
++ return i;
++ }
++ return -ENOENT;
++}
++
+ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2260,11 +2271,8 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
+- if (i == m->guest.nr)
++ i = find_msr(&m->guest, msr);
++ if (i < 0)
+ return;
+ --m->guest.nr;
+ --m->host.nr;
+@@ -2288,7 +2296,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2323,16 +2331,13 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
++ i = find_msr(&m->guest, msr);
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->guest.nr) {
+- ++m->guest.nr;
++ } else if (i < 0) {
++ i = m->guest.nr++;
+ ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:00:47 -0400
+Subject: x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host number accounting
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 3190709335dd31fe1aeeebfe4ffb6c7624ef971f upstream
+
+This allows loading a different number of MSRs depending on the context:
+VMEXIT or VMENTER.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2273,12 +2273,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+- return;
++ goto skip_guest;
+ --m->guest.nr;
+- --m->host.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+- m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++
++skip_guest:
++ i = find_msr(&m->host, msr);
++ if (i < 0)
++ return;
++
++ --m->host.nr;
++ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+@@ -2296,7 +2302,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- int i;
++ int i, j;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2332,21 +2338,24 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- if (i == NR_AUTOLOAD_MSRS) {
++ j = find_msr(&m->host, msr);
++ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i < 0) {
++ }
++ if (i < 0) {
+ i = m->guest.nr++;
+- ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ }
++ if (j < 0) {
++ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+-
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+- m->host.val[i].index = msr;
+- m->host.val[i].value = host_val;
++ m->host.val[j].index = msr;
++ m->host.val[j].value = host_val;
+ }
+
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:01:22 -0400
+Subject: x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream
+
+The IA32_FLUSH_CMD MSR only needs to be written on VMENTER. Extend
+add_atomic_switch_msr() with an entry_only parameter to allow storing the
+MSR only in the guest (ENTRY) MSR array.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2300,9 +2300,9 @@ static void add_atomic_switch_msr_specia
+ }
+
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+- u64 guest_val, u64 host_val)
++ u64 guest_val, u64 host_val, bool entry_only)
+ {
+- int i, j;
++ int i, j = 0;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2338,7 +2338,9 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- j = find_msr(&m->host, msr);
++ if (!entry_only)
++ j = find_msr(&m->host, msr);
++
+ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+@@ -2348,12 +2350,16 @@ static void add_atomic_switch_msr(struct
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++
++ if (entry_only)
++ return;
++
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+- m->guest.val[i].index = msr;
+- m->guest.val[i].value = guest_val;
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
+ }
+@@ -2399,7 +2405,7 @@ static bool update_transition_efer(struc
+ guest_efer &= ~EFER_LME;
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+- guest_efer, host_efer);
++ guest_efer, host_efer, false);
+ return false;
+ } else {
+ guest_efer &= ~ignore_bits;
+@@ -3845,7 +3851,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+- vcpu->arch.ia32_xss, host_xss);
++ vcpu->arch.ia32_xss, host_xss, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
+@@ -9815,7 +9821,7 @@ static void atomic_switch_perf_msrs(stru
+ clear_atomic_switch_msr(vmx, msrs[i].msr);
+ else
+ add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+- msrs[i].host);
++ msrs[i].host, false);
+ }
+
+ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Thu, 28 Jun 2018 17:10:36 -0400
+Subject: x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if required
+
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+
+commit 390d975e0c4e60ce70d4157e0dd91ede37824603 upstream
+
+If the L1D flush module parameter is set to 'always' and the IA32_FLUSH_CMD
+MSR is available, optimize the VMENTER code with the MSR save list.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 42 +++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 37 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6045,6 +6045,16 @@ static void ept_set_mmio_spte_mask(void)
+ VMX_EPT_MISCONFIG_WX_VALUE);
+ }
+
++static bool vmx_l1d_use_msr_save_list(void)
++{
++ if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ static_cpu_has(X86_FEATURE_HYPERVISOR) ||
++ !static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ return false;
++
++ return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++}
++
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -6160,6 +6170,12 @@ static void vmx_vcpu_setup(struct vcpu_v
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
++ /*
++ * If flushing the L1D cache on every VMENTER is enforced and the
++ * MSR is available, use the MSR save list.
++ */
++ if (vmx_l1d_use_msr_save_list())
++ add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+@@ -9377,11 +9393,26 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * If the mitigation mode is 'flush always', keep the flush bit
+- * set, otherwise clear it. It gets set again either from
+- * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ * This code is only executed when:
++ * - the flush mode is 'cond'
++ * - the flush mode is 'always' and the flush MSR is not
++ * available
++ *
++ * If the CPU has the flush MSR then clear the flush bit because
++ * 'always' mode is handled via the MSR save list.
++ *
++ * If the MSR is not avaibable then act depending on the mitigation
++ * mode: If 'flush always', keep the flush bit set, otherwise clear
++ * it.
++ *
++ * The flush bit gets set again either from vcpu_run() or from one
++ * of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ always = false;
++ else
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -13024,7 +13055,8 @@ static int __init vmx_setup_l1d_flush(vo
+ struct page *page;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF))
++ !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ vmx_l1d_use_msr_save_list())
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 7 Jul 2018 11:40:18 +0200
+Subject: cpu/hotplug: Online siblings when SMT control is turned on
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 215af5499d9e2b55f111d2431ea20218115f29b3 upstream
+
+Writing 'off' to /sys/devices/system/cpu/smt/control offlines all SMT
+siblings. Writing 'on' merely enables the ability to online them, but does
+not online them automatically.
+
+Make 'on' more useful by onlining all offline siblings.
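+
+For reference, re-enabling SMT from user space is just a write to that
+control file; a minimal C sketch (requires root):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");
+
+      if (!f) {
+          perror("open smt/control");
+          return 1;
+      }
+      /* With this patch, writing "on" also onlines the offline siblings. */
+      fputs("on\n", f);
+      fclose(f);
+      return 0;
+  }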
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1979,6 +1979,15 @@ static void cpuhp_offline_cpu_device(uns
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ }
+
++static void cpuhp_online_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = false;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
++}
++
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+@@ -2011,11 +2020,24 @@ static int cpuhp_smt_disable(enum cpuhp_
+ return ret;
+ }
+
+-static void cpuhp_smt_enable(void)
++static int cpuhp_smt_enable(void)
+ {
++ int cpu, ret = 0;
++
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
++ for_each_present_cpu(cpu) {
++ /* Skip online CPUs and CPUs on offline nodes */
++ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
++ continue;
++ ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
++ if (ret)
++ break;
++ /* See comment in cpuhp_smt_disable() */
++ cpuhp_online_cpu_device(cpu);
++ }
+ cpu_maps_update_done();
++ return ret;
+ }
+
+ static ssize_t
+@@ -2046,7 +2068,7 @@ store_smt_control(struct device *dev, st
+ if (ctrlval != cpu_smt_control) {
+ switch (ctrlval) {
+ case CPU_SMT_ENABLED:
+- cpuhp_smt_enable();
++ ret = cpuhp_smt_enable();
+ break;
+ case CPU_SMT_DISABLED:
+ case CPU_SMT_FORCE_DISABLED:
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:16 +0200
+Subject: x86/litf: Introduce vmx status variable
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 72c6d2db64fa18c996ece8f06e499509e6c9a37e upstream
+
+Store the effective mitigation of VMX in a status variable and use it to
+report the VMX state in the l1tf sysfs file.
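+
+The reported state can be read from user space through the standard
+vulnerabilities interface, e.g. with a small C reader (illustrative sketch):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      char line[256];
+      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");
+
+      if (!f) {
+          perror("open l1tf");
+          return 1;
+      }
+      /* Prints e.g. "Mitigation: PTE Inversion; VMX: SMT vulnerable, ..." */
+      if (fgets(line, sizeof(line), f))
+          fputs(line, stdout);
+      fclose(f);
+      return 0;
+  }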
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/vmx.h | 9 +++++++++
+ arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++++++++++++++++++--
+ arch/x86/kvm/vmx.c | 22 +++++++++++-----------
+ 3 files changed, 54 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -574,4 +574,13 @@ enum vm_instruction_error_number {
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+ };
+
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_AUTO,
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
++
+ #endif
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -22,6 +22,7 @@
+ #include <asm/processor-flags.h>
+ #include <asm/fpu/internal.h>
+ #include <asm/msr.h>
++#include <asm/vmx.h>
+ #include <asm/paravirt.h>
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+@@ -636,6 +637,12 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "L1TF: " fmt
++
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
++#endif
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -665,6 +672,32 @@ static void __init l1tf_select_mitigatio
+
+ #ifdef CONFIG_SYSFS
+
++#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
++
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++static const char *l1tf_vmx_states[] = {
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++};
++
++static ssize_t l1tf_show_state(char *buf)
++{
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++
++ return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
++ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
++ l1tf_vmx_states[l1tf_vmx_mitigation]);
++}
++#else
++static ssize_t l1tf_show_state(char *buf)
++{
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++}
++#endif
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -692,9 +725,8 @@ static ssize_t cpu_show_common(struct de
+
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+- return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ return l1tf_show_state(buf);
+ break;
+-
+ default:
+ break;
+ }
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -193,19 +193,13 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-/* These MUST be in sync with vmentry_l1d_param order. */
+-enum vmx_l1d_flush_state {
+- VMENTER_L1D_FLUSH_NEVER,
+- VMENTER_L1D_FLUSH_COND,
+- VMENTER_L1D_FLUSH_ALWAYS,
+-};
+-
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+
+ static const struct {
+ const char *option;
+ enum vmx_l1d_flush_state cmd;
+ } vmentry_l1d_param[] = {
++ {"auto", VMENTER_L1D_FLUSH_AUTO},
+ {"never", VMENTER_L1D_FLUSH_NEVER},
+ {"cond", VMENTER_L1D_FLUSH_COND},
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+@@ -13054,8 +13048,12 @@ static int __init vmx_setup_l1d_flush(vo
+ {
+ struct page *page;
+
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ l1tf_vmx_mitigation = vmentry_l1d_flush;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF) ||
+ vmx_l1d_use_msr_save_list())
+ return 0;
+
+@@ -13070,12 +13068,14 @@ static int __init vmx_setup_l1d_flush(vo
+ return 0;
+ }
+
+-static void vmx_free_l1d_flush_pages(void)
++static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
++ /* Restore state so sysfs ignores VMX */
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
+ static int __init vmx_init(void)
+@@ -13118,7 +13118,7 @@ static int __init vmx_init(void)
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r) {
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ return r;
+ }
+
+@@ -13161,7 +13161,7 @@ static void __exit vmx_exit(void)
+ static_branch_disable(&enable_evmcs);
+ }
+ #endif
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ }
+
+ module_init(vmx_init)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:17 +0200
+Subject: x86/kvm: Drop L1TF MSR list approach
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2f055947ae5e2741fb2dc5bba1033c417ccf4faa upstream
+
+The VMX module parameter to control the L1D flush should become
+writeable.
+
+The MSR list is set up at VM init per guest VCPU, but the run time
+switching is based on a static key which is global. Toggling the MSR list
+at run time might be feasible, but for now drop this optimization and use
+the regular MSR write to make run-time switching possible.
+
+The default mitigation is the conditional flush anyway, so for extra
+paranoid setups this will add some small overhead, but the extra code
+executed is in the noise compared to the flush itself.
+
+Aside from that, the EPT disabled case is not handled correctly at the moment,
+and the MSR list magic is in the way of fixing that as well.
+
+If it's really providing a significant advantage, then this needs to be
+revisited after the code is correct and the control is writable.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.516940445@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 43 +++++++------------------------------------
+ 1 file changed, 7 insertions(+), 36 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6039,16 +6039,6 @@ static void ept_set_mmio_spte_mask(void)
+ VMX_EPT_MISCONFIG_WX_VALUE);
+ }
+
+-static bool vmx_l1d_use_msr_save_list(void)
+-{
+- if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
+- static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+- !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- return false;
+-
+- return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-}
+-
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -6164,12 +6154,6 @@ static void vmx_vcpu_setup(struct vcpu_v
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+- /*
+- * If flushing the L1D cache on every VMENTER is enforced and the
+- * MSR is available, use the MSR save list.
+- */
+- if (vmx_l1d_use_msr_save_list())
+- add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+@@ -9387,26 +9371,14 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * This code is only executed when:
+- * - the flush mode is 'cond'
+- * - the flush mode is 'always' and the flush MSR is not
+- * available
+- *
+- * If the CPU has the flush MSR then clear the flush bit because
+- * 'always' mode is handled via the MSR save list.
+- *
+- * If the MSR is not avaibable then act depending on the mitigation
+- * mode: If 'flush always', keep the flush bit set, otherwise clear
+- * it.
++ * This code is only executed when the the flush mode is 'cond' or
++ * 'always'
+ *
+- * The flush bit gets set again either from vcpu_run() or from one
+- * of the unsafe VMEXIT handlers.
++ * If 'flush always', keep the flush bit set, otherwise clear
++ * it. The flush bit gets set again either from vcpu_run() or from
++ * one of the unsafe VMEXIT handlers.
+ */
+- if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- always = false;
+- else
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -13053,8 +13025,7 @@ static int __init vmx_setup_l1d_flush(vo
+
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- vmx_l1d_use_msr_save_list())
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:18 +0200
+Subject: x86/l1tf: Handle EPT disabled state proper
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit a7b9020b06ec6d7c3f3b0d4ef1a9eba12654f4f7 upstream
+
+If Extended Page Tables (EPT) are disabled or not supported, no L1D
+flushing is required. The setup function can just avoid setting up the L1D
+flush for the EPT=n case.
+
+Invoke it after the hardware setup has been done and enable_ept has the
+correct state, and expose the EPT disabled state in the mitigation status as
+well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.612160168@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/vmx.h | 1
+ arch/x86/kernel/cpu/bugs.c | 9 ++--
+ arch/x86/kvm/vmx.c | 89 ++++++++++++++++++++++++---------------------
+ 3 files changed, 54 insertions(+), 45 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -579,6 +579,7 @@ enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_NEVER,
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
++ VMENTER_L1D_FLUSH_EPT_DISABLED,
+ };
+
+ extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -676,10 +676,11 @@ static void __init l1tf_select_mitigatio
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+ static const char *l1tf_vmx_states[] = {
+- [VMENTER_L1D_FLUSH_AUTO] = "auto",
+- [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+- [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+- [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
+ };
+
+ static ssize_t l1tf_show_state(char *buf)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -13023,6 +13023,11 @@ static int __init vmx_setup_l1d_flush(vo
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
++ }
++
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+@@ -13049,6 +13054,41 @@ static void vmx_cleanup_l1d_flush(void)
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
++static void vmx_exit(void)
++{
++#ifdef CONFIG_KEXEC_CORE
++ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
++ synchronize_rcu();
++#endif
++
++ kvm_exit();
++
++#if IS_ENABLED(CONFIG_HYPERV)
++ if (static_branch_unlikely(&enable_evmcs)) {
++ int cpu;
++ struct hv_vp_assist_page *vp_ap;
++ /*
++ * Reset everything to support using non-enlightened VMCS
++ * access later (e.g. when we reload the module with
++ * enlightened_vmcs=0)
++ */
++ for_each_online_cpu(cpu) {
++ vp_ap = hv_get_vp_assist_page(cpu);
++
++ if (!vp_ap)
++ continue;
++
++ vp_ap->current_nested_vmcs = 0;
++ vp_ap->enlighten_vmentry = 0;
++ }
++
++ static_branch_disable(&enable_evmcs);
++ }
++#endif
++ vmx_cleanup_l1d_flush();
++}
++module_exit(vmx_exit);
++
+ static int __init vmx_init(void)
+ {
+ int r;
+@@ -13082,14 +13122,17 @@ static int __init vmx_init(void)
+ }
+ #endif
+
+- r = vmx_setup_l1d_flush();
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ /*
++ * Must be called after kvm_init() so enable_ept is properly set up
++ */
++ r = vmx_setup_l1d_flush();
+ if (r) {
+- vmx_cleanup_l1d_flush();
++ vmx_exit();
+ return r;
+ }
+
+@@ -13100,40 +13143,4 @@ static int __init vmx_init(void)
+
+ return 0;
+ }
+-
+-static void __exit vmx_exit(void)
+-{
+-#ifdef CONFIG_KEXEC_CORE
+- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+- synchronize_rcu();
+-#endif
+-
+- kvm_exit();
+-
+-#if IS_ENABLED(CONFIG_HYPERV)
+- if (static_branch_unlikely(&enable_evmcs)) {
+- int cpu;
+- struct hv_vp_assist_page *vp_ap;
+- /*
+- * Reset everything to support using non-enlightened VMCS
+- * access later (e.g. when we reload the module with
+- * enlightened_vmcs=0)
+- */
+- for_each_online_cpu(cpu) {
+- vp_ap = hv_get_vp_assist_page(cpu);
+-
+- if (!vp_ap)
+- continue;
+-
+- vp_ap->current_nested_vmcs = 0;
+- vp_ap->enlighten_vmentry = 0;
+- }
+-
+- static_branch_disable(&enable_evmcs);
+- }
+-#endif
+- vmx_cleanup_l1d_flush();
+-}
+-
+-module_init(vmx_init)
+-module_exit(vmx_exit)
++module_init(vmx_init);
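One key piece of the change above is the early exit when EPT is off: in that case the hypervisor already manages and sanitizes the effective (shadow) page tables, so no L1D flush machinery needs to be set up at all. A minimal, self-contained C sketch of that decision (illustrative names, not the kernel code):

    #include <stdio.h>

    enum flush_state { FLUSH_AUTO, FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS, FLUSH_EPT_DISABLED };

    /* EPT off means shadow paging: report that state and skip all flush setup. */
    static enum flush_state setup_l1d_flush(int enable_ept, enum flush_state requested)
    {
        if (!enable_ept)
            return FLUSH_EPT_DISABLED;
        return requested;
    }

    int main(void)
    {
        printf("%d\n", setup_l1d_flush(0, FLUSH_ALWAYS)); /* EPT disabled state */
        printf("%d\n", setup_l1d_flush(1, FLUSH_COND));   /* conditional flush */
        return 0;
    }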
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:19 +0200
+Subject: x86/kvm: Move l1tf setup function
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7db92e165ac814487264632ab2624e832f20ae38 upstream
+
+In preparation of allowing run time control for L1D flushing, move the
+setup code to the module parameter handler.
+
+In case of pre module init parsing, just store the value and let vmx_init()
+do the actual setup after running kvm_init() so that enable_ept has
+the correct state.
+
+During run-time invoke it directly from the parameter setter to prepare for
+run-time control.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.694063239@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 125 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 78 insertions(+), 47 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -193,7 +193,8 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++/* Storage for pre module init parameter parsing */
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+ static const struct {
+ const char *option;
+@@ -205,33 +206,85 @@ static const struct {
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+ };
+
+-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+- unsigned int i;
++ struct page *page;
+
+- if (!s)
+- return -EINVAL;
++ /* If set to 'auto' select 'cond' */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO)
++ l1tf = VMENTER_L1D_FLUSH_COND;
+
+- for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option)) {
+- vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
+- return 0;
+- }
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
+ }
+
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
++ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
++
++ l1tf_vmx_mitigation = l1tf;
++
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static int vmentry_l1d_flush_parse(const char *s)
++{
++ unsigned int i;
++
++ if (s) {
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option))
++ return vmentry_l1d_param[i].cmd;
++ }
++ }
+ return -EINVAL;
+ }
+
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ int l1tf;
++
++ if (!boot_cpu_has(X86_BUG_L1TF))
++ return 0;
++
++ l1tf = vmentry_l1d_flush_parse(s);
++ if (l1tf < 0)
++ return l1tf;
++
++ /*
++ * Has vmx_init() run already? If not then this is the pre init
++ * parameter parsing. In that case just store the value and let
++ * vmx_init() do the proper setup after enable_ept has been
++ * established.
++ */
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
++ vmentry_l1d_flush_param = l1tf;
++ return 0;
++ }
++
++ return vmx_setup_l1d_flush(l1tf);
++}
++
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
+- return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+
+ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
+
+ struct kvm_vmx {
+ struct kvm kvm;
+@@ -9378,7 +9431,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -13016,34 +13069,6 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
+-static int __init vmx_setup_l1d_flush(void)
+-{
+- struct page *page;
+-
+- if (!boot_cpu_has_bug(X86_BUG_L1TF))
+- return 0;
+-
+- if (!enable_ept) {
+- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+- return 0;
+- }
+-
+- l1tf_vmx_mitigation = vmentry_l1d_flush;
+-
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
+-
+- if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
+- vmx_l1d_flush_pages = page_address(page);
+- }
+-
+- static_branch_enable(&vmx_l1d_should_flush);
+- return 0;
+-}
+-
+ static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+@@ -13128,12 +13153,18 @@ static int __init vmx_init(void)
+ return r;
+
+ /*
+- * Must be called after kvm_init() so enable_ept is properly set up
+- */
+- r = vmx_setup_l1d_flush();
+- if (r) {
+- vmx_exit();
+- return r;
++ * Must be called after kvm_init() so enable_ept is properly set
++ * up. Hand the parameter mitigation value in which was stored in
++ * the pre module init parser. If no parameter was given, it will
++ * contain 'auto' which will be turned into the default 'cond'
++ * mitigation mode.
++ */
++ if (boot_cpu_has(X86_BUG_L1TF)) {
++ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
++ if (r) {
++ vmx_exit();
++ return r;
++ }
+ }
+
+ #ifdef CONFIG_KEXEC_CORE
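The restructuring above follows a parse-early, apply-late pattern: a parameter value written before vmx_init() has run is only recorded, and the recorded value is handed to the setup function once kvm_init() has established enable_ept; later writes are applied immediately. A rough userspace C sketch of the same idea (names are illustrative; the kernel wires this up through module_param_cb() and struct kernel_param_ops):

    #include <stdio.h>
    #include <string.h>

    enum flush_mode { FLUSH_AUTO, FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

    static const struct { const char *option; enum flush_mode cmd; } params[] = {
        { "auto",   FLUSH_AUTO   },
        { "never",  FLUSH_NEVER  },
        { "cond",   FLUSH_COND   },
        { "always", FLUSH_ALWAYS },
    };

    static enum flush_mode stored = FLUSH_AUTO;  /* pre-init storage */
    static int initialized;                      /* set once "vmx_init" has run */

    static int parse_mode(const char *s)
    {
        for (unsigned int i = 0; i < sizeof(params) / sizeof(params[0]); i++)
            if (!strcmp(s, params[i].option))
                return params[i].cmd;
        return -1;
    }

    static void apply_mode(enum flush_mode m)
    {
        printf("applying flush mode %d\n", m);  /* stands in for the real setup call */
    }

    /* Parameter setter: before init just remember the value, afterwards apply it. */
    static int set_mode(const char *s)
    {
        int m = parse_mode(s);

        if (m < 0)
            return -1;
        if (!initialized) {
            stored = m;
            return 0;
        }
        apply_mode(m);
        return 0;
    }

    int main(void)
    {
        set_mode("always");     /* pre-init write: only stored */
        initialized = 1;        /* "kvm_init()" done, enable_ept is known */
        apply_mode(stored);     /* apply what was stored earlier */
        set_mode("cond");       /* runtime write: applied immediately */
        return 0;
    }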
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:20 +0200
+Subject: x86/kvm: Add static key for flush always
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 4c6523ec59fe895ea352a650218a6be0653910b1 upstream
+
+Avoid the conditional in the L1D flush control path.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.790914912@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -192,6 +192,7 @@ module_param(ple_window_max, uint, 0444)
+ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -232,8 +233,12 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+- static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_NEVER)
++ return 0;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
++ static_branch_enable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -9421,7 +9426,6 @@ static void *vmx_l1d_flush_pages;
+ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+- bool always;
+
+ /*
+ * This code is only executed when the flush mode is 'cond' or
+@@ -9431,8 +9435,10 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+- vcpu->arch.l1tf_flush_l1d = always;
++ if (static_branch_unlikely(&vmx_l1d_flush_always))
++ vcpu->arch.l1tf_flush_l1d = true;
++ else
++ vcpu->arch.l1tf_flush_l1d = false;
+
+ vcpu->stat.l1d_flush++;
+
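A static key lets the kernel patch the decision into the hot path once, instead of comparing the mitigation mode on every VMENTER. The closest portable analogue is picking a handler a single time at setup; the sketch below uses a function pointer for that (the kernel uses jump labels via static_branch_enable()/static_branch_unlikely(), which avoid even the indirect call):

    #include <stdio.h>
    #include <stdbool.h>

    /* Hot-path handlers, chosen once at setup time. */
    static void decide_always(bool *need_flush) { *need_flush = true; }
    static void decide_cond(bool *need_flush)   { *need_flush = false; /* set later, e.g. by vcpu_run() */ }

    static void (*decide_flush)(bool *) = decide_cond;

    static void setup(bool always)
    {
        /* One-time decision; no mode comparison is left in the per-entry path. */
        decide_flush = always ? decide_always : decide_cond;
    }

    int main(void)
    {
        bool need_flush;

        setup(true);
        decide_flush(&need_flush);
        printf("need_flush=%d\n", need_flush);
        return 0;
    }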
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:21 +0200
+Subject: x86/kvm: Serialize L1D flush parameter setter
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit dd4bfa739a72508b75760b393d129ed7b431daab upstream
+
+Writes to the parameter files are not serialized at the sysfs core
+level, so local serialization is required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.873642605@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -193,6 +193,7 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
++static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -257,7 +258,7 @@ static int vmentry_l1d_flush_parse(const
+
+ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+- int l1tf;
++ int l1tf, ret;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+@@ -277,7 +278,10 @@ static int vmentry_l1d_flush_set(const c
+ return 0;
+ }
+
+- return vmx_setup_l1d_flush(l1tf);
++ mutex_lock(&vmx_l1d_flush_mutex);
++ ret = vmx_setup_l1d_flush(l1tf);
++ mutex_unlock(&vmx_l1d_flush_mutex);
++ return ret;
+ }
+
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
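Writes arriving through /sys/module/.../parameters are not serialized by the sysfs core, so two concurrent writers could otherwise race inside the setup function; the mutex added above closes that race. A minimal pthread illustration of the same locking pattern (simplified userspace sketch, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t setup_lock = PTHREAD_MUTEX_INITIALIZER;
    static int current_mode;

    /* Concurrent setters must not interleave inside the setup routine. */
    static int set_mode(int mode)
    {
        pthread_mutex_lock(&setup_lock);
        current_mode = mode;    /* stands in for vmx_setup_l1d_flush() */
        pthread_mutex_unlock(&setup_lock);
        return 0;
    }

    int main(void)
    {
        set_mode(1);
        printf("mode=%d\n", current_mode);
        return 0;
    }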
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:22 +0200
+Subject: x86/kvm: Allow runtime control of L1D flush
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 895ae47f9918833c3a880fbccd41e0692b37e7d9 upstream
+
+All mitigation modes can be switched at run time with a static key now:
+
+ - Use sysfs_streq() instead of strcmp() to handle the trailing new line
+ from sysfs writes correctly.
+ - Make the static key management handle multiple invocations properly.
+ - Set the module parameter file to RW
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.954525119@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/vmx.c | 13 ++++++++-----
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -639,7 +639,7 @@ void x86_spec_ctrl_setup_ap(void)
+ #define pr_fmt(fmt) "L1TF: " fmt
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+-enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -234,12 +234,15 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ else
++ static_branch_disable(&vmx_l1d_should_flush);
+
+- static_branch_enable(&vmx_l1d_should_flush);
+ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+ static_branch_enable(&vmx_l1d_flush_always);
++ else
++ static_branch_disable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -249,7 +252,7 @@ static int vmentry_l1d_flush_parse(const
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option))
++ if (sysfs_streq(s, vmentry_l1d_param[i].option))
+ return vmentry_l1d_param[i].cmd;
+ }
+ }
+@@ -293,7 +296,7 @@ static const struct kernel_param_ops vme
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
+ struct kvm_vmx {
+ struct kvm kvm;
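One small but important detail above is the switch from strcmp() to sysfs_streq(): a value written via sysfs usually carries a trailing newline (for example from "echo cond > .../vmentry_l1d_flush"), which a plain strcmp() would reject. A simplified userspace comparison in the same spirit (not the kernel's implementation):

    #include <stdio.h>
    #include <string.h>

    /* Match s2 against s1, tolerating a single trailing '\n' on s1. */
    static int streq_newline(const char *s1, const char *s2)
    {
        size_t n = strlen(s2);

        if (strncmp(s1, s2, n) != 0)
            return 0;
        return s1[n] == '\0' || (s1[n] == '\n' && s1[n + 1] == '\0');
    }

    int main(void)
    {
        printf("%d\n", streq_newline("cond\n", "cond")); /* 1: matches despite newline */
        printf("%d\n", streq_newline("cond", "cond"));   /* 1 */
        printf("%d\n", streq_newline("condx", "cond"));  /* 0 */
        return 0;
    }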
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:23 +0200
+Subject: cpu/hotplug: Expose SMT control init function
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit 8e1b706b6e819bed215c0db16345568864660393 upstream
+
+The L1TF mitigation will gain a command line parameter which allows setting
+a combination of hypervisor mitigation and SMT control.
+
+Expose cpu_smt_disable() so the command line parser can tweak SMT settings.
+
+[ tglx: Split out of larger patch and made it preserve an already existing
+ force off state ]
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.039715135@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 16 +++++++++++++---
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -177,8 +177,10 @@ enum cpuhp_smt_control {
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
++extern void cpu_smt_disable(bool force);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
++static inline void cpu_smt_disable(bool force) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -347,13 +347,23 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+-static int __init smt_cmdline_disable(char *str)
++void __init cpu_smt_disable(bool force)
+ {
+- cpu_smt_control = CPU_SMT_DISABLED;
+- if (str && !strcmp(str, "force")) {
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
++ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return;
++
++ if (force) {
+ pr_info("SMT: Force disabled\n");
+ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ } else {
++ cpu_smt_control = CPU_SMT_DISABLED;
+ }
++}
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_disable(str && !strcmp(str, "force"));
+ return 0;
+ }
+ early_param("nosmt", smt_cmdline_disable);
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:24 +0200
+Subject: cpu/hotplug: Set CPU_SMT_NOT_SUPPORTED early
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit fee0aede6f4739c87179eca76136f83210953b86 upstream
+
+The CPU_SMT_NOT_SUPPORTED state is set (if the processor does not support
+SMT) when the sysfs SMT control file is initialized.
+
+That was fine so far as this was only required to make the output of the
+control file correct and to prevent writes in that case.
+
+With the upcoming l1tf command line parameter, this needs to be set up
+before the L1TF mitigation selection and command line parsing happens.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.121795971@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 ++++++
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 13 ++++++++++---
+ 3 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -58,6 +58,12 @@ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+
++ /*
++ * identify_boot_cpu() initialized SMT support information, let the
++ * core code know.
++ */
++ cpu_smt_check_topology();
++
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -178,9 +178,11 @@ enum cpuhp_smt_control {
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
++extern void cpu_smt_check_topology(void);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
++static inline void cpu_smt_check_topology(void) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -361,6 +361,16 @@ void __init cpu_smt_disable(bool force)
+ }
+ }
+
++/*
++ * The decision whether SMT is supported can only be done after the full
++ * CPU identification. Called from architecture code.
++ */
++void __init cpu_smt_check_topology(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++}
++
+ static int __init smt_cmdline_disable(char *str)
+ {
+ cpu_smt_disable(str && !strcmp(str, "force"));
+@@ -2115,9 +2125,6 @@ static const struct attribute_group cpuh
+
+ static int __init cpu_smt_state_init(void)
+ {
+- if (!topology_smt_supported())
+- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+-
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
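After these two hotplug changes cpu_smt_control behaves like a small state machine: 'force disabled' and 'not supported' are sticky, a plain disable can later be escalated by a forced one, and the topology check runs right after CPU identification so that the L1TF mitigation code sees the correct state. An illustrative C sketch of those transitions (not the kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    enum smt_control { SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED, SMT_NOT_SUPPORTED };

    static enum smt_control smt = SMT_ENABLED;

    /* Terminal states are preserved; "force" escalates a plain disable. */
    static void smt_disable(bool force)
    {
        if (smt == SMT_FORCE_DISABLED || smt == SMT_NOT_SUPPORTED)
            return;
        smt = force ? SMT_FORCE_DISABLED : SMT_DISABLED;
    }

    /* Runs right after CPU identification, before mitigation selection. */
    static void smt_check_topology(bool smt_supported)
    {
        if (!smt_supported)
            smt = SMT_NOT_SUPPORTED;
    }

    int main(void)
    {
        smt_check_topology(false);  /* CPU without SMT */
        smt_disable(true);          /* e.g. nosmt=force: state stays NOT_SUPPORTED */
        printf("smt state = %d\n", smt);
        return 0;
    }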
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:25 +0200
+Subject: x86/bugs, kvm: Introduce boot-time control of L1TF mitigations
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit d90a7a0ec83fb86622cd7dae23255d3c50a99ec8 upstream
+
+Introduce the 'l1tf=' kernel command line option to allow for boot-time
+switching of mitigation that is used on processors affected by L1TF.
+
+The possible values are:
+
+ full
+ Provides all available mitigations for the L1TF vulnerability. Disables
+ SMT and enables all mitigations in the hypervisors. SMT control via
+ /sys/devices/system/cpu/smt/control is still possible after boot.
+ Hypervisors will issue a warning when the first VM is started in
+ a potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ full,force
+ Same as 'full', but disables SMT control. Implies the 'nosmt=force'
+ command line option. sysfs control of SMT and the hypervisor flush
+ control is disabled.
+
+ flush
+ Leaves SMT enabled and enables the conditional hypervisor mitigation.
+ Hypervisors will issue a warning when the first VM is started in a
+ potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ flush,nosmt
+ Disables SMT and enables the conditional hypervisor mitigation. SMT
+ control via /sys/devices/system/cpu/smt/control is still possible
+ after boot. If SMT is reenabled or flushing disabled at runtime
+ hypervisors will issue a warning.
+
+ flush,nowarn
+ Same as 'flush', but hypervisors will not warn when
+ a VM is started in a potentially insecure configuration.
+
+ off
+ Disables hypervisor mitigations and doesn't emit any warnings.
+
+Default is 'flush'.
+
+Let KVM adhere to these semantics, which means:
+
+ - 'l1tf=full,force' : Perform L1D flushes. No runtime control
+ possible.
+
+ - 'l1tf=full'
+ - 'l1tf=flush'
+ - 'l1tf=flush,nosmt' : Perform L1D flushes and warn on VM start if
+ SMT has been runtime enabled or L1D flushing
+ has been run-time enabled
+
+ - 'l1tf=flush,nowarn' : Perform L1D flushes and no warnings are emitted.
+
+ - 'l1tf=off' : L1D flushes are not performed and no warnings
+ are emitted.
+
+KVM can always override the L1D flushing behavior using its 'vmentry_l1d_flush'
+module parameter except when l1tf=full,force is set.
+
+This makes KVM's private 'nosmt' option redundant, and as it is a bit
+non-systematic anyway (this is something to control globally, not on
+hypervisor level), remove that option.
+
+Add the missing Documentation entry for the l1tf vulnerability sysfs file
+while at it.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.202758176@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 4 +
+ Documentation/admin-guide/kernel-parameters.txt | 68 +++++++++++++++++++--
+ arch/x86/include/asm/processor.h | 12 +++
+ arch/x86/kernel/cpu/bugs.c | 44 +++++++++++++
+ arch/x86/kvm/vmx.c | 56 +++++++++++++----
+ 5 files changed, 165 insertions(+), 19 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -479,6 +479,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
++ /sys/devices/system/cpu/vulnerabilities/l1tf
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+@@ -491,6 +492,9 @@ Description: Information about CPU vulne
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
+
++ Details about the l1tf file can be found in
++ Documentation/admin-guide/l1tf.rst
++
+ What: /sys/devices/system/cpu/smt
+ /sys/devices/system/cpu/smt/active
+ /sys/devices/system/cpu/smt/control
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1937,12 +1937,6 @@
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
+- kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
+- and the system has SMT (aka Hyper-Threading) enabled then
+- don't allow guests to be created.
+-
+- Default is 0 (allow guests to be created).
+-
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+@@ -1980,6 +1974,68 @@
+ feature (tagged TLBs) on capable Intel chips.
+ Default is 1 (enabled)
+
++ l1tf= [X86] Control mitigation of the L1TF vulnerability on
++ affected CPUs
++
++ The kernel PTE inversion protection is unconditionally
++ enabled and cannot be disabled.
++
++ full
++ Provides all available mitigations for the
++ L1TF vulnerability. Disables SMT and
++ enables all mitigations in the
++ hypervisors, i.e. unconditional L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ full,force
++ Same as 'full', but disables SMT and L1D
++ flush runtime control. Implies the
++ 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush
++ Leaves SMT enabled and enables the default
++ hypervisor mitigation, i.e. conditional
++ L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nosmt
++
++ Disables SMT and enables the default
++ hypervisor mitigation.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nowarn
++ Same as 'flush', but hypervisors will not
++ warn when a VM is started in a potentially
++ insecure configuration.
++
++ off
++ Disables hypervisor mitigations and doesn't
++ emit any warnings.
++
++ Default is 'flush'.
++
++ For details see: Documentation/admin-guide/l1tf.rst
++
+ l2cr= [PPC]
+
+ l3cr= [PPC]
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -991,4 +991,16 @@ bool xen_set_default_idle(void);
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
+ void microcode_check(void);
++
++enum l1tf_mitigations {
++ L1TF_MITIGATION_OFF,
++ L1TF_MITIGATION_FLUSH_NOWARN,
++ L1TF_MITIGATION_FLUSH,
++ L1TF_MITIGATION_FLUSH_NOSMT,
++ L1TF_MITIGATION_FULL,
++ L1TF_MITIGATION_FULL_FORCE
++};
++
++extern enum l1tf_mitigations l1tf_mitigation;
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -644,7 +644,11 @@ void x86_spec_ctrl_setup_ap(void)
+ #undef pr_fmt
+ #define pr_fmt(fmt) "L1TF: " fmt
+
++/* Default mitigation for L1TF-affected CPUs */
++enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(l1tf_mitigation);
++
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+@@ -656,6 +660,20 @@ static void __init l1tf_select_mitigatio
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ break;
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ cpu_smt_disable(false);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ cpu_smt_disable(true);
++ break;
++ }
++
+ #if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+@@ -674,6 +692,32 @@ static void __init l1tf_select_mitigatio
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+ }
++
++static int __init l1tf_cmdline(char *str)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ l1tf_mitigation = L1TF_MITIGATION_OFF;
++ else if (!strcmp(str, "flush,nowarn"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
++ else if (!strcmp(str, "flush"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
++ else if (!strcmp(str, "flush,nosmt"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
++ else if (!strcmp(str, "full"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL;
++ else if (!strcmp(str, "full,force"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
++
++ return 0;
++}
++early_param("l1tf", l1tf_cmdline);
++
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -71,9 +71,6 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
+-static bool __read_mostly nosmt;
+-module_param(nosmt, bool, S_IRUGO);
+-
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -215,15 +212,31 @@ static int vmx_setup_l1d_flush(enum vmx_
+ {
+ struct page *page;
+
+- /* If set to 'auto' select 'cond' */
+- if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+- l1tf = VMENTER_L1D_FLUSH_COND;
+-
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
++ /* If set to auto use the default l1tf mitigation method */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ l1tf = VMENTER_L1D_FLUSH_NEVER;
++ break;
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ l1tf = VMENTER_L1D_FLUSH_COND;
++ break;
++ case L1TF_MITIGATION_FULL:
++ case L1TF_MITIGATION_FULL_FORCE:
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ break;
++ }
++ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ }
++
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+@@ -10343,19 +10356,36 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
+-#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
++#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
+ static int vmx_vm_init(struct kvm *kvm)
+ {
+ if (!ple_gap)
+ kvm->arch.pause_in_guest = true;
+
+- if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
+- if (nosmt) {
+- pr_err(L1TF_MSG);
+- return -EOPNOTSUPP;
++ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ /* 'I explicitly don't care' is set */
++ break;
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ /*
++ * Warn upon starting the first VM in a potentially
++ * insecure environment.
++ */
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ pr_warn_once(L1TF_MSG_SMT);
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
++ pr_warn_once(L1TF_MSG_L1D);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ /* Flush is enforced */
++ break;
+ }
+- pr_warn(L1TF_MSG);
+ }
+ return 0;
+ }
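The new l1tf= handling reduces to two mappings: the command line string selects an enum l1tf_mitigations value (which also decides whether SMT gets disabled), and KVM later translates that value into a VMENTER L1D flush mode when its own vmentry_l1d_flush parameter is left at 'auto'. A condensed, runnable C sketch of both mappings (illustrative only; the real logic lives in bugs.c and vmx.c):

    #include <stdio.h>
    #include <string.h>

    enum l1tf_mitigations { L1TF_OFF, L1TF_FLUSH_NOWARN, L1TF_FLUSH,
                            L1TF_FLUSH_NOSMT, L1TF_FULL, L1TF_FULL_FORCE };
    enum vmenter_flush { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

    static enum l1tf_mitigations parse_l1tf(const char *s)
    {
        if (!strcmp(s, "off"))          return L1TF_OFF;
        if (!strcmp(s, "flush,nowarn")) return L1TF_FLUSH_NOWARN;
        if (!strcmp(s, "flush"))        return L1TF_FLUSH;
        if (!strcmp(s, "flush,nosmt"))  return L1TF_FLUSH_NOSMT;
        if (!strcmp(s, "full"))         return L1TF_FULL;
        if (!strcmp(s, "full,force"))   return L1TF_FULL_FORCE;
        return L1TF_FLUSH;              /* documented default */
    }

    /* How KVM picks a flush mode when vmentry_l1d_flush=auto. */
    static enum vmenter_flush flush_mode(enum l1tf_mitigations m)
    {
        switch (m) {
        case L1TF_OFF:
            return FLUSH_NEVER;
        case L1TF_FLUSH_NOWARN:
        case L1TF_FLUSH:
        case L1TF_FLUSH_NOSMT:
            return FLUSH_COND;
        case L1TF_FULL:
        case L1TF_FULL_FORCE:
            return FLUSH_ALWAYS;
        }
        return FLUSH_COND;
    }

    int main(int argc, char **argv)
    {
        enum l1tf_mitigations m = parse_l1tf(argc > 1 ? argv[1] : "flush");

        printf("l1tf mitigation %d -> vmentry flush mode %d\n", m, flush_mode(m));
        return 0;
    }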
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:26 +0200
+Subject: Documentation: Add section about CPU vulnerabilities
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 3ec8ce5d866ec6a08a9cfab82b62acf4a830b35f upstream
+
+Add documentation for the L1TF vulnerability and the mitigation mechanisms:
+
+ - Explain the problem and risks
+ - Document the mitigation mechanisms
+ - Document the command line controls
+ - Document the sysfs files
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lkml.kernel.org/r/20180713142323.287429944@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/index.rst | 9
+ Documentation/admin-guide/l1tf.rst | 591 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 600 insertions(+)
+ create mode 100644 Documentation/admin-guide/l1tf.rst
+
+--- a/Documentation/admin-guide/index.rst
++++ b/Documentation/admin-guide/index.rst
+@@ -17,6 +17,15 @@ etc.
+ kernel-parameters
+ devices
+
++This section describes CPU vulnerabilities and provides an overview of the
++possible mitigations along with guidance for selecting mitigations if they
++are configurable at compile, boot or run time.
++
++.. toctree::
++ :maxdepth: 1
++
++ l1tf
++
+ Here is a set of documents aimed at users who are trying to track down
+ problems and bugs in particular.
+
+--- /dev/null
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -0,0 +1,591 @@
++L1TF - L1 Terminal Fault
++========================
++
++L1 Terminal Fault is a hardware vulnerability which allows unprivileged
++speculative access to data which is available in the Level 1 Data Cache
++when the page table entry controlling the virtual address, which is used
++for the access, has the Present bit cleared or other reserved bits set.
++
++Affected processors
++-------------------
++
++This vulnerability affects a wide range of Intel processors. The
++vulnerability is not present on:
++
++ - Processors from AMD, Centaur and other non Intel vendors
++
++ - Older processor models, where the CPU family is < 6
++
++ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
++ Penwell, Pineview, Silvermont, Airmont, Merrifield)
++
++ - The Intel Core Duo Yonah variants (2006 - 2008)
++
++ - The Intel XEON PHI family
++
++ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
++ IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
++ by the Meltdown vulnerability either. These CPUs should become
++ available by end of 2018.
++
++Whether a processor is affected or not can be read out from the L1TF
++vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries are related to the L1TF vulnerability:
++
++ ============= ================= ==============================
++ CVE-2018-3615 L1 Terminal Fault SGX related aspects
++ CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
++ CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
++ ============= ================= ==============================
++
++Problem
++-------
++
++If an instruction accesses a virtual address for which the relevant page
++table entry (PTE) has the Present bit cleared or other reserved bits set,
++then speculative execution ignores the invalid PTE and loads the referenced
++data if it is present in the Level 1 Data Cache, as if the page referenced
++by the address bits in the PTE was still present and accessible.
++
++While this is a purely speculative mechanism and the instruction will raise
++a page fault when it is retired eventually, the pure act of loading the
++data and making it available to other speculative instructions opens up the
++opportunity for side channel attacks to unprivileged malicious code,
++similar to the Meltdown attack.
++
++While Meltdown breaks the user space to kernel space protection, L1TF
++allows attacking any physical memory address in the system and the attack
++works across all protection domains. It allows attacks on SGX and also
++works from inside virtual machines because the speculation bypasses the
++extended page table (EPT) protection mechanism.
++
++
++Attack scenarios
++----------------
++
++1. Malicious user space
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ Operating Systems store arbitrary information in the address bits of a
++ PTE which is marked non present. This allows a malicious user space
++ application to attack the physical memory to which these PTEs resolve.
++ In some cases user-space can maliciously influence the information
++ encoded in the address bits of the PTE, thus making attacks more
++ deterministic and more practical.
++
++ The Linux kernel contains a mitigation for this attack vector, PTE
++ inversion, which is permanently enabled and has no performance
++ impact. The kernel ensures that the address bits of PTEs, which are not
++ marked present, never point to cacheable physical memory space.
++
++ A system with an up to date kernel is protected against attacks from
++ malicious user space applications.
++
++2. Malicious guest in a virtual machine
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The fact that L1TF breaks all domain protections allows malicious guest
++ OSes, which can control the PTEs directly, and malicious guest user
++ space applications, which run on an unprotected guest kernel lacking the
++ PTE inversion mitigation for L1TF, to attack physical host memory.
++
++ A special aspect of L1TF in the context of virtualization is symmetric
++ multi threading (SMT). The Intel implementation of SMT is called
++ HyperThreading. The fact that Hyperthreads on the affected processors
++ share the L1 Data Cache (L1D) is important for this. As the flaw only
++ allows attacking data which is present in L1D, a malicious guest running
++ on one Hyperthread can attack the data which is brought into the L1D by
++ the context which runs on the sibling Hyperthread of the same physical
++ core. This context can be host OS, host user space or a different guest.
++
++ If the processor does not support Extended Page Tables, the attack is
++ only possible, when the hypervisor does not sanitize the content of the
++ only possible when the hypervisor does not sanitize the content of the
++
++ While solutions exist to mitigate these attack vectors fully, these
++ mitigations are not enabled by default in the Linux kernel because they
++ can affect performance significantly. The kernel provides several
++ mechanisms which can be utilized to address the problem depending on the
++ deployment scenario. The mitigations, their protection scope and impact
++ are described in the next sections.
++
++ The default mitigations and the rationale for choosing them are explained
++ at the end of this document. See :ref:`default_mitigations`.
++
++.. _l1tf_sys_info:
++
++L1TF system information
++-----------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current L1TF
++status of the system: whether the system is vulnerable, and which
++mitigations are active. The relevant sysfs file is:
++
++/sys/devices/system/cpu/vulnerabilities/l1tf
++
++The possible values in this file are:
++
++ =========================== ===============================
++ 'Not affected' The processor is not vulnerable
++ 'Mitigation: PTE Inversion' The host protection is active
++ =========================== ===============================
++
++If KVM/VMX is enabled and the processor is vulnerable then the following
++information is appended to the 'Mitigation: PTE Inversion' part:
++
++ - SMT status:
++
++ ===================== ================
++ 'VMX: SMT vulnerable' SMT is enabled
++ 'VMX: SMT disabled' SMT is disabled
++ ===================== ================
++
++ - L1D Flush mode:
++
++ ================================ ====================================
++ 'L1D vulnerable' L1D flushing is disabled
++
++ 'L1D conditional cache flushes' L1D flush is conditionally enabled
++
++ 'L1D cache flushes' L1D flush is unconditionally enabled
++ ================================ ====================================
++
++The resulting grade of protection is discussed in the following sections.
++
++
++Host mitigation mechanism
++-------------------------
++
++The kernel is unconditionally protected against L1TF attacks from malicious
++user space running on the host.
++
++
++Guest mitigation mechanisms
++---------------------------
++
++.. _l1d_flush:
++
++1. L1D flush on VMENTER
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ To make sure that a guest cannot attack data which is present in the L1D
++ the hypervisor flushes the L1D before entering the guest.
++
++ Flushing the L1D evicts not only the data which should not be accessed
++ by a potentially malicious guest, it also flushes the guest
++ data. Flushing the L1D has a performance impact as the processor has to
++ bring the flushed guest data back into the L1D. Depending on the
++ frequency of VMEXIT/VMENTER and the type of computations in the guest
++ frequency of VMEXIT/VMENTER and the type of computations in the guest,
++ scenarios where guest VMEXIT/VMENTER are rare the performance impact is
++ minimal. Virtio and mechanisms like posted interrupts are designed to
++ confine the VMEXITs to a bare minimum, but specific configurations and
++ application scenarios might still suffer from a high VMEXIT rate.
++
++ The kernel provides two L1D flush modes:
++ - conditional ('cond')
++ - unconditional ('always')
++
++ The conditional mode avoids L1D flushing after VMEXITs which execute
++ only audited code paths before the corresponding VMENTER. These code
++ paths have been verified to not expose secrets or other
++ interesting data to an attacker, but they can leak information about the
++ address space layout of the hypervisor.
++
++ Unconditional mode flushes L1D on all VMENTER invocations and provides
++ maximum protection. It has a higher overhead than the conditional
++ mode. The overhead cannot be quantified correctly as it depends on the
++ work load scenario and the resulting number of VMEXITs.
++
++ The general recommendation is to enable L1D flush on VMENTER. The kernel
++ defaults to conditional mode on affected processors.
++
++ **Note**, that L1D flush does not prevent the SMT problem because the
++ sibling thread will also bring back its data into the L1D which makes it
++ attackable again.
++
++ L1D flush can be controlled by the administrator via the kernel command
++ line and sysfs control files. See :ref:`mitigation_control_command_line`
++ and :ref:`mitigation_control_kvm`.
++
++.. _guest_confinement:
++
++2. Guest VCPU confinement to dedicated physical cores
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ To address the SMT problem, it is possible to make a guest or a group of
++ guests affine to one or more physical cores. The proper mechanism for
++ that is to utilize exclusive cpusets to ensure that no other guest or
++ host tasks can run on these cores.
++
++ If only a single guest or related guests run on sibling SMT threads on
++ the same physical core then they can only attack their own memory and
++ restricted parts of the host memory.
++
++ Host memory is attackable when one of the sibling SMT threads runs in
++ host OS (hypervisor) context and the other in guest context. The amount
++ of valuable information from the host OS context depends on the context
++ which the host OS executes, i.e. interrupts, soft interrupts and kernel
++ threads. The amount of valuable data from these contexts cannot be
++ declared as non-interesting for an attacker without deep inspection of
++ the code.
++
++ **Note**, that assigning guests to a fixed set of physical cores affects
++ the ability of the scheduler to do load balancing and might have
++ negative effects on CPU utilization depending on the hosting
++ scenario. Disabling SMT might be a viable alternative for particular
++ scenarios.
++
++ For further information about confining guests to a single or to a group
++ of cores consult the cpusets documentation:
++
++ https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
++
++.. _interrupt_isolation:
++
++3. Interrupt affinity
++^^^^^^^^^^^^^^^^^^^^^
++
++ Interrupts can be made affine to logical CPUs. This is not universally
++ true because there are types of interrupts which are truly per CPU
++ interrupts, e.g. the local timer interrupt. Aside from that, multi queue
++ devices affine their interrupts to single CPUs or groups of CPUs per
++ queue without allowing the administrator to control the affinities.
++
++ Moving the interrupts, which can be affinity controlled, away from CPUs
++ which run untrusted guests, reduces the attack vector space.
++
++ Whether interrupts that are affine to CPUs which run untrusted
++ guests provide interesting data for an attacker depends on the system
++ configuration and the scenarios which run on the system. While for some
++ of the interrupts it can be assumed that they won't expose interesting
++ information beyond exposing hints about the host OS memory layout, there
++ is no way to make general assumptions.
++
++ Interrupt affinity can be controlled by the administrator via the
++ /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
++ available at:
++
++ https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
++
++.. _smt_control:
++
++4. SMT control
++^^^^^^^^^^^^^^
++
++ To prevent the SMT issues of L1TF it might be necessary to disable SMT
++ completely. Disabling SMT can have a significant performance impact, but
++ the impact depends on the hosting scenario and the type of workloads.
++ The impact of disabling SMT also needs to be weighed against the impact
++ of other mitigation solutions like confining guests to dedicated cores.
++
++ The kernel provides a sysfs interface to retrieve the status of SMT and
++ to control it. It also provides a kernel command line interface to
++ control SMT.
++
++ The kernel command line interface consists of the following options:
++
++ =========== ==========================================================
++ nosmt Affects the bring up of the secondary CPUs during boot. The
++ kernel tries to bring all present CPUs online during the
++ boot process. "nosmt" makes sure that from each physical
++ core only one - the so called primary (hyper) thread is
++ activated. Due to a design flaw of Intel processors related
++ to Machine Check Exceptions the non primary siblings have
++ to be brought up at least partially and are then shut down
++ again. "nosmt" can be undone via the sysfs interface.
++
++ nosmt=force Has the same effect as "nosmt" but it does not allow one to
++ undo the SMT disable via the sysfs interface.
++ =========== ==========================================================
++
++ The sysfs interface provides two files:
++
++ - /sys/devices/system/cpu/smt/control
++ - /sys/devices/system/cpu/smt/active
++
++ /sys/devices/system/cpu/smt/control:
++
++ This file allows reading out the SMT control state and provides the
++ ability to disable or (re)enable SMT. The possible states are:
++
++ ============== ===================================================
++ on SMT is supported by the CPU and enabled. All
++ logical CPUs can be onlined and offlined without
++ restrictions.
++
++ off SMT is supported by the CPU and disabled. Only
++ the so called primary SMT threads can be onlined
++ and offlined without restrictions. An attempt to
++ online a non-primary sibling is rejected
++
++ forceoff Same as 'off' but the state cannot be controlled.
++ Attempts to write to the control file are rejected.
++
++ notsupported The processor does not support SMT. It's therefore
++ not affected by the SMT implications of L1TF.
++ Attempts to write to the control file are rejected.
++ ============== ===================================================
++
++ The possible states which can be written into this file to control SMT
++ state are:
++
++ - on
++ - off
++ - forceoff
++
++ /sys/devices/system/cpu/smt/active:
++
++ This file reports whether SMT is enabled and active, i.e. if on any
++ physical core two or more sibling threads are online.
++
++ SMT control is also possible at boot time via the l1tf kernel command
++ line parameter in combination with L1D flush control. See
++ :ref:`mitigation_control_command_line`.
++
++5. Disabling EPT
++^^^^^^^^^^^^^^^^
++
++ Disabling EPT for virtual machines provides full mitigation for L1TF even
++ with SMT enabled, because the effective page tables for guests are
++ managed and sanitized by the hypervisor. However, disabling EPT has a
++ significant performance impact especially when the Meltdown mitigation
++ KPTI is enabled.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++There is ongoing research and development for new mitigation mechanisms to
++address the performance impact of disabling SMT or EPT.
++
++.. _mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++The kernel command line allows controlling the L1TF mitigations at boot
++time with the option "l1tf=". The valid arguments for this option are:
++
++ ============ =============================================================
++ full Provides all available mitigations for the L1TF
++ vulnerability. Disables SMT and enables all mitigations in
++ the hypervisors, i.e. unconditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ full,force Same as 'full', but disables SMT and L1D flush runtime
++ control. Implies the 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush Leaves SMT enabled and enables the default hypervisor
++ mitigation, i.e. conditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nosmt Disables SMT and enables the default hypervisor mitigation,
++ i.e. conditional L1D flushing.
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
++ started in a potentially insecure configuration.
++
++ off Disables hypervisor mitigations and doesn't emit any
++ warnings.
++ ============ =============================================================
++
++The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
++
++
++.. _mitigation_control_kvm:
++
++Mitigation control for KVM - module parameter
++-------------------------------------------------------------
++
++The KVM hypervisor mitigation mechanism, flushing the L1D cache when
++entering a guest, can be controlled with a module parameter.
++
++The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
++following arguments:
++
++ ============ ==============================================================
++ always L1D cache flush on every VMENTER.
++
++ cond Flush L1D on VMENTER only when the code between VMEXIT and
++ VMENTER can leak host memory which is considered
++ interesting for an attacker. This still can leak host memory
++ which allows e.g. determining the host's address space layout.
++
++ never Disables the mitigation
++ ============ ==============================================================
++
++The parameter can be provided on the kernel command line, as a module
++parameter when loading the modules and at runtime modified via the sysfs
++file:
++
++/sys/module/kvm_intel/parameters/vmentry_l1d_flush
++
++The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
++line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
++module parameter is ignored and writes to the sysfs file are rejected.
++
++
++Mitigation selection guide
++--------------------------
++
++1. No virtualization in use
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The system is protected by the kernel unconditionally and no further
++ action is required.
++
++2. Virtualization with trusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ If the guest comes from a trusted source and the guest OS kernel is
++ guaranteed to have the L1TF mitigations in place the system is fully
++ protected against L1TF and no further action is required.
++
++ To avoid the overhead of the default L1D flushing on VMENTER the
++ administrator can disable the flushing via the kernel command line and
++ sysfs control files. See :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++
++3. Virtualization with untrusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++3.1. SMT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If SMT is not supported by the processor or disabled in the BIOS or by
++ the kernel, it's only required to enforce L1D flushing on VMENTER.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++3.2. EPT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If EPT is not supported by the processor or disabled in the hypervisor,
++ the system is fully protected. SMT can stay enabled and L1D flushing on
++ VMENTER is not required.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++3.3. SMT and EPT supported and active
++"""""""""""""""""""""""""""""""""""""
++
++ If SMT and EPT are supported and active then various degrees of
++ mitigations can be employed:
++
++ - L1D flushing on VMENTER:
++
++ L1D flushing on VMENTER is the minimal protection requirement, but it
++ is only potent in combination with other mitigation methods.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++ - Guest confinement:
++
++ Confinement of guests to a single or a group of physical cores which
++ are not running any other processes, can reduce the attack surface
++ significantly, but interrupts, soft interrupts and kernel threads can
++ still expose valuable data to a potential attacker. See
++ :ref:`guest_confinement`.
++
++ - Interrupt isolation:
++
++ Isolating the guest CPUs from interrupts can reduce the attack surface
++ further, but still allows a malicious guest to explore a limited amount
++ of host physical memory. This can at least be used to gain knowledge
++ about the host address space layout. The interrupts which have a fixed
++ affinity to the CPUs which run the untrusted guests can, depending on
++ the scenario, still trigger soft interrupts and schedule kernel threads
++ which might expose valuable information. See
++ :ref:`interrupt_isolation`.
++
++The above three mitigation methods combined can provide protection to a
++certain degree, but the risk of the remaining attack surface has to be
++carefully analyzed. For full protection the following methods are
++available:
++
++ - Disabling SMT:
++
++ Disabling SMT and enforcing the L1D flushing provides the maximum
++ amount of protection. This mitigation does not depend on any of the
++ above mitigation methods.
++
++ SMT control and L1D flushing can be tuned by the command line
++ parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
++ time with the matching sysfs control files. See :ref:`smt_control`,
++ :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++ - Disabling EPT:
++
++ Disabling EPT provides the maximum amount of protection as well. It is
++ not dependent on any of the above mitigation methods. SMT can stay
++ enabled and L1D flushing is not required, but the performance impact is
++ significant.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
++ parameter.
++
++
++.. _default_mitigations:
++
++Default mitigations
++-------------------
++
++ The kernel default mitigations for vulnerable processors are:
++
++ - PTE inversion to protect against malicious user space. This is done
++ unconditionally and cannot be controlled.
++
++ - L1D conditional flushing on VMENTER when EPT is enabled for
++ a guest.
++
++ The kernel does not by default enforce the disabling of SMT, which leaves
++ SMT systems vulnerable when running untrusted guests with EPT enabled.
++
++ The rationale for this choice is:
++
++ - Force disabling SMT can break existing setups, especially with
++ unattended updates.
++
++ - If regular users run untrusted guests on their machine, then L1TF is
++ just an add-on to other malware which might be embedded in an untrusted
++ guest, e.g. spam-bots or attacks on the local network.
++
++ There is no technical way to prevent a user from running untrusted code
++ on their machines blindly.
++
++ - It's technically extremely unlikely and from today's knowledge even
++ impossible that L1TF can be exploited via the most popular attack
++ mechanisms like JavaScript because these mechanisms have no way to
++ control PTEs. If this were possible and no other mitigation were
++ available, then the default might be different.
++
++ - The administrators of cloud and hosting setups have to carefully
++ analyze the risk for their scenarios and make the appropriate
++ mitigation choices, which might even vary across their deployed
++ machines and also result in other changes of their overall setup.
++ There is no way for the kernel to provide a sensible default for this
++ kind of scenario.
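
The mitigation status and the SMT control referenced above are reported and
controlled through sysfs. As an illustration only (not part of any patch in
this series), a minimal userspace C sketch reading the two files mentioned in
this documentation could look like the following; the exact strings depend on
the kernel version, the hardware and the selected mitigation options.

  /*
   * Illustrative sketch: print the L1TF mitigation status and the SMT
   * control state from sysfs. The paths are the ones referenced in the
   * documentation above.
   */
  #include <stdio.h>

  static void show(const char *label, const char *path)
  {
      char buf[256];
      FILE *f = fopen(path, "r");

      if (!f) {
          printf("%s: <not available>\n", label);
          return;
      }
      if (fgets(buf, sizeof(buf), f))
          printf("%s: %s", label, buf);
      fclose(f);
  }

  int main(void)
  {
      show("l1tf", "/sys/devices/system/cpu/vulnerabilities/l1tf");
      show("smt/control", "/sys/devices/system/cpu/smt/control");
      return 0;
  }
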
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Sat, 14 Jul 2018 21:56:13 +0200
+Subject: x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit 6c26fcd2abfe0a56bbd95271fce02df2896cfd24 upstream
+
+pfn_modify_allowed() and arch_has_pfn_modify_check() are outside of the
+!__ASSEMBLY__ section in include/asm-generic/pgtable.h, which confuses
+assembler on archs that don't have __HAVE_ARCH_PFN_MODIFY_ALLOWED (e.g.
+ia64) and breaks build:
+
+ include/asm-generic/pgtable.h: Assembler messages:
+ include/asm-generic/pgtable.h:538: Error: Unknown opcode `static inline bool pfn_modify_allowed(unsigned long pfn,pgprot_t prot)'
+ include/asm-generic/pgtable.h:540: Error: Unknown opcode `return true'
+ include/asm-generic/pgtable.h:543: Error: Unknown opcode `static inline bool arch_has_pfn_modify_check(void)'
+ include/asm-generic/pgtable.h:545: Error: Unknown opcode `return false'
+ arch/ia64/kernel/entry.S:69: Error: `mov' does not fit into bundle
+
+Move those two static inlines into the !__ASSEMBLY__ section so that they
+don't confuse the asm build pass.
+
+Fixes: 42e4089c7890 ("x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings")
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/pgtable.h | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1083,6 +1083,18 @@ int phys_mem_access_prot_allowed(struct
+ static inline void init_espfix_bsp(void) { }
+ #endif
+
++#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
++static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ return true;
++}
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return false;
++}
++#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
++
+ #endif /* !__ASSEMBLY__ */
+
+ #ifndef io_remap_pfn_range
+@@ -1097,16 +1109,4 @@ static inline void init_espfix_bsp(void)
+ #endif
+ #endif
+
+-#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+-static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+-{
+- return true;
+-}
+-
+-static inline bool arch_has_pfn_modify_check(void)
+-{
+- return false;
+-}
+-#endif
+-
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Wed, 18 Jul 2018 19:07:38 +0200
+Subject: x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 288d152c23dcf3c09da46c5c481903ca10ebfef7 upstream
+
+The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
+to evict the L1d cache.
+
+However, these pages are never cleared and, in theory, their data could be
+leaked.
+
+More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
+to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
+the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
+
+Fix this by initializing the individual vmx_l1d_flush_pages with a
+different pattern each.
+
+Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
+"flush_pages" to reflect this change.
+
+Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -211,6 +211,7 @@ static void *vmx_l1d_flush_pages;
+ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+ struct page *page;
++ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+@@ -243,6 +244,16 @@ static int vmx_setup_l1d_flush(enum vmx_
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
++
++ /*
++ * Initialize each page with a different pattern in
++ * order to protect against KSM in the nested
++ * virtualization case.
++ */
++ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
++ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
++ PAGE_SIZE);
++ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+@@ -9471,7 +9482,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+@@ -9480,12 +9491,12 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+- :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+ }
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Tony Luck <tony.luck@intel.com>
+Date: Thu, 19 Jul 2018 13:49:58 -0700
+Subject: Documentation/l1tf: Fix typos
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit 1949f9f49792d65dba2090edddbe36a5f02e3ba3 upstream
+
+Fix spelling and other typos
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/l1tf.rst | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/Documentation/admin-guide/l1tf.rst
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -17,7 +17,7 @@ vulnerability is not present on:
+ - Older processor models, where the CPU family is < 6
+
+ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+- Penwell, Pineview, Slivermont, Airmont, Merrifield)
++ Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+ - The Intel Core Duo Yonah variants (2006 - 2008)
+
+@@ -113,7 +113,7 @@ Attack scenarios
+ deployment scenario. The mitigations, their protection scope and impact
+ are described in the next sections.
+
+- The default mitigations and the rationale for chosing them are explained
++ The default mitigations and the rationale for choosing them are explained
+ at the end of this document. See :ref:`default_mitigations`.
+
+ .. _l1tf_sys_info:
+@@ -191,15 +191,15 @@ Guest mitigation mechanisms
+ - unconditional ('always')
+
+ The conditional mode avoids L1D flushing after VMEXITs which execute
+- only audited code pathes before the corresponding VMENTER. These code
+- pathes have beed verified that they cannot expose secrets or other
++ only audited code paths before the corresponding VMENTER. These code
++ paths have been verified that they cannot expose secrets or other
+ interesting data to an attacker, but they can leak information about the
+ address space layout of the hypervisor.
+
+ Unconditional mode flushes L1D on all VMENTER invocations and provides
+ maximum protection. It has a higher overhead than the conditional
+ mode. The overhead cannot be quantified correctly as it depends on the
+- work load scenario and the resulting number of VMEXITs.
++ workload scenario and the resulting number of VMEXITs.
+
+ The general recommendation is to enable L1D flush on VMENTER. The kernel
+ defaults to conditional mode on affected processors.
+@@ -262,7 +262,7 @@ Guest mitigation mechanisms
+ Whether the interrupts with are affine to CPUs, which run untrusted
+ guests, provide interesting data for an attacker depends on the system
+ configuration and the scenarios which run on the system. While for some
+- of the interrupts it can be assumed that they wont expose interesting
++ of the interrupts it can be assumed that they won't expose interesting
+ information beyond exposing hints about the host OS memory layout, there
+ is no way to make general assumptions.
+
+@@ -299,7 +299,7 @@ Guest mitigation mechanisms
+ to be brought up at least partially and are then shut down
+ again. "nosmt" can be undone via the sysfs interface.
+
+- nosmt=force Has the same effect as "nosmt' but it does not allow to
++ nosmt=force Has the same effect as "nosmt" but it does not allow to
+ undo the SMT disable via the sysfs interface.
+ =========== ==========================================================
+
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 25 Jul 2018 10:36:45 +0200
+Subject: cpu/hotplug: detect SMT disabled by BIOS
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit 73d5e2b472640b1fcdb61ae8be389912ef211bda upstream
+
+If SMT is disabled in BIOS, the CPU code doesn't properly detect it.
+The /sys/devices/system/cpu/smt/control file shows 'on', and the 'l1tf'
+vulnerabilities file shows SMT as vulnerable.
+
+Fix it by forcing 'cpu_smt_control' to CPU_SMT_NOT_SUPPORTED in such a
+case. Unfortunately the detection can only be done after bringing all
+the CPUs online, so we have to overwrite any previous writes to the
+variable.
+
+Reported-by: Joe Mario <jmario@redhat.com>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Fixes: f048c399e0f7 ("x86/topology: Provide topology_smt_supported()")
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2125,6 +2125,15 @@ static const struct attribute_group cpuh
+
+ static int __init cpu_smt_state_init(void)
+ {
++ /*
++ * If SMT was disabled by BIOS, detect it here, after the CPUs have
++ * been brought online. This ensures the smt/l1tf sysfs entries are
++ * consistent with reality. Note this may overwrite cpu_smt_control's
++ * previous setting.
++ */
++ if (topology_max_smt_threads() == 1)
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sat, 21 Jul 2018 22:16:56 +0200
+Subject: x86/KVM/VMX: Don't set l1tf_flush_l1d to true from vmx_l1d_flush()
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 379fd0c7e6a391e5565336a646f19f218fb98c6c upstream
+
+vmx_l1d_flush() gets invoked only if l1tf_flush_l1d is true. There's no
+point in setting l1tf_flush_l1d to true from there again.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9461,15 +9461,15 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /*
+ * This code is only executed when the the flush mode is 'cond' or
+ * 'always'
+- *
+- * If 'flush always', keep the flush bit set, otherwise clear
+- * it. The flush bit gets set again either from vcpu_run() or from
+- * one of the unsafe VMEXIT handlers.
+ */
+- if (static_branch_unlikely(&vmx_l1d_flush_always))
+- vcpu->arch.l1tf_flush_l1d = true;
+- else
++ if (!static_branch_unlikely(&vmx_l1d_flush_always)) {
++ /*
++ * Clear the flush bit, it gets set again either from
++ * vcpu_run() or from one of the unsafe VMEXIT
++ * handlers.
++ */
+ vcpu->arch.l1tf_flush_l1d = false;
++ }
+
+ vcpu->stat.l1d_flush++;
+
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sat, 21 Jul 2018 22:25:00 +0200
+Subject: x86/KVM/VMX: Replace 'vmx_l1d_flush_always' with 'vmx_l1d_flush_cond'
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 427362a142441f08051369db6fbe7f61c73b3dca upstream
+
+The vmx_l1d_flush_always static key is only ever evaluated if
+vmx_l1d_should_flush is enabled. In that case however, there are only two
+L1d flushing modes possible: "always" and "conditional".
+
+The "conditional" mode's implementation tends to require more sophisticated
+logic than the "always" mode.
+
+Avoid inverted logic by replacing the 'vmx_l1d_flush_always' static key
+with a 'vmx_l1d_flush_cond' one.
+
+There is no change in functionality.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -189,7 +189,7 @@ module_param(ple_window_max, uint, 0444)
+ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+-static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+ static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+ /* Storage for pre module init parameter parsing */
+@@ -263,10 +263,10 @@ static int vmx_setup_l1d_flush(enum vmx_
+ else
+ static_branch_disable(&vmx_l1d_should_flush);
+
+- if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+- static_branch_enable(&vmx_l1d_flush_always);
++ if (l1tf == VMENTER_L1D_FLUSH_COND)
++ static_branch_enable(&vmx_l1d_flush_cond);
+ else
+- static_branch_disable(&vmx_l1d_flush_always);
++ static_branch_disable(&vmx_l1d_flush_cond);
+ return 0;
+ }
+
+@@ -9462,7 +9462,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * This code is only executed when the the flush mode is 'cond' or
+ * 'always'
+ */
+- if (!static_branch_unlikely(&vmx_l1d_flush_always)) {
++ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+ /*
+ * Clear the flush bit, it gets set again either from
+ * vcpu_run() or from one of the unsafe VMEXIT
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sat, 21 Jul 2018 22:35:28 +0200
+Subject: x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 5b6ccc6c3b1a477fbac9ec97a0b4c1c48e765209 upstream
+
+Currently, vmx_vcpu_run() checks if l1tf_flush_l1d is set and invokes
+vmx_l1d_flush() if so.
+
+This test is unnecessary for the "always flush L1D" mode.
+
+Move the check to vmx_l1d_flush()'s conditional mode code path.
+
+Notes:
+- vmx_l1d_flush() is likely to get inlined anyway and thus, there's no
+ extra function call.
+
+- This inverts the (static) branch prediction, but there hadn't been any
+ explicit likely()/unlikely() annotations before and so it stays as is.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9463,12 +9463,16 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * 'always'
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
++ bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
++
+ /*
+ * Clear the flush bit, it gets set again either from
+ * vcpu_run() or from one of the unsafe VMEXIT
+ * handlers.
+ */
+ vcpu->arch.l1tf_flush_l1d = false;
++ if (!flush_l1d)
++ return;
+ }
+
+ vcpu->stat.l1d_flush++;
+@@ -10003,10 +10007,8 @@ static void __noclone vmx_vcpu_run(struc
+ evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
+ (unsigned long)&current_evmcs->host_rsp : 0;
+
+- if (static_branch_unlikely(&vmx_l1d_should_flush)) {
+- if (vcpu->arch.l1tf_flush_l1d)
+- vmx_l1d_flush(vcpu);
+- }
++ if (static_branch_unlikely(&vmx_l1d_should_flush))
++ vmx_l1d_flush(vcpu);
+
+ asm(
+ /* Store host registers */
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Fri, 27 Jul 2018 12:46:29 +0200
+Subject: x86/irq: Demote irq_cpustat_t::__softirq_pending to u16
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 9aee5f8a7e30330d0a8f4c626dc924ca5590aba5 upstream
+
+An upcoming patch will extend KVM's L1TF mitigation in conditional mode
+to also cover interrupts after VMEXITs. For tracking those, stores to a
+new per-cpu flag from interrupt handlers will become necessary.
+
+In order to improve cache locality, this new flag will be added to x86's
+irq_cpustat_t.
+
+Make some space available there by shrinking the ->softirq_pending bitfield
+from 32 to 16 bits: the number of bits actually used is only NR_SOFTIRQS,
+i.e. 10.
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/hardirq.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -6,7 +6,7 @@
+ #include <linux/irq.h>
+
+ typedef struct {
+- unsigned int __softirq_pending;
++ u16 __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+ #ifdef CONFIG_X86_LOCAL_APIC
+ unsigned int apic_timer_irqs; /* arch dependent */
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Fri, 27 Jul 2018 13:22:16 +0200
+Subject: x86/KVM/VMX: Introduce per-host-cpu analogue of l1tf_flush_l1d
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 45b575c00d8e72d69d75dd8c112f044b7b01b069 upstream
+
+Part of the L1TF mitigation for vmx includes flushing the L1D cache upon
+VMENTRY.
+
+L1D flushes are costly and two modes of operations are provided to users:
+"always" and the more selective "conditional" mode.
+
+If operating in the latter, the cache would get flushed only if a host side
+code path considered unconfined had been traversed. "Unconfined" in this
+context means that it might have pulled in sensitive data like user data
+or kernel crypto keys.
+
+The need for L1D flushes is tracked by means of the per-vcpu flag
+l1tf_flush_l1d. KVM exit handlers considered unconfined set it. A
+vmx_l1d_flush() subsequently invoked before the next VMENTER will conduct a
+L1d flush based on its value and reset that flag again.
+
+Currently, interrupts delivered "normally" while in root operation between
+VMEXIT and VMENTER are not taken into account. Part of the reason is that
+these don't leave any traces and thus, the vmx code is unable to tell if
+any such has happened.
+
+As proposed by Paolo Bonzini, prepare for tracking all interrupts by
+introducing a new per-cpu flag, "kvm_cpu_l1tf_flush_l1d". It will be in
+strong analogy to the per-vcpu ->l1tf_flush_l1d.
+
+A later patch will make interrupt handlers set it.
+
+For the sake of cache locality, group kvm_cpu_l1tf_flush_l1d into x86's
+per-cpu irq_cpustat_t as suggested by Peter Zijlstra.
+
+Provide the helpers kvm_set_cpu_l1tf_flush_l1d(),
+kvm_clear_cpu_l1tf_flush_l1d() and kvm_get_cpu_l1tf_flush_l1d(). Make them
+trivial resp. non-existent for !CONFIG_KVM_INTEL as appropriate.
+
+Let vmx_l1d_flush() handle kvm_cpu_l1tf_flush_l1d in the same way as
+l1tf_flush_l1d.
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/hardirq.h | 23 +++++++++++++++++++++++
+ arch/x86/kvm/vmx.c | 17 +++++++++++++----
+ 2 files changed, 36 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -7,6 +7,9 @@
+
+ typedef struct {
+ u16 __softirq_pending;
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++ u8 kvm_cpu_l1tf_flush_l1d;
++#endif
+ unsigned int __nmi_count; /* arch dependent */
+ #ifdef CONFIG_X86_LOCAL_APIC
+ unsigned int apic_timer_irqs; /* arch dependent */
+@@ -66,4 +69,24 @@ extern u64 arch_irq_stat_cpu(unsigned in
+ extern u64 arch_irq_stat(void);
+ #define arch_irq_stat arch_irq_stat
+
++
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++static inline void kvm_set_cpu_l1tf_flush_l1d(void)
++{
++ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
++}
++
++static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
++{
++ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
++}
++
++static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
++{
++ return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
++}
++#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
++static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
++#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
++
+ #endif /* _ASM_X86_HARDIRQ_H */
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9463,14 +9463,23 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * 'always'
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+- bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
++ bool flush_l1d;
+
+ /*
+- * Clear the flush bit, it gets set again either from
+- * vcpu_run() or from one of the unsafe VMEXIT
+- * handlers.
++ * Clear the per-vcpu flush bit, it gets set again
++ * either from vcpu_run() or from one of the unsafe
++ * VMEXIT handlers.
+ */
++ flush_l1d = vcpu->arch.l1tf_flush_l1d;
+ vcpu->arch.l1tf_flush_l1d = false;
++
++ /*
++ * Clear the per-cpu flush bit, it gets set again from
++ * the interrupt handlers.
++ */
++ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
++ kvm_clear_cpu_l1tf_flush_l1d();
++
+ if (!flush_l1d)
+ return;
+ }
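
In conditional mode the flush decision now combines two indicators: the
per-vcpu l1tf_flush_l1d flag set by the unsafe VMEXIT handlers and the new
per-cpu kvm_cpu_l1tf_flush_l1d flag that interrupt handlers will set in a
later patch. A simplified userspace model of that decision (illustrative
only; the names and structure below are not the kernel's) is:

  /*
   * Simplified model of KVM's conditional L1D flush decision, for
   * illustration only. The real logic lives in vmx_l1d_flush().
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct vcpu_model { bool l1tf_flush_l1d; };  /* set by unsafe VMEXIT handlers */
  static bool cpu_l1tf_flush_l1d;              /* set by interrupt handlers */

  static bool need_l1d_flush(struct vcpu_model *vcpu, bool flush_always)
  {
      bool flush;

      if (flush_always)                        /* 'always' mode flushes every time */
          return true;

      flush = vcpu->l1tf_flush_l1d;            /* consume and clear both indicators */
      vcpu->l1tf_flush_l1d = false;

      flush |= cpu_l1tf_flush_l1d;
      cpu_l1tf_flush_l1d = false;

      return flush;
  }

  int main(void)
  {
      struct vcpu_model vcpu = { .l1tf_flush_l1d = false };

      cpu_l1tf_flush_l1d = true;               /* e.g. an interrupt arrived after VMEXIT */
      printf("flush: %d\n", need_l1d_flush(&vcpu, false));  /* 1 */
      printf("flush: %d\n", need_l1d_flush(&vcpu, false));  /* 0, both flags consumed */
      return 0;
  }
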
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sun, 29 Jul 2018 12:15:33 +0200
+Subject: x86: Don't include linux/irq.h from asm/hardirq.h
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 447ae316670230d7d29430e2cbf1f5db4f49d14c upstream
+
+The next patch in this series will have to make the definition of
+irq_cpustat_t available to entering_irq().
+
+Inclusion of asm/hardirq.h into asm/apic.h would cause circular header
+dependencies like
+
+ asm/smp.h
+ asm/apic.h
+ asm/hardirq.h
+ linux/irq.h
+ linux/topology.h
+ linux/smp.h
+ asm/smp.h
+
+or
+
+ linux/gfp.h
+ linux/mmzone.h
+ asm/mmzone.h
+ asm/mmzone_64.h
+ asm/smp.h
+ asm/apic.h
+ asm/hardirq.h
+ linux/irq.h
+ linux/irqdesc.h
+ linux/kobject.h
+ linux/sysfs.h
+ linux/kernfs.h
+ linux/idr.h
+ linux/gfp.h
+
+and others.
+
+This causes compilation errors because of the header guards becoming
+effective in the second inclusion: symbols/macros that had been defined
+before wouldn't be available to intermediate headers in the #include chain
+anymore.
+
+A possible workaround would be to move the definition of irq_cpustat_t
+into its own header and include that from both, asm/hardirq.h and
+asm/apic.h.
+
+However, this wouldn't solve the real problem, namely asm/hardirq.h
+unnecessarily pulling in all the linux/irq.h cruft: nothing in
+asm/hardirq.h itself requires it. Also, note that there are some other
+archs, like e.g. arm64, which don't have that #include in their
+asm/hardirq.h.
+
+Remove the linux/irq.h #include from x86' asm/hardirq.h.
+
+Fix resulting compilation errors by adding appropriate #includes to *.c
+files as needed.
+
+Note that some of these *.c files could be cleaned up a bit wrt. to their
+set of #includes, but that should better be done from separate patches, if
+at all.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/dmi.h | 2 +-
+ arch/x86/include/asm/hardirq.h | 1 -
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kernel/apic/apic.c | 1 +
+ arch/x86/kernel/apic/io_apic.c | 1 +
+ arch/x86/kernel/apic/msi.c | 1 +
+ arch/x86/kernel/apic/vector.c | 1 +
+ arch/x86/kernel/fpu/core.c | 1 +
+ arch/x86/kernel/hpet.c | 1 +
+ arch/x86/kernel/i8259.c | 1 +
+ arch/x86/kernel/idt.c | 1 +
+ arch/x86/kernel/irq.c | 1 +
+ arch/x86/kernel/irq_32.c | 1 +
+ arch/x86/kernel/irq_64.c | 1 +
+ arch/x86/kernel/irqinit.c | 1 +
+ arch/x86/kernel/smpboot.c | 1 +
+ arch/x86/kernel/time.c | 1 +
+ arch/x86/mm/pti.c | 1 +
+ arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | 1 +
+ arch/x86/xen/enlighten.c | 1 +
+ drivers/gpu/drm/i915/i915_pmu.c | 1 +
+ drivers/gpu/drm/i915/intel_lpe_audio.c | 1 +
+ drivers/pci/host/pci-hyperv.c | 2 ++
+ 23 files changed, 23 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/dmi.h
++++ b/arch/x86/include/asm/dmi.h
+@@ -4,8 +4,8 @@
+
+ #include <linux/compiler.h>
+ #include <linux/init.h>
++#include <linux/io.h>
+
+-#include <asm/io.h>
+ #include <asm/setup.h>
+
+ static __always_inline __init void *dmi_alloc(unsigned len)
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -3,7 +3,6 @@
+ #define _ASM_X86_HARDIRQ_H
+
+ #include <linux/threads.h>
+-#include <linux/irq.h>
+
+ typedef struct {
+ u16 __softirq_pending;
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -17,6 +17,7 @@
+ #include <linux/tracepoint.h>
+ #include <linux/cpumask.h>
+ #include <linux/irq_work.h>
++#include <linux/irq.h>
+
+ #include <linux/kvm.h>
+ #include <linux/kvm_para.h>
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -56,6 +56,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
++#include <asm/irq_regs.h>
+
+ unsigned int num_processors;
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -33,6 +33,7 @@
+
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/sched.h>
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -12,6 +12,7 @@
+ */
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/pci.h>
+ #include <linux/dmar.h>
+ #include <linux/hpet.h>
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -11,6 +11,7 @@
+ * published by the Free Software Foundation.
+ */
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/seq_file.h>
+ #include <linux/init.h>
+ #include <linux/compiler.h>
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -10,6 +10,7 @@
+ #include <asm/fpu/signal.h>
+ #include <asm/fpu/types.h>
+ #include <asm/traps.h>
++#include <asm/irq_regs.h>
+
+ #include <linux/hardirq.h>
+ #include <linux/pkeys.h>
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -1,6 +1,7 @@
+ #include <linux/clocksource.h>
+ #include <linux/clockchips.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/export.h>
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -5,6 +5,7 @@
+ #include <linux/sched.h>
+ #include <linux/ioport.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/timex.h>
+ #include <linux/random.h>
+ #include <linux/init.h>
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -8,6 +8,7 @@
+ #include <asm/traps.h>
+ #include <asm/proto.h>
+ #include <asm/desc.h>
++#include <asm/hw_irq.h>
+
+ struct idt_data {
+ unsigned int vector;
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -10,6 +10,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
++#include <linux/irq.h>
+
+ #include <asm/apic.h>
+ #include <asm/io_apic.h>
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -11,6 +11,7 @@
+
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/notifier.h>
+ #include <linux/cpu.h>
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -11,6 +11,7 @@
+
+ #include <linux/kernel_stat.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/seq_file.h>
+ #include <linux/delay.h>
+ #include <linux/ftrace.h>
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -5,6 +5,7 @@
+ #include <linux/sched.h>
+ #include <linux/ioport.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/timex.h>
+ #include <linux/random.h>
+ #include <linux/kprobes.h>
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -80,6 +80,7 @@
+ #include <asm/intel-family.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/spec-ctrl.h>
++#include <asm/hw_irq.h>
+
+ /* Number of siblings per CPU package */
+ int smp_num_siblings = 1;
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -12,6 +12,7 @@
+
+ #include <linux/clockchips.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #include <linux/i8253.h>
+ #include <linux/time.h>
+ #include <linux/export.h>
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -45,6 +45,7 @@
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/desc.h>
++#include <asm/sections.h>
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
+--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
++++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+@@ -18,6 +18,7 @@
+ #include <asm/intel-mid.h>
+ #include <asm/intel_scu_ipc.h>
+ #include <asm/io_apic.h>
++#include <asm/hw_irq.h>
+
+ #define TANGIER_EXT_TIMER0_MSI 12
+
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -3,6 +3,7 @@
+ #endif
+ #include <linux/cpu.h>
+ #include <linux/kexec.h>
++#include <linux/slab.h>
+
+ #include <xen/features.h>
+ #include <xen/page.h>
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -24,6 +24,7 @@
+
+ #include <linux/perf_event.h>
+ #include <linux/pm_runtime.h>
++#include <linux/irq.h>
+
+ #include "i915_drv.h"
+ #include "i915_pmu.h"
+--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
+@@ -62,6 +62,7 @@
+
+ #include <linux/acpi.h>
+ #include <linux/device.h>
++#include <linux/irq.h>
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -43,6 +43,8 @@
+ #include <linux/delay.h>
+ #include <linux/semaphore.h>
+ #include <linux/irqdomain.h>
++#include <linux/irq.h>
++
+ #include <asm/irqdomain.h>
+ #include <asm/apic.h>
+ #include <linux/msi.h>
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sun, 29 Jul 2018 13:06:04 +0200
+Subject: x86/irq: Let interrupt handlers set kvm_cpu_l1tf_flush_l1d
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit ffcba43ff66c7dab34ec700debd491d2a4d319b4 upstream
+
+The last missing piece to having vmx_l1d_flush() take interrupts after
+VMEXIT into account is to set the kvm_cpu_l1tf_flush_l1d per-cpu flag on
+irq entry.
+
+Issue calls to kvm_set_cpu_l1tf_flush_l1d() from entering_irq(),
+ipi_entering_ack_irq(), smp_reschedule_interrupt() and
+uv_bau_message_interrupt().
+
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/apic.h | 3 +++
+ arch/x86/kernel/smp.c | 1 +
+ arch/x86/platform/uv/tlb_uv.c | 1 +
+ 3 files changed, 5 insertions(+)
+
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -10,6 +10,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
++#include <asm/hardirq.h>
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+
+@@ -514,6 +515,7 @@ extern void irq_exit(void);
+ static inline void entering_irq(void)
+ {
+ irq_enter();
++ kvm_set_cpu_l1tf_flush_l1d();
+ }
+
+ static inline void entering_ack_irq(void)
+@@ -526,6 +528,7 @@ static inline void ipi_entering_ack_irq(
+ {
+ irq_enter();
+ ack_APIC_irq();
++ kvm_set_cpu_l1tf_flush_l1d();
+ }
+
+ static inline void exiting_irq(void)
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -261,6 +261,7 @@ __visible void __irq_entry smp_reschedul
+ {
+ ack_APIC_irq();
+ inc_irq_stat(irq_resched_count);
++ kvm_set_cpu_l1tf_flush_l1d();
+
+ if (trace_resched_ipi_enabled()) {
+ /*
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_
+ struct msg_desc msgdesc;
+
+ ack_APIC_irq();
++ kvm_set_cpu_l1tf_flush_l1d();
+ time_start = get_cycles();
+
+ bcp = &per_cpu(bau_control, smp_processor_id());
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Nicolai Stange <nstange@suse.de>
+Date: Sun, 22 Jul 2018 13:38:18 +0200
+Subject: x86/KVM/VMX: Don't set l1tf_flush_l1d from vmx_handle_external_intr()
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 18b57ce2eb8c8b9a24174a89250cf5f57c76ecdc upstream
+
+For VMEXITs caused by external interrupts, vmx_handle_external_intr()
+indirectly calls into the interrupt handlers through the host's IDT.
+
+It follows that these interrupts get accounted for in the
+kvm_cpu_l1tf_flush_l1d per-cpu flag.
+
+The subsequently executed vmx_l1d_flush() will thus be aware that some
+interrupts have happened and conduct a L1d flush anyway.
+
+Setting l1tf_flush_l1d from vmx_handle_external_intr() isn't needed
+anymore. Drop it.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9758,7 +9758,6 @@ static void vmx_handle_external_intr(str
+ [ss]"i"(__KERNEL_DS),
+ [cs]"i"(__KERNEL_CS)
+ );
+- vcpu->arch.l1tf_flush_l1d = true;
+ }
+ }
+ STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 5 Aug 2018 17:06:12 +0200
+Subject: Documentation/l1tf: Remove Yonah processors from not vulnerable list
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 58331136136935c631c2b5f06daf4c3006416e91 upstream
+
+Dave reported, that it's not confirmed that Yonah processors are
+unaffected. Remove them from the list.
+
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/l1tf.rst | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/Documentation/admin-guide/l1tf.rst
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -19,8 +19,6 @@ vulnerability is not present on:
+ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+ Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+- - The Intel Core Duo Yonah variants (2006 - 2008)
+-
+ - The Intel XEON PHI family
+
+ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Sun, 5 Aug 2018 16:07:45 +0200
+Subject: x86/speculation: Simplify sysfs report of VMX L1TF vulnerability
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit ea156d192f5257a5bf393d33910d3b481bf8a401 upstream
+
+Three changes to the content of the sysfs file:
+
+ - If EPT is disabled, L1TF cannot be exploited even across threads on the
+ same core, and SMT is irrelevant.
+
+ - If mitigation is completely disabled, and SMT is enabled, print "vulnerable"
+ instead of "vulnerable, SMT vulnerable"
+
+ - Reorder the two parts so that the main vulnerability state comes first
+ and the detail on SMT is second.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -738,9 +738,15 @@ static ssize_t l1tf_show_state(char *buf
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+- return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
+- cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
+- l1tf_vmx_states[l1tf_vmx_mitigation]);
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
++ (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
++ cpu_smt_control == CPU_SMT_ENABLED))
++ return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
++ l1tf_vmx_states[l1tf_vmx_mitigation]);
++
++ return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
++ l1tf_vmx_states[l1tf_vmx_mitigation],
++ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+ }
+ #else
+ static ssize_t l1tf_show_state(char *buf)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Sun, 5 Aug 2018 16:07:46 +0200
+Subject: x86/speculation: Use ARCH_CAPABILITIES to skip L1D flush on vmentry
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 8e0b2b916662e09dd4d09e5271cdf214c6b80e62 upstream
+
+Bit 3 of ARCH_CAPABILITIES tells a hypervisor that L1D flush on vmentry is
+not needed. Add a new value to enum vmx_l1d_flush_state, which is used
+either if there is no L1TF bug at all, or if bit 3 is set in ARCH_CAPABILITIES.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/include/asm/vmx.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 1 +
+ arch/x86/kvm/vmx.c | 10 ++++++++++
+ 4 files changed, 13 insertions(+)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -70,6 +70,7 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
++#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
+ #define ARCH_CAP_SSB_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -580,6 +580,7 @@ enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
+ VMENTER_L1D_FLUSH_EPT_DISABLED,
++ VMENTER_L1D_FLUSH_NOT_REQUIRED,
+ };
+
+ extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -731,6 +731,7 @@ static const char *l1tf_vmx_states[] = {
+ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
+ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
++ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
+ };
+
+ static ssize_t l1tf_show_state(char *buf)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -218,6 +218,16 @@ static int vmx_setup_l1d_flush(enum vmx_
+ return 0;
+ }
+
++ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
++ u64 msr;
++
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
++ if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
++ return 0;
++ }
++ }
++
+ /* If set to auto use the default l1tf mitigation method */
+ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
+ switch (l1tf_mitigation) {
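
For reference, the same ARCH_CAPABILITIES bit can be inspected from userspace
through the msr driver, which is one way to check whether a host will
advertise the skip-L1D-flush capability to its guests. The sketch below is
illustrative only and not part of the patch series; it assumes the msr module
is loaded, requires root, and the read simply fails on CPUs without the
ARCH_CAPABILITIES MSR.

  /*
   * Illustrative: read MSR_IA32_ARCH_CAPABILITIES (0x10a) from
   * /dev/cpu/0/msr and test ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (bit 3).
   */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
  #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1ULL << 3)

  int main(void)
  {
      uint64_t val;
      int fd = open("/dev/cpu/0/msr", O_RDONLY);

      if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_ARCH_CAPABILITIES) != sizeof(val)) {
          perror("rdmsr");
          return 1;
      }
      printf("ARCH_CAPABILITIES = %#llx, skip VMENTRY L1D flush: %s\n",
             (unsigned long long)val,
             (val & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) ? "yes" : "no");
      close(fd);
      return 0;
  }
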
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Sun, 5 Aug 2018 16:07:47 +0200
+Subject: KVM: VMX: Tell the nested hypervisor to skip L1D flush on vmentry
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 5b76a3cff011df2dcb6186c965a2e4d809a05ad4 upstream
+
+When nested virtualization is in use, VMENTER operations from the nested
+hypervisor into the nested guest will always be processed by the bare metal
+hypervisor, and KVM's "conditional cache flushes" mode in particular does a
+flush on nested vmentry. Therefore, include the "skip L1D flush on
+vmentry" bit in KVM's suggested ARCH_CAPABILITIES setting.
+
+Add the relevant Documentation.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/l1tf.rst | 21 +++++++++++++++++++++
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/vmx.c | 3 +--
+ arch/x86/kvm/x86.c | 26 +++++++++++++++++++++++++-
+ 4 files changed, 48 insertions(+), 3 deletions(-)
+
+--- a/Documentation/admin-guide/l1tf.rst
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -546,6 +546,27 @@ available:
+ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
+ parameter.
+
++3.4. Nested virtual machines
++""""""""""""""""""""""""""""
++
++When nested virtualization is in use, three operating systems are involved:
++the bare metal hypervisor, the nested hypervisor and the nested virtual
++machine. VMENTER operations from the nested hypervisor into the nested
++guest will always be processed by the bare metal hypervisor. If KVM is the
++bare metal hypervisor it will:
++
++ - Flush the L1D cache on every switch from the nested hypervisor to the
++ nested virtual machine, so that the nested hypervisor's secrets are not
++ exposed to the nested virtual machine;
++
++ - Flush the L1D cache on every switch from the nested virtual machine to
++ the nested hypervisor; this is a complex operation, and flushing the L1D
++ cache prevents the bare metal hypervisor's secrets from being exposed to the
++ nested virtual machine;
++
++ - Instruct the nested hypervisor to not perform any L1D cache flush. This
++ is an optimization to avoid double L1D flushing.
++
+
+ .. _default_mitigations:
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1415,6 +1415,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcp
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
+
++u64 kvm_get_arch_capabilities(void);
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+ int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6232,8 +6232,7 @@ static void vmx_vcpu_setup(struct vcpu_v
+ ++vmx->nmsrs;
+ }
+
+- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
++ vmx->arch_capabilities = kvm_get_arch_capabilities();
+
+ vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1098,11 +1098,35 @@ static u32 msr_based_features[] = {
+
+ static unsigned int num_msr_based_features;
+
++u64 kvm_get_arch_capabilities(void)
++{
++ u64 data;
++
++ rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
++
++ /*
++ * If we're doing cache flushes (either "always" or "cond")
++ * we will do one whenever the guest does a vmlaunch/vmresume.
++ * If an outer hypervisor is doing the cache flush for us
++ * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
++ * capability to the guest too, and if EPT is disabled we're not
++ * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
++ * require a nested hypervisor to do a flush of its own.
++ */
++ if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
++ data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
++
++ return data;
++}
++EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
++
+ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+ {
+ switch (msr->index) {
+- case MSR_IA32_UCODE_REV:
+ case MSR_IA32_ARCH_CAPABILITIES:
++ msr->data = kvm_get_arch_capabilities();
++ break;
++ case MSR_IA32_UCODE_REV:
+ rdmsrl_safe(msr->index, &msr->data);
+ break;
+ default:
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 7 Aug 2018 08:19:57 +0200
+Subject: cpu/hotplug: Fix SMT supported evaluation
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit bc2d8d262cba5736332cbc866acb11b1c5748aa9 upstream
+
+Josh reported that the late SMT evaluation in cpu_smt_state_init() sets
+cpu_smt_control to CPU_SMT_NOT_SUPPORTED in case that 'nosmt' was supplied
+on the kernel command line as it cannot differentiate between SMT disabled
+by BIOS and SMT soft disable via 'nosmt'. That wrecks the state and
+makes the sysfs interface unusable.
+
+Rework this so that during bringup of the non boot CPUs the availability of
+SMT is determined in cpu_smt_allowed(). If a newly booted CPU is not a
+'primary' thread then set the local cpu_smt_available marker and evaluate
+this explicitly right after the initial SMP bringup has finished.
+
+SMT evaluation on x86 is a trainwreck as the firmware has all the
+information _before_ booting the kernel, but there is no interface to query
+it.
+
+Fixes: 73d5e2b47264 ("cpu/hotplug: detect SMT disabled by BIOS")
+Reported-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 41 ++++++++++++++++++++++++++++-------------
+ kernel/smp.c | 2 ++
+ 4 files changed, 33 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -62,7 +62,7 @@ void __init check_bugs(void)
+ * identify_boot_cpu() initialized SMT support information, let the
+ * core code know.
+ */
+- cpu_smt_check_topology();
++ cpu_smt_check_topology_early();
+
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -178,10 +178,12 @@ enum cpuhp_smt_control {
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
++extern void cpu_smt_check_topology_early(void);
+ extern void cpu_smt_check_topology(void);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
++static inline void cpu_smt_check_topology_early(void) { }
+ static inline void cpu_smt_check_topology(void) { }
+ #endif
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -347,6 +347,8 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+
++static bool cpu_smt_available __read_mostly;
++
+ void __init cpu_smt_disable(bool force)
+ {
+ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+@@ -363,14 +365,28 @@ void __init cpu_smt_disable(bool force)
+
+ /*
+ * The decision whether SMT is supported can only be done after the full
+- * CPU identification. Called from architecture code.
++ * CPU identification. Called from architecture code before non boot CPUs
++ * are brought up.
+ */
+-void __init cpu_smt_check_topology(void)
++void __init cpu_smt_check_topology_early(void)
+ {
+ if (!topology_smt_supported())
+ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+ }
+
++/*
++ * If SMT was disabled by BIOS, detect it here, after the CPUs have been
++ * brought online. This ensures the smt/l1tf sysfs entries are consistent
++ * with reality. cpu_smt_available is set to true during the bringup of non
++ * boot CPUs when a SMT sibling is detected. Note, this may overwrite
++ * cpu_smt_control's previous setting.
++ */
++void __init cpu_smt_check_topology(void)
++{
++ if (!cpu_smt_available)
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++}
++
+ static int __init smt_cmdline_disable(char *str)
+ {
+ cpu_smt_disable(str && !strcmp(str, "force"));
+@@ -380,10 +396,18 @@ early_param("nosmt", smt_cmdline_disable
+
+ static inline bool cpu_smt_allowed(unsigned int cpu)
+ {
+- if (cpu_smt_control == CPU_SMT_ENABLED)
++ if (topology_is_primary_thread(cpu))
+ return true;
+
+- if (topology_is_primary_thread(cpu))
++ /*
++ * If the CPU is not a 'primary' thread and the booted_once bit is
++ * set then the processor has SMT support. Store this information
++ * for the late check of SMT support in cpu_smt_check_topology().
++ */
++ if (per_cpu(cpuhp_state, cpu).booted_once)
++ cpu_smt_available = true;
++
++ if (cpu_smt_control == CPU_SMT_ENABLED)
+ return true;
+
+ /*
+@@ -2125,15 +2149,6 @@ static const struct attribute_group cpuh
+
+ static int __init cpu_smt_state_init(void)
+ {
+- /*
+- * If SMT was disabled by BIOS, detect it here, after the CPUs have
+- * been brought online. This ensures the smt/l1tf sysfs entries are
+- * consistent with reality. Note this may overwrite cpu_smt_control's
+- * previous setting.
+- */
+- if (topology_max_smt_threads() == 1)
+- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+-
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -584,6 +584,8 @@ void __init smp_init(void)
+ num_nodes, (num_nodes > 1 ? "s" : ""),
+ num_cpus, (num_cpus > 1 ? "s" : ""));
+
++ /* Final decision about SMT support */
++ cpu_smt_check_topology();
+ /* Any cleanup work */
+ smp_cpus_done(setup_max_cpus);
+ }
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 7 Aug 2018 15:09:36 -0700
+Subject: x86/speculation/l1tf: Invert all not present mappings
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit f22cc87f6c1f771b57c407555cfefd811cdd9507 upstream
+
+For kernel mappings PAGE_PROTNONE is not necessarily set for a non present
+mapping, but the inversion logic explicitly checks for !PRESENT and
+PROT_NONE.
+
+Remove the PROT_NONE check and make the inversion unconditional for all not
+present mappings.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable-invert.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/pgtable-invert.h
++++ b/arch/x86/include/asm/pgtable-invert.h
+@@ -6,7 +6,7 @@
+
+ static inline bool __pte_needs_invert(u64 val)
+ {
+- return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
++ return !(val & _PAGE_PRESENT);
+ }
+
+ /* Get a mask to xor with the page table entry to get the correct pfn. */
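
The inversion helpers are easier to follow with a toy model: for every
not-present entry the PFN bits are XORed with an all-ones mask, so a stale
entry picked up by a speculative load no longer names attacker-chosen
physical memory, while XORing with the same mask on read recovers the
original PFN. The sketch below uses a simplified layout (bit 0 as the present
bit, bits 12..51 as the PFN) and is illustrative only, not the kernel's
pgtable code.

  /*
   * Toy model of L1TF PTE inversion, illustrative only. Assumed layout:
   * bit 0 is the present bit, bits 12..51 hold the PFN.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define _PAGE_PRESENT (1ULL << 0)
  #define PTE_PFN_MASK  (((1ULL << 52) - 1) & ~((1ULL << 12) - 1))

  /* Every not-present entry gets its PFN bits inverted. */
  static uint64_t protnone_mask(uint64_t val)
  {
      return (val & _PAGE_PRESENT) ? 0 : ~0ULL;
  }

  int main(void)
  {
      uint64_t pfn = 0x12345;
      uint64_t pte = pfn << 12;                /* not present, e.g. PROT_NONE */

      /* What actually gets stored in the page table. */
      uint64_t stored = pte ^ (protnone_mask(pte) & PTE_PFN_MASK);
      /* Reading the PFN back applies the same mask, recovering the value. */
      uint64_t readback = (stored ^ (protnone_mask(stored) & PTE_PFN_MASK)) >> 12;

      printf("real pfn:      %#llx\n", (unsigned long long)pfn);
      printf("stored pfn:    %#llx\n", (unsigned long long)((stored & PTE_PFN_MASK) >> 12));
      printf("recovered pfn: %#llx\n", (unsigned long long)readback);
      return 0;
  }
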
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 7 Aug 2018 15:09:37 -0700
+Subject: x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 0768f91530ff46683e0b372df14fd79fe8d156e5 upstream
+
+Some cases in THP like:
+ - MADV_FREE
+ - mprotect
+ - split
+
+mark the PMD non-present temporarily to prevent races. The window for
+an L1TF attack in these contexts is very small, but it wants to be fixed
+for correctness' sake.
+
+Use the proper low level functions for pmd/pud_mknotpresent() to address
+this.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/pgtable.h | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -410,11 +410,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pm
+ return pmd_set_flags(pmd, _PAGE_RW);
+ }
+
+-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+-{
+- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
+-}
+-
+ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+ {
+ pudval_t v = native_pud_val(pud);
+@@ -469,11 +464,6 @@ static inline pud_t pud_mkwrite(pud_t pu
+ return pud_set_flags(pud, _PAGE_RW);
+ }
+
+-static inline pud_t pud_mknotpresent(pud_t pud)
+-{
+- return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
+-}
+-
+ #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+ static inline int pte_soft_dirty(pte_t pte)
+ {
+@@ -577,6 +567,18 @@ static inline pud_t pfn_pud(unsigned lon
+ return __pud(pfn | check_pgprot(pgprot));
+ }
+
++static inline pmd_t pmd_mknotpresent(pmd_t pmd)
++{
++ return pfn_pmd(pmd_pfn(pmd),
++ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
++}
++
++static inline pud_t pud_mknotpresent(pud_t pud)
++{
++ return pfn_pud(pud_pfn(pud),
++ __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
++}
++
+ static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 7 Aug 2018 15:09:39 -0700
+Subject: x86/mm/pat: Make set_memory_np() L1TF safe
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 958f79b9ee55dfaf00c8106ed1c22a2919e0028b upstream
+
+set_memory_np() is used to mark kernel mappings not present, but it has
+its own open-coded mechanism which does not have the L1TF protection of
+inverting the address bits.
+
+Replace the open coded PTE manipulation with the L1TF protecting low level
+PTE routines.
+
+Passes the CPA self test.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/pageattr.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -1014,8 +1014,8 @@ static long populate_pmd(struct cpa_data
+
+ pmd = pmd_offset(pud, start);
+
+- set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+- massage_pgprot(pmd_pgprot)));
++ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
++ canon_pgprot(pmd_pgprot))));
+
+ start += PMD_SIZE;
+ cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
+@@ -1087,8 +1087,8 @@ static int populate_pud(struct cpa_data
+ * Map everything starting from the Gb boundary, possibly with 1G pages
+ */
+ while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
+- set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+- massage_pgprot(pud_pgprot)));
++ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
++ canon_pgprot(pud_pgprot))));
+
+ start += PUD_SIZE;
+ cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 7 Aug 2018 15:09:38 -0700
+Subject: x86/mm/kmmio: Make the tracer robust against L1TF
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 1063711b57393c1999248cccb57bebfaf16739e7 upstream
+
+The mmio tracer sets io mapping PTEs and PMDs to non-present when enabled
+without inverting the address bits, which makes the PTE entry vulnerable
+to L1TF.
+
+Make it use the right low level macros to actually invert the address bits
+to protect against L1TF.
+
+In principle this could be avoided because MMIO tracing is not likely to be
+enabled on production machines, but the fix is straightforward and for
+consistency's sake it's better to get rid of the open-coded PTE manipulation.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/kmmio.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmi
+
+ static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
+ {
++ pmd_t new_pmd;
+ pmdval_t v = pmd_val(*pmd);
+ if (clear) {
+- *old = v & _PAGE_PRESENT;
+- v &= ~_PAGE_PRESENT;
+- } else /* presume this has been called with clear==true previously */
+- v |= *old;
+- set_pmd(pmd, __pmd(v));
++ *old = v;
++ new_pmd = pmd_mknotpresent(*pmd);
++ } else {
++ /* Presume this has been called with clear==true previously */
++ new_pmd = __pmd(*old);
++ }
++ set_pmd(pmd, new_pmd);
+ }
+
+ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ {
+ pteval_t v = pte_val(*pte);
+ if (clear) {
+- *old = v & _PAGE_PRESENT;
+- v &= ~_PAGE_PRESENT;
+- } else /* presume this has been called with clear==true previously */
+- v |= *old;
+- set_pte_atomic(pte, __pte(v));
++ *old = v;
++ /* Nothing should care about address */
++ pte_clear(&init_mm, 0, pte);
++ } else {
++ /* Presume this has been called with clear==true previously */
++ set_pte_atomic(pte, __pte(*old));
++ }
+ }
+
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Fri, 1 Jun 2018 10:42:31 -0300
+Subject: tools headers: Synchronize prctl.h ABI header
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 63b89a19cc9ef911dcc64d41b60930c346eee0c0 upstream
+
+To pick up changes from:
+
+ $ git log --oneline -2 -i include/uapi/linux/prctl.h
+ 356e4bfff2c5 prctl: Add force disable speculation
+ b617cfc85816 prctl: Add speculation control prctls
+
+ $ tools/perf/trace/beauty/prctl_option.sh > before.c
+ $ cp include/uapi/linux/prctl.h tools/include/uapi/linux/prctl.h
+ $ tools/perf/trace/beauty/prctl_option.sh > after.c
+ $ diff -u before.c after.c
+# --- before.c 2018-06-01 10:39:53.834073962 -0300
+# +++ after.c 2018-06-01 10:42:11.307985394 -0300
+ @@ -35,6 +35,8 @@
+ [42] = "GET_THP_DISABLE",
+ [45] = "SET_FP_MODE",
+ [46] = "GET_FP_MODE",
+ + [52] = "GET_SPECULATION_CTRL",
+ + [53] = "SET_SPECULATION_CTRL",
+ };
+ static const char *prctl_set_mm_options[] = {
+ [1] = "START_CODE",
+ $
+
+This will be used by 'perf trace' to show these strings when beautifying
+the prctl syscall args. At some point we'll be able to say something
+like:
+
+ 'perf trace --all-cpus -e prctl(option=*SPEC*)'
+
+To filter the option argument by name.
+
+ This silences this warning when building tools/perf:
+
+ Warning: Kernel ABI header at 'tools/include/uapi/linux/prctl.h' differs from latest version at 'include/uapi/linux/prctl.h'
+
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: David Ahern <dsahern@gmail.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Wang Nan <wangnan0@huawei.com>
+Link: https://lkml.kernel.org/n/tip-zztsptwhc264r8wg44tqh5gp@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/include/uapi/linux/prctl.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/tools/include/uapi/linux/prctl.h
++++ b/tools/include/uapi/linux/prctl.h
+@@ -207,4 +207,16 @@ struct prctl_mm_map {
+ # define PR_SVE_VL_LEN_MASK 0xffff
+ # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+
++/* Per task speculation control */
++#define PR_GET_SPECULATION_CTRL 52
++#define PR_SET_SPECULATION_CTRL 53
++/* Speculation control variants */
++# define PR_SPEC_STORE_BYPASS 0
++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
++# define PR_SPEC_NOT_AFFECTED 0
++# define PR_SPEC_PRCTL (1UL << 0)
++# define PR_SPEC_ENABLE (1UL << 1)
++# define PR_SPEC_DISABLE (1UL << 2)
++# define PR_SPEC_FORCE_DISABLE (1UL << 3)
++
+ #endif /* _LINUX_PRCTL_H */
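The constants added above are the user-space face of the per-task speculation controls; 'perf trace' only needs the names, but a process can also drive the interface directly through prctl(2). A minimal sketch, assuming the definitions shown above are available from the installed <linux/prctl.h> (on older headers they would have to be supplied locally):

/* Minimal sketch: query and force-disable Speculative Store Bypass for the
 * calling task via the per-task speculation-control prctls.  Assumes
 * <linux/prctl.h> already carries the PR_*SPECULATION_CTRL definitions
 * shown above. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");  /* no kernel support */
		return 1;
	}
	printf("SSB control state: %#lx\n", state);

	/* Opt this task out of speculative store bypass, irrevocably. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");

	return 0;
}

PR_SPEC_FORCE_DISABLE differs from PR_SPEC_DISABLE in that the task can never re-enable the feature afterwards, which is what the force-disable commit referenced above adds.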
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Wed, 8 Aug 2018 11:00:16 +0100
+Subject: tools headers: Synchronise x86 cpufeatures.h for L1TF additions
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit e24f14b0ff985f3e09e573ba1134bfdf42987e05 upstream
+
+[ ... and some older changes in the 4.17.y backport too ...]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/arch/x86/include/asm/cpufeatures.h | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -198,7 +198,6 @@
+ #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+ #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+ #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
+-
+ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+@@ -207,13 +206,20 @@
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
+-
++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+ #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+-
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
++#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -274,9 +280,10 @@
+ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
+ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+-#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+-#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
++#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
++#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+@@ -333,7 +340,9 @@
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+@@ -363,5 +372,7 @@
+ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
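Beyond the perf tooling, the new bug and mitigation bits are what the kernel uses to populate the /proc/cpuinfo flags and the sysfs vulnerabilities directory, which is the easiest way to see what a booted kernel decided. A small sketch reading the L1TF entry; the path is the interface added by this series, so the file simply does not exist on kernels without it:

/* Sketch: report the kernel's L1TF assessment from sysfs. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf sysfs entry");  /* absent on pre-L1TF kernels */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("L1TF status: %s", line);  /* e.g. "Mitigation: PTE Inversion" */
	fclose(f);
	return 0;
}

The same directory carries meltdown, spectre_v1, spectre_v2 and spec_store_bypass entries matching the other X86_BUG_* words above.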
+From foo@baz Tue Aug 14 13:43:11 CEST 2018
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Fri, 10 Aug 2018 08:31:10 +0100
+Subject: x86/microcode: Allow late microcode loading with SMT disabled
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit 07d981ad4cf1e78361c6db1c28ee5ba105f96cc1 upstream
+
+The kernel unnecessarily prevents late microcode loading when SMT is
+disabled. It should be safe to allow it if all the primary threads are
+online.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/core.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -509,12 +509,20 @@ static struct platform_device *microcode
+
+ static int check_online_cpus(void)
+ {
+- if (num_online_cpus() == num_present_cpus())
+- return 0;
++ unsigned int cpu;
+
+- pr_err("Not all CPUs online, aborting microcode update.\n");
++ /*
++ * Make sure all CPUs are online. It's fine for SMT to be disabled if
++ * all the primary threads are still online.
++ */
++ for_each_present_cpu(cpu) {
++ if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
++ pr_err("Not all CPUs online, aborting microcode update.\n");
++ return -EINVAL;
++ }
++ }
+
+- return -EINVAL;
++ return 0;
+ }
+
+ static atomic_t late_cpus_in;
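In practice this code path is reached by requesting a late microcode reload from user space; with the change above, the request now succeeds when only SMT siblings are offline but still fails if a primary thread is missing. A small sketch of triggering it (root required; the sysfs reload node is the standard microcode driver interface):

/* Sketch: request a late microcode reload.  With the patch above the reload
 * is allowed as long as every primary thread is online, even if SMT
 * siblings have been taken offline. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/microcode/reload", "w");

	if (!f) {
		perror("open microcode reload node");
		return 1;
	}
	if (fputs("1", f) == EOF) {
		perror("write");
		fclose(f);
		return 1;
	}
	if (fclose(f) != 0) {  /* the write is committed on flush/close */
		perror("late microcode load");
		return 1;
	}
	puts("late microcode load requested");
	return 0;
}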
+From d0055f351e647f33f3b0329bff022213bf8aa085 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Tue, 14 Aug 2018 23:38:57 +0200
+Subject: x86/smp: fix non-SMP broken build due to redefinition of apic_id_is_primary_thread
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit d0055f351e647f33f3b0329bff022213bf8aa085 upstream.
+
+The function has an inline "return false;" definition with CONFIG_SMP=n,
+but the "real" definition is also visible, leading to a "redefinition of
+‘apic_id_is_primary_thread’" compiler error.
+
+Guard it with #ifdef CONFIG_SMP.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Fixes: 6a4d2657e048 ("x86/smp: Provide topology_is_primary_thread()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/apic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2193,6 +2193,7 @@ static int cpuid_to_apicid[] = {
+ [0 ... NR_CPUS - 1] = -1,
+ };
+
++#ifdef CONFIG_SMP
+ /**
+ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+ * @id: APIC ID to check
+@@ -2207,6 +2208,7 @@ bool apic_id_is_primary_thread(unsigned
+ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
+ return !(apicid & mask);
+ }
++#endif
+
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
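A quick worked example of the mask arithmetic above: with smp_num_siblings == 2, fls(2) is 2, so the sibling mask is (1 << 1) - 1 = 0x1 and only even APIC IDs are primary threads. A stand-alone model of the quoted check (not kernel code; fls() is rebuilt here from a compiler builtin):

#include <stdio.h>

/* fls(): 1-based index of the most significant set bit, 0 for no bits,
 * mirroring the kernel helper used in apic_id_is_primary_thread(). */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int is_primary_thread(unsigned int apicid, unsigned int smp_num_siblings)
{
	unsigned int mask;

	if (smp_num_siblings == 1)
		return 1;
	mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
	return !(apicid & mask);
}

int main(void)
{
	/* Two threads per core: mask = 0x1, so APIC IDs 0 and 2 are primary. */
	for (unsigned int id = 0; id < 4; id++)
		printf("apicid %u: %s\n", id,
		       is_primary_thread(id, 2) ? "primary" : "sibling");
	return 0;
}

This is the primary-thread test that the microcode and hotplug changes earlier in this series rely on to decide which CPUs must stay online.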
+From 269777aa530f3438ec1781586cdac0b5fe47b061 Mon Sep 17 00:00:00 2001
+From: Abel Vesa <abelvesa@linux.com>
+Date: Wed, 15 Aug 2018 00:26:00 +0300
+Subject: cpu/hotplug: Non-SMP machines do not make use of booted_once
+
+From: Abel Vesa <abelvesa@linux.com>
+
+commit 269777aa530f3438ec1781586cdac0b5fe47b061 upstream.
+
+Commit 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+breaks non-SMP builds.
+
+[ I suspect the 'bool' fields should just be made to be bitfields and be
+ exposed regardless of configuration, but that's a separate cleanup
+ that I'll leave to the owners of this file for later. - Linus ]
+
+Fixes: 0cc3cd21657b ("cpu/hotplug: Boot HT siblings at least once")
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Abel Vesa <abelvesa@linux.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpu.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2267,6 +2267,8 @@ void __init boot_cpu_init(void)
+ */
+ void __init boot_cpu_hotplug_init(void)
+ {
++#ifdef CONFIG_SMP
+ this_cpu_write(cpuhp_state.booted_once, true);
++#endif
+ this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+ }
+From 792adb90fa724ce07c0171cbc96b9215af4b1045 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Tue, 14 Aug 2018 20:50:47 +0200
+Subject: x86/init: fix build with CONFIG_SWAP=n
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 792adb90fa724ce07c0171cbc96b9215af4b1045 upstream.
+
+The introduction of generic_max_swapfile_size and arch-specific versions has
+broken linking on x86 with CONFIG_SWAP=n due to undefined reference to
+'generic_max_swapfile_size'. Fix it by compiling the x86-specific
+max_swapfile_size() only with CONFIG_SWAP=y.
+
+Reported-by: Tomas Pruzina <pruzinat@gmail.com>
+Fixes: 377eeaa8e11f ("x86/speculation/l1tf: Limit swap file size to MAX_PA/2")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/init.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -883,6 +883,7 @@ void update_cache_mode_entry(unsigned en
+ __pte2cachemode_tbl[entry] = cache;
+ }
+
++#ifdef CONFIG_SWAP
+ unsigned long max_swapfile_size(void)
+ {
+ unsigned long pages;
+@@ -903,3 +904,4 @@ unsigned long max_swapfile_size(void)
+ }
+ return pages;
+ }
++#endif
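The function being wrapped in CONFIG_SWAP here is the arch override that enforces the MAX_PA/2 swap cap named in the Fixes: tag: swap entries live in non-present PTEs, and keeping the swap offset below half of the physical address space guarantees that the inverted value points above any memory that can be populated. Rough arithmetic for a machine with 46 physical address bits (an assumed width; the kernel reads the real one from CPUID):

/* Back-of-the-envelope for the MAX_PA/2 swap cap mentioned above
 * (illustrative arithmetic only; not the kernel's implementation). */
#include <stdio.h>

int main(void)
{
	const unsigned int phys_bits = 46;   /* assumed physical address width */
	const unsigned int page_shift = 12;
	unsigned long long max_pages = 1ULL << (phys_bits - 1 - page_shift);

	printf("L1TF swap cap: %llu pages (%llu GiB)\n",
	       max_pages, (max_pages << page_shift) >> 30);
	return 0;
}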
diff --git a/kernel.spec b/kernel.spec
index 0fbc7c88e..ac489cdb4 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -676,18 +676,15 @@ Patch518: alarmtimer-prevent-overflow-for-relative-nanosleep.patch
Patch519: 1-2-posix-timers-Make-forward-callback-return-s64.patch
Patch520: 2-2-posix-timers-Sanitize-overrun-handling.patch
-# CVE-2018-13093 rhbz 1597766 1597767
-Patch521: 0001-xfs-validate-cached-inodes-are-free-when-allocated.patch
-
-# CVE-2018-13094 rhbz 1597771 1597772
-Patch522: 0001-xfs-don-t-call-xfs_da_shrink_inode-with-NULL-bp.patch
-
# CVE-2018-13095 rhbz 1597775 1597777
Patch523: 0001-xfs-More-robust-inode-extent-count-validation.patch
# rhbz 1597333
# Patch526: xhci-Fix-perceived-dead-host-due-to-runtime-suspend-.patch
+# CVE-2018-3620 CVE-2018-3646 rhbz 1585005 1615998
+Patch524: foreshadow.patch
+
# END OF PATCH DEFINITIONS
%endif
@@ -1940,6 +1937,15 @@ fi
#
#
%changelog
+* Wed Aug 15 2018 Justin M. Forbes <jforbes@fedoraproject.org> - 4.17.14-202
+- Include missing Foreshadow patches
+
+* Tue Aug 14 2018 Justin M. Forbes <jforbes@fedoraproject.org> - 4.17.14-201
+- Fix "Foreshadow" CVE-2018-3620 CVE-2018-3646 (rhbz 1585005 1615998)
+
+* Thu Aug 09 2018 Justin M. Forbes <jforbes@redhat.com> - 4.17.14-200
+- Linux v4.17.14
+
* Wed Aug 08 2018 Justin M. Forbes <jforbes@redhat.com> - 4.17.13-200
- Linux v4.17.13