author     Thorsten Leemhuis <fedora@leemhuis.info>  2018-01-11 07:52:22 +0100
committer  Thorsten Leemhuis <fedora@leemhuis.info>  2018-01-11 07:52:22 +0100
commit     73bc821b586c900691803325660fb31e3a92dacc (patch)
tree       259d10572825d1394d2dc96a4d6dbcc68b8f9957
parent     e851409ca3893fcf9a643133ee80a08835b0df7f (diff)
parent     c8bb01fd169684a52dd4af99f789886e4c1c0f76 (diff)
Merge remote-tracking branch 'origin/f26' into f26-user-thl-vanilla-fedora
-rw-r--r--  0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch      66
-rw-r--r--  0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch               58
-rw-r--r--  0002-sysfs-cpu-Add-vulnerability-folder.patch                    154
-rw-r--r--  0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch   82
-rw-r--r--  baseconfig/x86/CONFIG_RETPOLINE                                    1
-rw-r--r--  kernel-i686-PAE.config                                             1
-rw-r--r--  kernel-i686-PAEdebug.config                                        1
-rw-r--r--  kernel-i686-debug.config                                           1
-rw-r--r--  kernel-i686.config                                                 1
-rw-r--r--  kernel-x86_64-debug.config                                         1
-rw-r--r--  kernel-x86_64.config                                               1
-rw-r--r--  kernel.spec                                                       12
-rw-r--r--  prevent-bounds-check-bypass-via-speculative-execution.patch     1351
-rw-r--r--  retpoline.patch                                                 1480
-rw-r--r--  sources                                                            2
15 files changed, 3211 insertions, 1 deletion
diff --git a/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch b/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
new file mode 100644
index 000000000..b44c184d9
--- /dev/null
+++ b/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
@@ -0,0 +1,66 @@
+From e4d0e84e490790798691aaa0f2e598637f1867ec Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:21 -0600
+Subject: [PATCH 1/2] x86/cpu/AMD: Make LFENCE a serializing instruction
+
+To aid in speculation control, make LFENCE a serializing instruction
+since it has less overhead than MFENCE. This is done by setting bit 1
+of MSR 0xc0011029 (DE_CFG). Some families that support LFENCE do not
+have this MSR. For these families, the LFENCE instruction is already
+serializing.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net
+---
+ arch/x86/include/asm/msr-index.h | 2 ++
+ arch/x86/kernel/cpu/amd.c | 10 ++++++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index ab022618a50a..1e7d710fef43 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -352,6 +352,8 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT 20
+ #define MSR_FAM10H_NODE_ID 0xc001100c
++#define MSR_F10H_DECFG 0xc0011029
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
+
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1 0xc001001a
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index bcb75dc97d44..5b438d81beb2 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -829,6 +829,16 @@ static void init_amd(struct cpuinfo_x86 *c)
+ set_cpu_cap(c, X86_FEATURE_K8);
+
+ if (cpu_has(c, X86_FEATURE_XMM2)) {
++ /*
++ * A serializing LFENCE has less overhead than MFENCE, so
++ * use it for execution serialization. On families which
++ * don't have that MSR, LFENCE is already serializing.
++ * msr_set_bit() uses the safe accessors, too, even if the MSR
++ * is not present.
++ */
++ msr_set_bit(MSR_F10H_DECFG,
++ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++
+ /* MFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ }
+--
+2.14.3
+
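The change above relies on a set-and-verify idiom that the companion patch
0002-x86-cpu-AMD-Use-LFENCE_RDTSC (further below) completes. As a minimal
sketch of the combined idiom, using the constants introduced here; the
helper name lfence_serializing() is hypothetical, for illustration only:

	/* Illustrative sketch only, not part of the patch itself. */
	static bool lfence_serializing(void)
	{
		u64 val;

		/* msr_set_bit() uses the safe MSR accessors, so families
		 * without DE_CFG fault harmlessly instead of oopsing. */
		msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* Read the value back; a hypervisor may have ignored the write. */
		if (rdmsrl_safe(MSR_F10H_DECFG, &val))
			return false;

		return !!(val & BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT));
	}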
diff --git a/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch b/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
new file mode 100644
index 000000000..e358c16f9
--- /dev/null
+++ b/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
@@ -0,0 +1,58 @@
+From 99c6fa2511d8a683e61468be91b83f85452115fa Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 6 Jan 2018 11:49:23 +0000
+Subject: [PATCH 1/2] x86/cpufeatures: Add X86_BUG_SPECTRE_V[12]
+
+Add the bug bits for spectre v1/2 and force them unconditionally for all
+cpus.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1515239374-23361-2-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/kernel/cpu/common.c | 3 +++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 21ac898df2d8..1641c2f96363 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -342,5 +342,7 @@
+ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2d3bd2215e5b..372ba3fb400f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -902,6 +902,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ if (c->x86_vendor != X86_VENDOR_AMD)
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ fpu__init_system(c);
+
+ #ifdef CONFIG_X86_32
+--
+2.14.3
+
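With the bug bits forced, any code in the tree can key off them through the
standard cpufeature test macros. A minimal sketch of a hypothetical caller
(not part of this patch):

	/* Hypothetical caller; boot_cpu_has_bug() is the stock accessor
	 * for X86_BUG_* bits. */
	static void __init report_spectre_bugs(void)
	{
		if (boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
			pr_info("CPU affected by Spectre v1 (conditional branches)\n");

		if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
			pr_info("CPU affected by Spectre v2 (indirect branches)\n");
	}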
diff --git a/0002-sysfs-cpu-Add-vulnerability-folder.patch b/0002-sysfs-cpu-Add-vulnerability-folder.patch
new file mode 100644
index 000000000..8f1ae3a6a
--- /dev/null
+++ b/0002-sysfs-cpu-Add-vulnerability-folder.patch
@@ -0,0 +1,154 @@
+From 87590ce6e373d1a5401f6539f0c59ef92dd924a9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 7 Jan 2018 22:48:00 +0100
+Subject: [PATCH 2/2] sysfs/cpu: Add vulnerability folder
+
+As the meltdown/spectre problem affects several CPU architectures, it makes
+sense to have a common way to express whether a system is affected by a
+particular vulnerability or not. If affected, the way to express the
+mitigation should be common as well.
+
+Create /sys/devices/system/cpu/vulnerabilities folder and files for
+meltdown, spectre_v1 and spectre_v2.
+
+Allow architectures to override the show function.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Linus Torvalds <torvalds@linuxfoundation.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180107214913.096657732@linutronix.de
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 16 ++++++++
+ drivers/base/Kconfig | 3 ++
+ drivers/base/cpu.c | 48 ++++++++++++++++++++++
+ include/linux/cpu.h | 7 ++++
+ 4 files changed, 74 insertions(+)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index f3d5817c4ef0..bd3a88e16d8b 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -373,3 +373,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: information about CPUs heterogeneity.
+
+ cpu_capacity: capacity of cpu#.
++
++What: /sys/devices/system/cpu/vulnerabilities
++ /sys/devices/system/cpu/vulnerabilities/meltdown
++ /sys/devices/system/cpu/vulnerabilities/spectre_v1
++ /sys/devices/system/cpu/vulnerabilities/spectre_v2
++Date:		January 2018
++Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
++Description: Information about CPU vulnerabilities
++
++ The files are named after the code names of CPU
++ vulnerabilities. The output of those files reflects the
++ state of the CPUs in the system. Possible output values:
++
++ "Not affected" CPU is not affected by the vulnerability
++ "Vulnerable" CPU is affected and no mitigation in effect
++		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
+diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
+index 2f6614c9a229..37a71fd9043f 100644
+--- a/drivers/base/Kconfig
++++ b/drivers/base/Kconfig
+@@ -235,6 +235,9 @@ config GENERIC_CPU_DEVICES
+ config GENERIC_CPU_AUTOPROBE
+ bool
+
++config GENERIC_CPU_VULNERABILITIES
++ bool
++
+ config SOC_BUS
+ bool
+ select GLOB
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 321cd7b4d817..825964efda1d 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -501,10 +501,58 @@ static void __init cpu_dev_register_generic(void)
+ #endif
+ }
+
++#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
++
++ssize_t __weak cpu_show_meltdown(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
++ssize_t __weak cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
++ssize_t __weak cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
++static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
++static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
++static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
++
++static struct attribute *cpu_root_vulnerabilities_attrs[] = {
++ &dev_attr_meltdown.attr,
++ &dev_attr_spectre_v1.attr,
++ &dev_attr_spectre_v2.attr,
++ NULL
++};
++
++static const struct attribute_group cpu_root_vulnerabilities_group = {
++ .name = "vulnerabilities",
++ .attrs = cpu_root_vulnerabilities_attrs,
++};
++
++static void __init cpu_register_vulnerabilities(void)
++{
++ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
++ &cpu_root_vulnerabilities_group))
++ pr_err("Unable to register CPU vulnerabilities\n");
++}
++
++#else
++static inline void cpu_register_vulnerabilities(void) { }
++#endif
++
+ void __init cpu_dev_init(void)
+ {
+ if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
+ panic("Failed to register CPU subsystem");
+
+ cpu_dev_register_generic();
++ cpu_register_vulnerabilities();
+ }
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 938ea8ae0ba4..c816e6f2730c 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
+ extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
+ extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+
++extern ssize_t cpu_show_meltdown(struct device *dev,
++ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf);
++
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+--
+2.14.3
+
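Because the default show functions are __weak, an architecture overrides one
simply by supplying a strong definition with the same prototype. A sketch of
such an override, modeled loosely on the x86 implementation that followed;
the mitigation logic shown is illustrative, not part of this patch:

	/* Arch-side override: a strong definition replaces the __weak
	 * "Not affected" stub in drivers/base/cpu.c. */
	ssize_t cpu_show_meltdown(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
			return sprintf(buf, "Not affected\n");
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");
		return sprintf(buf, "Vulnerable\n");
	}

Whatever the override returns is what reading
/sys/devices/system/cpu/vulnerabilities/meltdown prints on a running system.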
diff --git a/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch b/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
new file mode 100644
index 000000000..8676c732f
--- /dev/null
+++ b/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
@@ -0,0 +1,82 @@
+From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:32 -0600
+Subject: [PATCH 2/2] x86/cpu/AMD: Use LFENCE_RDTSC in preference to
+ MFENCE_RDTSC
+
+With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference
+to MFENCE_RDTSC. However, since the kernel could be running under a
+hypervisor that does not support writing that MSR, read the MSR back and
+verify that the bit has been set successfully. If the MSR can be read
+and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the
+MFENCE_RDTSC feature.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 1e7d710fef43..fa11fb1fa570 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -354,6 +354,7 @@
+ #define MSR_FAM10H_NODE_ID 0xc001100c
+ #define MSR_F10H_DECFG 0xc0011029
+ #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1 0xc001001a
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 5b438d81beb2..ea831c858195 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86 *c)
+ set_cpu_cap(c, X86_FEATURE_K8);
+
+ if (cpu_has(c, X86_FEATURE_XMM2)) {
++ unsigned long long val;
++ int ret;
++
+ /*
+ * A serializing LFENCE has less overhead than MFENCE, so
+ * use it for execution serialization. On families which
+@@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86 *c)
+ msr_set_bit(MSR_F10H_DECFG,
+ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+- /* MFENCE stops RDTSC speculation */
+- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++ /*
++ * Verify that the MSR write was successful (could be running
++ * under a hypervisor) and only then assume that LFENCE is
++ * serializing.
++ */
++ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
++ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++ /* A serializing LFENCE stops RDTSC speculation */
++ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
++ } else {
++ /* MFENCE stops RDTSC speculation */
++ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++ }
+ }
+
+ /*
+--
+2.14.3
+
diff --git a/baseconfig/x86/CONFIG_RETPOLINE b/baseconfig/x86/CONFIG_RETPOLINE
new file mode 100644
index 000000000..c46e12644
--- /dev/null
+++ b/baseconfig/x86/CONFIG_RETPOLINE
@@ -0,0 +1 @@
+CONFIG_RETPOLINE=y
diff --git a/kernel-i686-PAE.config b/kernel-i686-PAE.config
index e4191b109..c730bf1f6 100644
--- a/kernel-i686-PAE.config
+++ b/kernel-i686-PAE.config
@@ -4157,6 +4157,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel-i686-PAEdebug.config b/kernel-i686-PAEdebug.config
index cbdfd27d6..1690e7ebf 100644
--- a/kernel-i686-PAEdebug.config
+++ b/kernel-i686-PAEdebug.config
@@ -4178,6 +4178,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel-i686-debug.config b/kernel-i686-debug.config
index 6445aa289..0a12c0a41 100644
--- a/kernel-i686-debug.config
+++ b/kernel-i686-debug.config
@@ -4178,6 +4178,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel-i686.config b/kernel-i686.config
index 414eca53e..f90e98970 100644
--- a/kernel-i686.config
+++ b/kernel-i686.config
@@ -4157,6 +4157,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel-x86_64-debug.config b/kernel-x86_64-debug.config
index 0ee7f8330..276257ca3 100644
--- a/kernel-x86_64-debug.config
+++ b/kernel-x86_64-debug.config
@@ -4221,6 +4221,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel-x86_64.config b/kernel-x86_64.config
index 73abee473..b374723a2 100644
--- a/kernel-x86_64.config
+++ b/kernel-x86_64.config
@@ -4200,6 +4200,7 @@ CONFIG_REMOTEPROC=m
# CONFIG_RESET_ATTACK_MITIGATION is not set
# CONFIG_RESET_HSDK_V1 is not set
# CONFIG_RESET_TI_SYSCON is not set
+CONFIG_RETPOLINE=y
# CONFIG_RFD_FTL is not set
CONFIG_RFKILL_GPIO=m
CONFIG_RFKILL_INPUT=y
diff --git a/kernel.spec b/kernel.spec
index 85f9d0f98..9988ee7d9 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -650,6 +650,14 @@ Patch505: netfilter-nfnetlink_cthelper-Add-missing-permission-.patch
# https://patchwork.kernel.org/patch/10104349/
Patch506: e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch
+# 550-600 Meltdown and Spectre Fixes
+Patch550: prevent-bounds-check-bypass-via-speculative-execution.patch
+Patch551: 0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
+Patch552: 0002-sysfs-cpu-Add-vulnerability-folder.patch
+Patch553: 0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
+Patch554: 0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
+Patch555: retpoline.patch
+
# 600 - Patches for improved Bay and Cherry Trail device support
# Below patches are submitted upstream, awaiting review / merging
Patch601: 0001-Input-gpio_keys-Allow-suppression-of-input-events-fo.patch
@@ -2262,6 +2270,10 @@ fi
#
#
%changelog
+* Wed Jan 10 2018 Justin M. Forbes <jforbes@fedoraproject.org> - 4.14.13-200
+- Linux v4.14.13
+- Initial retpoline fixes for Spectre v2
+
* Mon Jan 08 2018 Laura Abbott <labbott@redhat.com>
- Disable CONFIG_RESET_ATTACK_MITIGATION (rhbz 1532058)
diff --git a/prevent-bounds-check-bypass-via-speculative-execution.patch b/prevent-bounds-check-bypass-via-speculative-execution.patch
new file mode 100644
index 000000000..0969a6792
--- /dev/null
+++ b/prevent-bounds-check-bypass-via-speculative-execution.patch
@@ -0,0 +1,1351 @@
+From 1d115042dde79e3c0fcc18af548342b172e749e1 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 7 Dec 2017 17:14:24 +0000
+Subject: [PATCH 01/19] asm-generic/barrier: add generic nospec helpers
+
+Under speculation, CPUs may mis-predict branches in bounds checks. Thus,
+memory accesses under a bounds check may be speculated even if the
+bounds check fails, providing a primitive for building a side channel.
+
+This patch adds helpers which can be used to inhibit the use of
+out-of-bounds pointers under speculation.
+
+A generic implementation is provided for compatibility, but does not
+guarantee safety under speculation. Architectures are expected to
+override these helpers as necessary.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ include/asm-generic/barrier.h | 68 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 68 insertions(+)
+
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index fe297b599b0a..91c3071f49e5 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -54,6 +54,74 @@
+ #define read_barrier_depends() do { } while (0)
+ #endif
+
++/*
++ * Inhibit subsequent speculative memory accesses.
++ *
++ * Architectures with a suitable memory barrier should provide an
++ * implementation. This is non-portable, and generic code should use
++ * nospec_ptr().
++ */
++#ifndef __nospec_barrier
++#define __nospec_barrier() do { } while (0)
++#endif
++
++/**
++ * nospec_ptr() - Ensure a pointer is bounded, even under speculation.
++ *
++ * @ptr: the pointer to test
++ * @lo: the lower valid bound for @ptr, inclusive
++ * @hi: the upper valid bound for @ptr, exclusive
++ *
++ * If @ptr falls in the interval [@lo, @hi), returns @ptr, otherwise returns
++ * NULL.
++ *
++ * Architectures which do not provide __nospec_barrier() should override this
++ * to ensure that ptr falls in the [lo, hi) interval both under architectural
++ * execution and under speculation, preventing propagation of an out-of-bounds
++ * pointer to code which is speculatively executed.
++ */
++#ifndef nospec_ptr
++#define nospec_ptr(ptr, lo, hi) \
++({ \
++ typeof (ptr) __ret; \
++ typeof (ptr) __ptr = (ptr); \
++ typeof (ptr) __lo = (lo); \
++ typeof (ptr) __hi = (hi); \
++ \
++ __ret = (__lo <= __ptr && __ptr < __hi) ? __ptr : NULL; \
++ \
++ __nospec_barrier(); \
++ \
++ __ret; \
++})
++#endif
++
++/**
++ * nospec_array_ptr - Generate a pointer to an array element, ensuring the
++ * pointer is bounded under speculation.
++ *
++ * @arr: the base of the array
++ * @idx: the index of the element
++ * @sz: the number of elements in the array
++ *
++ * If @idx falls in the interval [0, @sz), returns the pointer to @arr[@idx],
++ * otherwise returns NULL.
++ *
++ * This is a wrapper around nospec_ptr(), provided for convenience.
++ * Architectures should implement nospec_ptr() to ensure this is the case
++ * under speculation.
++ */
++#define nospec_array_ptr(arr, idx, sz) \
++({ \
++ typeof(*(arr)) *__arr = (arr); \
++ typeof(idx) __idx = (idx); \
++ typeof(sz) __sz = (sz); \
++ \
++ nospec_ptr(__arr + __idx, __arr, __arr + __sz); \
++})
++
++#undef __nospec_barrier
++
+ #ifndef __smp_mb
+ #define __smp_mb() mb()
+ #endif
+--
+2.14.3
+
+From 0a9659964052448903985b38f08b3912ab65f1a9 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 Jan 2018 19:47:06 +0000
+Subject: [PATCH 02/19] Documentation: document nospec helpers
+
+Document the rationale and usage of the new nospec*() helpers.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ Documentation/speculation.txt | 166 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 166 insertions(+)
+ create mode 100644 Documentation/speculation.txt
+
+diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt
+new file mode 100644
+index 000000000000..748fcd4dcda4
+--- /dev/null
++++ b/Documentation/speculation.txt
+@@ -0,0 +1,166 @@
++This document explains potential effects of speculation, and how undesirable
++effects can be mitigated portably using common APIs.
++
++===========
++Speculation
++===========
++
++To improve performance and minimize average latencies, many contemporary CPUs
++employ speculative execution techniques such as branch prediction, performing
++work which may be discarded at a later stage.
++
++Typically speculative execution cannot be observed from architectural state,
++such as the contents of registers. However, in some cases it is possible to
++observe its impact on microarchitectural state, such as the presence or
++absence of data in caches. Such state may form side-channels which can be
++observed to extract secret information.
++
++For example, in the presence of branch prediction, it is possible for bounds
++checks to be ignored by code which is speculatively executed. Consider the
++following code:
++
++ int load_array(int *array, unsigned int idx) {
++ if (idx >= MAX_ARRAY_ELEMS)
++ return 0;
++ else
++ return array[idx];
++ }
++
++Which, on arm64, may be compiled to an assembly sequence such as:
++
++ CMP <idx>, #MAX_ARRAY_ELEMS
++ B.LT less
++ MOV <returnval>, #0
++ RET
++ less:
++ LDR <returnval>, [<array>, <idx>]
++ RET
++
++It is possible that a CPU mis-predicts the conditional branch, and
++speculatively loads array[idx], even if idx >= MAX_ARRAY_ELEMS. This value
++will subsequently be discarded, but the speculated load may affect
++microarchitectural state which can be subsequently measured.
++
++More complex sequences involving multiple dependent memory accesses may result
++in sensitive information being leaked. Consider the following code, building on
++the prior example:
++
++ int load_dependent_arrays(int *arr1, int *arr2, int idx) {
++	int val1, val2;
++
++ val1 = load_array(arr1, idx);
++ val2 = load_array(arr2, val1);
++
++ return val2;
++ }
++
++Under speculation, the first call to load_array() may return the value of an
++out-of-bounds address, while the second call will influence microarchitectural
++state dependent on this value. This may provide an arbitrary read primitive.
++
++====================================
++Mitigating speculation side-channels
++====================================
++
++The kernel provides a generic API to ensure that bounds checks are respected
++even under speculation. Architectures which are affected by speculation-based
++side-channels are expected to implement these primitives.
++
++The following helpers found in <asm/barrier.h> can be used to prevent
++information from being leaked via side-channels.
++
++* nospec_ptr(ptr, lo, hi)
++
++ Returns a sanitized pointer that is bounded by the [lo, hi) interval. When
++ ptr < lo, or ptr >= hi, NULL is returned. Prevents an out-of-bounds pointer
++ being propagated to code which is speculatively executed.
++
++ This is expected to be used by code which computes pointers to data
++ structures, where part of the address (such as an array index) may be
++ user-controlled.
++
++ This can be used to protect the earlier load_array() example:
++
++ int load_array(int *array, unsigned int idx)
++ {
++ int *elem;
++
++ if ((elem = nospec_ptr(array + idx, array, array + MAX_ARRAY_ELEMS)))
++ return *elem;
++ else
++ return 0;
++ }
++
++ This can also be used in situations where multiple fields on a structure are
++ accessed:
++
++ struct foo array[SIZE];
++ int a, b;
++
++ void do_thing(int idx)
++ {
++ struct foo *elem;
++
++		if ((elem = nospec_ptr(array + idx, array, array + SIZE))) {
++ a = elem->field_a;
++ b = elem->field_b;
++ }
++ }
++
++ It is imperative that the returned pointer is used. Pointers which are
++ generated separately are subject to a number of potential CPU and compiler
++ optimizations, and may still be used speculatively. For example, this means
++ that the following sequence is unsafe:
++
++ struct foo array[SIZE];
++ int a, b;
++
++ void do_thing(int idx)
++ {
++ if (nospec_ptr(array + idx, array, array + SIZE) != NULL) {
++ // unsafe as wrong pointer is used
++ a = array[idx].field_a;
++ b = array[idx].field_b;
++ }
++ }
++
++ Similarly, it is unsafe to compare the returned pointer with other pointers,
++ as this may permit the compiler to substitute one pointer with another,
++ permitting speculation. For example, the following sequence is unsafe:
++
++ struct foo array[SIZE];
++ int a, b;
++
++ void do_thing(int idx)
++ {
++		struct foo *elem = nospec_ptr(array + idx, array, array + SIZE);
++
++ // unsafe due to pointer substitution
++ if (elem == &array[idx]) {
++ a = elem->field_a;
++ b = elem->field_b;
++ }
++ }
++
++* nospec_array_ptr(arr, idx, sz)
++
++ Returns a sanitized pointer to arr[idx] only if idx falls in the [0, sz)
++  interval. When idx < 0 or idx >= sz, NULL is returned. Prevents an
++ out-of-bounds pointer being propagated to code which is speculatively
++ executed.
++
++ This is a convenience function which wraps nospec_ptr(), and has the same
++ caveats w.r.t. the use of the returned pointer.
++
++ For example, this may be used as follows:
++
++ int load_array(int *array, unsigned int idx)
++ {
++ int *elem;
++
++ if ((elem = nospec_array_ptr(array, idx, MAX_ARRAY_ELEMS)))
++ return *elem;
++ else
++ return 0;
++ }
++
+--
+2.14.3
+
+From 2b98026ffeeb0b4a06c80fe39bfebd5cef4a8fa6 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 7 Dec 2017 17:15:01 +0000
+Subject: [PATCH 03/19] arm64: implement nospec_ptr()
+
+This patch implements nospec_ptr() for arm64, following the recommended
+architectural sequence.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ arch/arm64/include/asm/barrier.h | 55 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index 77651c49ef44..b4819f6a0e5c 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -40,6 +40,61 @@
+ #define dma_rmb() dmb(oshld)
+ #define dma_wmb() dmb(oshst)
+
++#define __load_no_speculate_n(ptr, lo, hi, failval, cmpptr, w, sz) \
++({ \
++ typeof(*ptr) __nln_val; \
++ typeof(*ptr) __failval = \
++ (typeof(*ptr))(unsigned long)(failval); \
++ \
++ asm volatile ( \
++ " cmp %[c], %[l]\n" \
++ " ccmp %[c], %[h], 2, cs\n" \
++ " b.cs 1f\n" \
++ " ldr" #sz " %" #w "[v], %[p]\n" \
++ "1: csel %" #w "[v], %" #w "[v], %" #w "[f], cc\n" \
++ " hint #0x14 // CSDB\n" \
++ : [v] "=&r" (__nln_val) \
++ : [p] "m" (*(ptr)), [l] "r" (lo), [h] "r" (hi), \
++ [f] "rZ" (__failval), [c] "r" (cmpptr) \
++ : "cc"); \
++ \
++ __nln_val; \
++})
++
++#define __load_no_speculate(ptr, lo, hi, failval, cmpptr) \
++({ \
++ typeof(*(ptr)) __nl_val; \
++ \
++ switch (sizeof(__nl_val)) { \
++ case 1: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, w, b); \
++ break; \
++ case 2: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, w, h); \
++ break; \
++ case 4: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, w, ); \
++ break; \
++ case 8: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, x, ); \
++ break; \
++ default: \
++ BUILD_BUG(); \
++ } \
++ \
++ __nl_val; \
++})
++
++#define nospec_ptr(ptr, lo, hi) \
++({ \
++ typeof(ptr) __np_ptr = (ptr); \
++ __load_no_speculate(&__np_ptr, lo, hi, 0, __np_ptr); \
++})
++
+ #define __smp_mb() dmb(ish)
+ #define __smp_rmb() dmb(ishld)
+ #define __smp_wmb() dmb(ishst)
+--
+2.14.3
+
+From cedaed8d38108dc6b68c1418d9b942f64b2be488 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 5 Jan 2018 16:44:36 +0000
+Subject: [PATCH 04/19] arm: implement nospec_ptr()
+
+This patch implements nospec_ptr() for arm, following the recommended
+architectural sequences for the arm and thumb instruction sets.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ arch/arm/include/asm/barrier.h | 75 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 75 insertions(+)
+
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index 40f5c410fd8c..6384c90e4b72 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -37,6 +37,81 @@
+ #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+ #endif
+
++#ifdef CONFIG_THUMB2_KERNEL
++#define __load_no_speculate_n(ptr, lo, hi, failval, cmpptr, sz) \
++({ \
++ typeof(*ptr) __nln_val; \
++ typeof(*ptr) __failval = \
++ (typeof(*ptr))(unsigned long)(failval); \
++ \
++ asm volatile ( \
++ " cmp %[c], %[l]\n" \
++ " it hs\n" \
++ " cmphs %[h], %[c]\n" \
++ " blo 1f\n" \
++ " ld" #sz " %[v], %[p]\n" \
++ "1: it lo\n" \
++ " movlo %[v], %[f]\n" \
++ " .inst 0xf3af8014 @ CSDB\n" \
++ : [v] "=&r" (__nln_val) \
++ : [p] "m" (*(ptr)), [l] "r" (lo), [h] "r" (hi), \
++ [f] "r" (__failval), [c] "r" (cmpptr) \
++ : "cc"); \
++ \
++ __nln_val; \
++})
++#else
++#define __load_no_speculate_n(ptr, lo, hi, failval, cmpptr, sz) \
++({ \
++ typeof(*ptr) __nln_val; \
++ typeof(*ptr) __failval = \
++ (typeof(*ptr))(unsigned long)(failval); \
++ \
++ asm volatile ( \
++ " cmp %[c], %[l]\n" \
++ " cmphs %[h], %[c]\n" \
++ " ldr" #sz "hi %[v], %[p]\n" \
++ " movls %[v], %[f]\n" \
++ " .inst 0xe320f014 @ CSDB\n" \
++ : [v] "=&r" (__nln_val) \
++ : [p] "m" (*(ptr)), [l] "r" (lo), [h] "r" (hi), \
++ [f] "r" (__failval), [c] "r" (cmpptr) \
++ : "cc"); \
++ \
++ __nln_val; \
++})
++#endif
++
++#define __load_no_speculate(ptr, lo, hi, failval, cmpptr) \
++({ \
++ typeof(*(ptr)) __nl_val; \
++ \
++ switch (sizeof(__nl_val)) { \
++ case 1: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, b); \
++ break; \
++ case 2: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, h); \
++ break; \
++ case 4: \
++ __nl_val = __load_no_speculate_n(ptr, lo, hi, failval, \
++ cmpptr, ); \
++ break; \
++ default: \
++ BUILD_BUG(); \
++ } \
++ \
++ __nl_val; \
++})
++
++#define nospec_ptr(ptr, lo, hi) \
++({ \
++ typeof(ptr) __np_ptr = (ptr); \
++ __load_no_speculate(&__np_ptr, lo, hi, 0, __np_ptr); \
++})
++
+ #ifdef CONFIG_ARM_HEAVY_MB
+ extern void (*soc_mb)(void);
+ extern void arm_heavy_mb(void);
+--
+2.14.3
+
+From d14a4150a2f74a068247cf3846405904e21a8d2c Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 14:51:58 -0800
+Subject: [PATCH 05/19] x86: implement nospec_barrier()
+
+The new speculative execution barrier, nospec_barrier(), ensures
+that any userspace controllable speculation doesn't cross the boundary.
+
+Any user-observable speculative activity on this CPU thread before this
+point either completes, reaches a state where it can no longer cause
+observable activity, or is aborted before instructions after the barrier
+execute.
+
+In the x86 case nospec_barrier() resolves to an lfence if
+X86_FEATURE_LFENCE_RDTSC is present. Other architectures can define
+their variants.
+
+Note the expectation is that this barrier is never used directly, at
+least outside of architecture specific code. It is implied by the
+nospec_{array_ptr,ptr} macros.
+
+x86, for now, depends on the barrier for protection while other
+architectures place their speculation prevention in
+nospec_{ptr,array_ptr} when a barrier instruction is not available or
+too heavy-weight. In the x86 case lfence is not a fully serializing
+instruction so it is not as expensive as other barriers.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Suggested-by: Arjan van de Ven <arjan@linux.intel.com>
+Suggested-by: Alan Cox <alan.cox@intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ arch/x86/include/asm/barrier.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 7fb336210e1b..1148cd9f5ae7 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -24,6 +24,12 @@
+ #define wmb() asm volatile("sfence" ::: "memory")
+ #endif
+
++/*
++ * CPUs without LFENCE don't really speculate much. Possibly fall back to IRET-to-self.
++ */
++#define __nospec_barrier() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
++#define nospec_barrier __nospec_barrier
++
+ #ifdef CONFIG_X86_PPRO_FENCE
+ #define dma_rmb() rmb()
+ #else
+--
+2.14.3
+
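For the rare arch-specific spot that cannot use nospec_{ptr,array_ptr}(),
the barrier is placed directly after the bounds check. A hypothetical
open-coded sketch (generic code should prefer nospec_array_ptr()):

	static int read_entry(const int *arr, unsigned long idx,
			      unsigned long nr)
	{
		if (idx >= nr)
			return -EINVAL;

		/* No speculative load of arr[idx] can pass the lfence. */
		nospec_barrier();

		return arr[idx];
	}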
+From d077f11b7fcb697af0c9419cc2273d179e6f51ad Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 4 Jan 2018 13:36:20 -0800
+Subject: [PATCH 06/19] x86, barrier: stop speculation for failed access_ok
+
+When access_ok fails we should always stop speculating.
+Add the required barriers to the x86 access_ok macro.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: x86@kernel.org
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ arch/x86/include/asm/uaccess.h | 17 +++++++++++++----
+ include/asm-generic/barrier.h | 6 +++---
+ 2 files changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 574dff4d2913..9b6f20cfaeb9 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -43,6 +43,8 @@ static inline void set_fs(mm_segment_t fs)
+ /*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
++ *
++ * We also disable speculation when a check fails.
+ */
+ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+ {
+@@ -53,14 +55,19 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+ * important to subtract the size from the
+ * limit, not add it to the address).
+ */
+- if (__builtin_constant_p(size))
+- return unlikely(addr > limit - size);
++ if (__builtin_constant_p(size)) {
++ if (unlikely(addr > limit - size))
++ return true;
++ nospec_barrier();
++ return false;
++ }
+
+ /* Arbitrary sizes? Be careful about overflow */
+ addr += size;
+- if (unlikely(addr < size))
++ if (unlikely(addr < size || addr > limit))
+ return true;
+- return unlikely(addr > limit);
++ nospec_barrier();
++ return false;
+ }
+
+ #define __range_not_ok(addr, size, limit) \
+@@ -94,6 +101,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
++ *
++ * Stops speculation automatically
+ */
+ #define access_ok(type, addr, size) \
+ ({ \
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 91c3071f49e5..a11765eba860 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -59,7 +59,9 @@
+ *
+ * Architectures with a suitable memory barrier should provide an
+ * implementation. This is non-portable, and generic code should use
+- * nospec_ptr().
++ * nospec_{array_ptr,ptr}. Arch-specific code should define and use
++ * nospec_barrier() for usages where nospec_{array_ptr,ptr} is
++ * unsuitable.
+ */
+ #ifndef __nospec_barrier
+ #define __nospec_barrier() do { } while (0)
+@@ -120,8 +122,6 @@
+ nospec_ptr(__arr + __idx, __arr, __arr + __sz); \
+ })
+
+-#undef __nospec_barrier
+-
+ #ifndef __smp_mb
+ #define __smp_mb() mb()
+ #endif
+--
+2.14.3
+
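With the barrier folded into access_ok(), callers that already validate a
user pointer before dereferencing it inherit the speculation stop without
further annotation. A sketch under that assumption (struct params and the
handler are hypothetical):

	/* Hypothetical handler: the access_ok() check now doubles as a
	 * speculation barrier for anything derived from 'src'. */
	static int fetch_user_params(struct params *dst,
				     const struct params __user *src)
	{
		if (!access_ok(VERIFY_READ, src, sizeof(*src)))
			return -EFAULT;

		/* Speculation past a failed check was stopped above. */
		if (__copy_from_user(dst, src, sizeof(*src)))
			return -EFAULT;

		return 0;
	}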
+From bb10d660be01a93f19d258260dd25444e14e5889 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:53:55 -0800
+Subject: [PATCH 07/19] [media] uvcvideo: prevent bounds-check bypass via
+ speculative execution
+
+Static analysis reports that 'index' may be a user controlled value that
+is used as a data dependency to read 'pin' from the
+'selector->baSourceID' array. In order to avoid potential leaks of
+kernel memory values, block speculative execution of the instruction
+stream that could issue reads based on an invalid value of 'pin'.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: linux-media@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/media/usb/uvc/uvc_v4l2.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 3e7e283a44a8..7442626dc20e 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -22,6 +22,7 @@
+ #include <linux/mm.h>
+ #include <linux/wait.h>
+ #include <linux/atomic.h>
++#include <linux/compiler.h>
+
+ #include <media/v4l2-common.h>
+ #include <media/v4l2-ctrls.h>
+@@ -810,6 +811,7 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
+ struct uvc_entity *iterm = NULL;
+ u32 index = input->index;
+ int pin = 0;
++ __u8 *elem;
+
+ if (selector == NULL ||
+ (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
+@@ -820,8 +822,9 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
+ break;
+ }
+ pin = iterm->id;
+- } else if (index < selector->bNrInPins) {
+- pin = selector->baSourceID[index];
++ } else if ((elem = nospec_array_ptr(selector->baSourceID, index,
++ selector->bNrInPins))) {
++ pin = *elem;
+ list_for_each_entry(iterm, &chain->entities, chain) {
+ if (!UVC_ENTITY_IS_ITERM(iterm))
+ continue;
+--
+2.14.3
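The driver conversions that follow (carl9170, p54, qla2xxx, cw1200, int340x,
the ipv4/ipv6 raw sockets and fdtable) all repeat the same mechanical
transformation, sketched here with placeholder names:

	/* Before: the bounds check and the dependent load can be
	 * speculated past each other. */
	if (idx < nr_elems)
		val = table[idx];

	/* After: the load goes through the sanitized pointer, which is
	 * NULL whenever idx is out of bounds, even under speculation. */
	elem = nospec_array_ptr(table, idx, nr_elems);
	if (elem)
		val = *elem;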
+
+From 8a4e4e1e674b9aaf0d2ca95c3fa5117ab5aa2987 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:53:56 -0800
+Subject: [PATCH 08/19] carl9170: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'queue' may be a user controlled value that
+is used as a data dependency to read from the 'ar9170_qmap' array. In
+order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue reads
+based on an invalid result of 'ar9170_qmap[queue]'. In this case the
+value of 'ar9170_qmap[queue]' is immediately reused as an index to the
+'ar->edcf' array.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Christian Lamparter <chunkeey@googlemail.com>
+Cc: Kalle Valo <kvalo@codeaurora.org>
+Cc: linux-wireless@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/net/wireless/ath/carl9170/main.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
+index 988c8857d78c..0ff34cbe2b62 100644
+--- a/drivers/net/wireless/ath/carl9170/main.c
++++ b/drivers/net/wireless/ath/carl9170/main.c
+@@ -41,6 +41,7 @@
+ #include <linux/module.h>
+ #include <linux/etherdevice.h>
+ #include <linux/random.h>
++#include <linux/compiler.h>
+ #include <net/mac80211.h>
+ #include <net/cfg80211.h>
+ #include "hw.h"
+@@ -1384,11 +1385,12 @@ static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
+ const struct ieee80211_tx_queue_params *param)
+ {
+ struct ar9170 *ar = hw->priv;
++ const u8 *elem;
+ int ret;
+
+ mutex_lock(&ar->mutex);
+- if (queue < ar->hw->queues) {
+- memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
++ if ((elem = nospec_array_ptr(ar9170_qmap, queue, ar->hw->queues))) {
++ memcpy(&ar->edcf[*elem], param, sizeof(*param));
+ ret = carl9170_set_qos(ar);
+ } else {
+ ret = -EINVAL;
+--
+2.14.3
+
+From b2134ba6dc16b4e6a232e34179c3489c3e51ba89 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:53:57 -0800
+Subject: [PATCH 09/19] p54: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'queue' may be a user controlled value that
+is used as a data dependency to read from the 'priv->qos_params' array.
+In order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue reads
+based on an invalid result of 'priv->qos_params[queue]'.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Christian Lamparter <chunkeey@googlemail.com>
+Cc: Kalle Valo <kvalo@codeaurora.org>
+Cc: linux-wireless@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/net/wireless/intersil/p54/main.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
+index ab6d39e12069..85c9cbee35fc 100644
+--- a/drivers/net/wireless/intersil/p54/main.c
++++ b/drivers/net/wireless/intersil/p54/main.c
+@@ -20,6 +20,7 @@
+ #include <linux/firmware.h>
+ #include <linux/etherdevice.h>
+ #include <linux/module.h>
++#include <linux/compiler.h>
+
+ #include <net/mac80211.h>
+
+@@ -411,12 +412,13 @@ static int p54_conf_tx(struct ieee80211_hw *dev,
+ const struct ieee80211_tx_queue_params *params)
+ {
+ struct p54_common *priv = dev->priv;
++ struct p54_edcf_queue_param *p54_q;
+ int ret;
+
+ mutex_lock(&priv->conf_mutex);
+- if (queue < dev->queues) {
+- P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
+- params->cw_min, params->cw_max, params->txop);
++ if ((p54_q = nospec_array_ptr(priv->qos_params, queue, dev->queues))) {
++ P54_SET_QUEUE(p54_q[0], params->aifs, params->cw_min,
++ params->cw_max, params->txop);
+ ret = p54_set_edcf(priv);
+ } else
+ ret = -EINVAL;
+--
+2.14.3
+
+From addb69e8d90a79887aa369398e73b9b64fb9e910 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:53:58 -0800
+Subject: [PATCH 10/19] qla2xxx: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'handle' may be a user controlled value
+that is used as a data dependency to read 'sp' from the
+'req->outstanding_cmds' array. In order to avoid potential leaks of
+kernel memory values, block speculative execution of the instruction
+stream that could issue reads based on an invalid value of 'sp'. In this
+case 'sp' is directly dereferenced later in the function.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: qla2xxx-upstream@qlogic.com
+Cc: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
+Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
+Cc: linux-scsi@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/scsi/qla2xxx/qla_mr.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
+index d5da3981cefe..128b41de3784 100644
+--- a/drivers/scsi/qla2xxx/qla_mr.c
++++ b/drivers/scsi/qla2xxx/qla_mr.c
+@@ -9,6 +9,7 @@
+ #include <linux/ktime.h>
+ #include <linux/pci.h>
+ #include <linux/ratelimit.h>
++#include <linux/compiler.h>
+ #include <linux/vmalloc.h>
+ #include <linux/bsg-lib.h>
+ #include <scsi/scsi_tcq.h>
+@@ -2275,7 +2276,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ static void
+ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ {
+- srb_t *sp;
++ srb_t *sp, **elem;
+ fc_port_t *fcport;
+ struct scsi_cmnd *cp;
+ struct sts_entry_fx00 *sts;
+@@ -2304,8 +2305,9 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+- if (handle < req->num_outstanding_cmds)
+- sp = req->outstanding_cmds[handle];
++ if ((elem = nospec_array_ptr(req->outstanding_cmds, handle,
++ req->num_outstanding_cmds)))
++ sp = *elem;
+ else
+ sp = NULL;
+
+@@ -2626,7 +2628,7 @@ static void
+ qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, void *pkt)
+ {
+- srb_t *sp;
++ srb_t *sp, **elem;
+ struct multi_sts_entry_fx00 *stsmfx;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle, hindex, handle_count, i;
+@@ -2655,8 +2657,9 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+- if (handle < req->num_outstanding_cmds)
+- sp = req->outstanding_cmds[handle];
++ if ((elem = nospec_array_ptr(req->outstanding_cmds, handle,
++ req->num_outstanding_cmds)))
++ sp = *elem;
+ else
+ sp = NULL;
+
+--
+2.14.3
+
+From 18e5e10139f6a04e00f6522c4b0091f167eb6c1d Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:00 -0800
+Subject: [PATCH 11/19] cw1200: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'queue' may be a user controlled value that
+is used as a data dependency to read 'txq_params' from the
+'priv->tx_queue_params.params' array. In order to avoid potential leaks
+of kernel memory values, block speculative execution of the instruction
+stream that could issue reads based on an invalid value of 'txq_params'.
+In this case 'txq_params' is referenced later in the function.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Solomon Peachy <pizza@shaftnet.org>
+Cc: Kalle Valo <kvalo@codeaurora.org>
+Cc: linux-wireless@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/net/wireless/st/cw1200/sta.c | 10 ++++++----
+ drivers/net/wireless/st/cw1200/wsm.h | 4 +---
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
+index 38678e9a0562..886942617f14 100644
+--- a/drivers/net/wireless/st/cw1200/sta.c
++++ b/drivers/net/wireless/st/cw1200/sta.c
+@@ -14,6 +14,7 @@
+ #include <linux/firmware.h>
+ #include <linux/module.h>
+ #include <linux/etherdevice.h>
++#include <linux/compiler.h>
+
+ #include "cw1200.h"
+ #include "sta.h"
+@@ -612,18 +613,19 @@ int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+ {
+ struct cw1200_common *priv = dev->priv;
++ struct wsm_set_tx_queue_params *txq_params;
+ int ret = 0;
+ /* To prevent re-applying PM request OID again and again*/
+ bool old_uapsd_flags;
+
+ mutex_lock(&priv->conf_mutex);
+
+- if (queue < dev->queues) {
++ if ((txq_params = nospec_array_ptr(priv->tx_queue_params.params,
++ queue, dev->queues))) {
+ old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags);
+
+- WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
+- ret = wsm_set_tx_queue_params(priv,
+- &priv->tx_queue_params.params[queue], queue);
++ WSM_TX_QUEUE_SET(txq_params, 0, 0, 0);
++ ret = wsm_set_tx_queue_params(priv, txq_params, queue);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h
+index 48086e849515..8c8d9191e233 100644
+--- a/drivers/net/wireless/st/cw1200/wsm.h
++++ b/drivers/net/wireless/st/cw1200/wsm.h
+@@ -1099,10 +1099,8 @@ struct wsm_tx_queue_params {
+ };
+
+
+-#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
+- max_life_time) \
++#define WSM_TX_QUEUE_SET(p, ack_policy, allowed_time, max_life_time) \
+ do { \
+- struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
+ p->ackPolicy = (ack_policy); \
+ p->allowedMediumTime = (allowed_time); \
+ p->maxTransmitLifetime = (max_life_time); \
+--
+2.14.3
+
+From 0096694093529628e2a855812a5111358d1e952d Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:01 -0800
+Subject: [PATCH 12/19] Thermal/int340x: prevent bounds-check bypass via
+ speculative execution
+
+Static analysis reports that 'trip' may be a user controlled value that
+is used as a data dependency to read '*temp' from the 'd->aux_trips'
+array. In order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue reads
+based on an invalid value of '*temp'.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: Zhang Rui <rui.zhang@intel.com>
+Cc: Eduardo Valentin <edubezval@gmail.com>
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ drivers/thermal/int340x_thermal/int340x_thermal_zone.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+index 145a5c53ff5c..442a1d9bf7ad 100644
+--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
++++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+@@ -17,6 +17,7 @@
+ #include <linux/init.h>
+ #include <linux/acpi.h>
+ #include <linux/thermal.h>
++#include <linux/compiler.h>
+ #include "int340x_thermal_zone.h"
+
+ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
+@@ -52,20 +53,21 @@ static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, int *temp)
+ {
+ struct int34x_thermal_zone *d = zone->devdata;
++ unsigned long *elem;
+ int i;
+
+ if (d->override_ops && d->override_ops->get_trip_temp)
+ return d->override_ops->get_trip_temp(zone, trip, temp);
+
+- if (trip < d->aux_trip_nr)
+- *temp = d->aux_trips[trip];
+- else if (trip == d->crt_trip_id)
++ if ((elem = nospec_array_ptr(d->aux_trips, trip, d->aux_trip_nr))) {
++ *temp = *elem;
++ } else if (trip == d->crt_trip_id) {
+ *temp = d->crt_temp;
+- else if (trip == d->psv_trip_id)
++ } else if (trip == d->psv_trip_id) {
+ *temp = d->psv_temp;
+- else if (trip == d->hot_trip_id)
++ } else if (trip == d->hot_trip_id) {
+ *temp = d->hot_temp;
+- else {
++ } else {
+ for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+ if (d->act_trips[i].valid &&
+ d->act_trips[i].id == trip) {
+--
+2.14.3
+
+From 2a5a165ff05df37c3f4d02ab70ddee1e9329401c Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:03 -0800
+Subject: [PATCH 13/19] ipv6: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'offset' may be a user controlled value
+that is used as a data dependency reading from a raw6_frag_vec buffer.
+In order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue further
+reads based on an invalid '*(rfv->c + offset)' value.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ net/ipv6/raw.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 761a473a07c5..384e3d59d148 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -33,6 +33,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/compat.h>
+ #include <linux/uaccess.h>
++#include <linux/compiler.h>
+ #include <asm/ioctls.h>
+
+ #include <net/net_namespace.h>
+@@ -725,17 +726,17 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb)
+ {
+ struct raw6_frag_vec *rfv = from;
++ char *rfv_buf;
+
+- if (offset < rfv->hlen) {
++ if ((rfv_buf = nospec_array_ptr(rfv->c, offset, rfv->hlen))) {
+ int copy = min(rfv->hlen - offset, len);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+- memcpy(to, rfv->c + offset, copy);
++ memcpy(to, rfv_buf, copy);
+ else
+ skb->csum = csum_block_add(
+ skb->csum,
+- csum_partial_copy_nocheck(rfv->c + offset,
+- to, copy, 0),
++ csum_partial_copy_nocheck(rfv_buf, to, copy, 0),
+ odd);
+
+ odd = 0;
+--
+2.14.3
+
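[Note: this and the following patches in this file apply one idiom: an open-coded bounds check followed by an array dereference is replaced with nospec_array_ptr(), which yields a speculation-sanitized pointer to the element when the index is in range and NULL otherwise, so the check and the load cannot be separated by speculative execution. A minimal sketch of the conversion, using the nospec_array_ptr() helper introduced by this patch series; the function and field names below are hypothetical:

	/* Hypothetical driver code showing the idiom; nospec_array_ptr()
	 * comes from <linux/compiler.h> as extended by this series. */
	#include <linux/compiler.h>
	#include <linux/types.h>

	static int example_read(const u32 *table, unsigned int idx,
				unsigned int nr_entries, u32 *val)
	{
		const u32 *elem;

		/* Was: if (idx < nr_entries) *val = table[idx]; */
		elem = nospec_array_ptr(table, idx, nr_entries);
		if (!elem)
			return -EINVAL;

		*val = *elem;	/* load only through the sanitized pointer */
		return 0;
	}
]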
+From f38cdd5d461ce686d201e41242fd626641e7253d Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:02 -0800
+Subject: [PATCH 14/19] ipv4: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'offset' may be a user controlled value
+that is used as a data dependency reading from a raw_frag_vec buffer.
+In order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue further
+reads based on an invalid '*(rfv->c + offset)' value.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ net/ipv4/raw.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 125c1eab3eaa..f72b20131a15 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -57,6 +57,7 @@
+ #include <linux/in_route.h>
+ #include <linux/route.h>
+ #include <linux/skbuff.h>
++#include <linux/compiler.h>
+ #include <linux/igmp.h>
+ #include <net/net_namespace.h>
+ #include <net/dst.h>
+@@ -472,17 +473,17 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
+ struct sk_buff *skb)
+ {
+ struct raw_frag_vec *rfv = from;
++ char *rfv_buf;
+
+- if (offset < rfv->hlen) {
++ if ((rfv_buf = nospec_array_ptr(rfv->hdr.c, offset, rfv->hlen))) {
+ int copy = min(rfv->hlen - offset, len);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+- memcpy(to, rfv->hdr.c + offset, copy);
++ memcpy(to, rfv_buf, copy);
+ else
+ skb->csum = csum_block_add(
+ skb->csum,
+- csum_partial_copy_nocheck(rfv->hdr.c + offset,
+- to, copy, 0),
++ csum_partial_copy_nocheck(rfv_buf, to, copy, 0),
+ odd);
+
+ odd = 0;
+--
+2.14.3
+
+From 07a715cb9cd9e4e8bac7204a2462803bfe7ae259 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:04 -0800
+Subject: [PATCH 15/19] vfs, fdtable: prevent bounds-check bypass via
+ speculative execution
+
+As expected, static analysis reports that 'fd' is a user controlled value
+that is used as a data dependency to read from the 'fdt->fd' array. In
+order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue reads
+based on an invalid 'file *' returned from __fcheck_files.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ include/linux/fdtable.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
+index 1c65817673db..4a147c5c2533 100644
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -81,9 +81,10 @@ struct dentry;
+ static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+ {
+ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
++ struct file __rcu **fdp;
+
+- if (fd < fdt->max_fds)
+- return rcu_dereference_raw(fdt->fd[fd]);
++ if ((fdp = nospec_array_ptr(fdt->fd, fd, fdt->max_fds)))
++ return rcu_dereference_raw(*fdp);
+ return NULL;
+ }
+
+--
+2.14.3
+
+From e5ef1fdb08b0d2ae0af3f725a6c4a3394af538fe Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:05 -0800
+Subject: [PATCH 16/19] net: mpls: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'index' may be a user controlled value that
+is used as a data dependency reading 'rt' from the 'platform_label'
+array. In order to avoid potential leaks of kernel memory values, block
+speculative execution of the instruction stream that could issue further
+reads based on an invalid 'rt' value.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ net/mpls/af_mpls.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 8ca9915befc8..ebcf0e246cfe 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -8,6 +8,7 @@
+ #include <linux/ipv6.h>
+ #include <linux/mpls.h>
+ #include <linux/netconf.h>
++#include <linux/compiler.h>
+ #include <linux/vmalloc.h>
+ #include <linux/percpu.h>
+ #include <net/ip.h>
+@@ -77,12 +78,13 @@ static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
+ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
+ {
+ struct mpls_route *rt = NULL;
++ struct mpls_route __rcu **platform_label =
++ rcu_dereference(net->mpls.platform_label);
++ struct mpls_route __rcu **rtp;
+
+- if (index < net->mpls.platform_labels) {
+- struct mpls_route __rcu **platform_label =
+- rcu_dereference(net->mpls.platform_label);
+- rt = rcu_dereference(platform_label[index]);
+- }
++ if ((rtp = nospec_array_ptr(platform_label, index,
++ net->mpls.platform_labels)))
++ rt = rcu_dereference(*rtp);
+ return rt;
+ }
+
+--
+2.14.3
+
+From 276b18c636de3afc89571198b22b518473ce2b2a Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 3 Jan 2018 13:54:07 -0800
+Subject: [PATCH 17/19] udf: prevent bounds-check bypass via speculative
+ execution
+
+Static analysis reports that 'eahd->appAttrLocation' and
+'eahd->impAttrLocation' may be user controlled values that are used as
+data dependencies for calculating source and destination buffers for
+memmove operations. In order to avoid potential leaks of kernel memory
+values, block speculative execution of the instruction stream that could
+issue further reads based on invalid 'aal' or 'ial' values.
+
+Based on an original patch by Elena Reshetova.
+
+Cc: Jan Kara <jack@suse.com>
+Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+---
+ fs/udf/misc.c | 39 +++++++++++++++++++++------------------
+ 1 file changed, 21 insertions(+), 18 deletions(-)
+
+diff --git a/fs/udf/misc.c b/fs/udf/misc.c
+index 401e64cde1be..9403160822de 100644
+--- a/fs/udf/misc.c
++++ b/fs/udf/misc.c
+@@ -51,6 +51,8 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
+ int offset;
+ uint16_t crclen;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ uint8_t *ea_dst, *ea_src;
++ uint32_t aal, ial;
+
+ ea = iinfo->i_ext.i_data;
+ if (iinfo->i_lenEAttr) {
+@@ -100,33 +102,34 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
+
+ offset = iinfo->i_lenEAttr;
+ if (type < 2048) {
+- if (le32_to_cpu(eahd->appAttrLocation) <
+- iinfo->i_lenEAttr) {
+- uint32_t aal =
+- le32_to_cpu(eahd->appAttrLocation);
+- memmove(&ea[offset - aal + size],
+- &ea[aal], offset - aal);
++ aal = le32_to_cpu(eahd->appAttrLocation);
++ if ((ea_dst = nospec_array_ptr(ea, offset - aal + size,
++ iinfo->i_lenEAttr)) &&
++ (ea_src = nospec_array_ptr(ea, aal,
++ iinfo->i_lenEAttr))) {
++ memmove(ea_dst, ea_src, offset - aal);
+ offset -= aal;
+ eahd->appAttrLocation =
+ cpu_to_le32(aal + size);
+ }
+- if (le32_to_cpu(eahd->impAttrLocation) <
+- iinfo->i_lenEAttr) {
+- uint32_t ial =
+- le32_to_cpu(eahd->impAttrLocation);
+- memmove(&ea[offset - ial + size],
+- &ea[ial], offset - ial);
++
++ ial = le32_to_cpu(eahd->impAttrLocation);
++ if ((ea_dst = nospec_array_ptr(ea, offset - ial + size,
++ iinfo->i_lenEAttr)) &&
++ (ea_src = nospec_array_ptr(ea, ial,
++ iinfo->i_lenEAttr))) {
++ memmove(ea_dst, ea_src, offset - ial);
+ offset -= ial;
+ eahd->impAttrLocation =
+ cpu_to_le32(ial + size);
+ }
+ } else if (type < 65536) {
+- if (le32_to_cpu(eahd->appAttrLocation) <
+- iinfo->i_lenEAttr) {
+- uint32_t aal =
+- le32_to_cpu(eahd->appAttrLocation);
+- memmove(&ea[offset - aal + size],
+- &ea[aal], offset - aal);
++ aal = le32_to_cpu(eahd->appAttrLocation);
++ if ((ea_dst = nospec_array_ptr(ea, offset - aal + size,
++ iinfo->i_lenEAttr)) &&
++ (ea_src = nospec_array_ptr(ea, aal,
++ iinfo->i_lenEAttr))) {
++ memmove(ea_dst, ea_src, offset - aal);
+ offset -= aal;
+ eahd->appAttrLocation =
+ cpu_to_le32(aal + size);
+--
+2.14.3
diff --git a/retpoline.patch b/retpoline.patch
new file mode 100644
index 000000000..88c78fd0e
--- /dev/null
+++ b/retpoline.patch
@@ -0,0 +1,1480 @@
+From 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 7 Jan 2018 22:48:01 +0100
+Subject: [PATCH] x86/cpu: Implement CPU vulnerabilities sysfs functions
+
+Implement the CPU vulnerability show functions for meltdown, spectre_v1 and
+spectre_v2.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Linus Torvalds <torvalds@linuxfoundation.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180107214913.177414879@linutronix.de
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/kernel/cpu/bugs.c | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index cd5199de231e..e23d21ac745a 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -89,6 +89,7 @@ config X86
+ select GENERIC_CLOCKEVENTS_MIN_ADJUST
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
++ select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_EARLY_IOREMAP
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_IOMAP
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index ba0b2424c9b0..76ad6cb44b40 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -10,6 +10,7 @@
+ */
+ #include <linux/init.h>
+ #include <linux/utsname.h>
++#include <linux/cpu.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+ #include <asm/processor-flags.h>
+@@ -60,3 +61,31 @@ void __init check_bugs(void)
+ set_memory_4k((unsigned long)__va(0), 1);
+ #endif
+ }
++
++#ifdef CONFIG_SYSFS
++ssize_t cpu_show_meltdown(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++ return sprintf(buf, "Not affected\n");
++ if (boot_cpu_has(X86_FEATURE_PTI))
++ return sprintf(buf, "Mitigation: PTI\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
++ return sprintf(buf, "Not affected\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ return sprintf(buf, "Not affected\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++#endif
+--
+2.14.3
+
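[Note: these arch-specific show functions pair with the generic vulnerabilities folder added elsewhere in this update: the base CPU driver supplies weak defaults reporting "Not affected" and binds them to read-only sysfs attributes, which the definitions above override. A rough sketch of that generic side, following the naming of the upstream series (illustrative, abbreviated):

	/* Sketch of the generic half (drivers/base/cpu.c in the companion
	 * sysfs patch): weak defaults, overridden by the arch code above. */
	ssize_t __weak cpu_show_meltdown(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "Not affected\n");
	}

	static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);

	/* cpu_show_spectre_v1/v2 follow the same shape; the three attributes
	 * are grouped under /sys/devices/system/cpu/vulnerabilities/. */
]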
+From d46717c610dcfa2cba5c87500c928993371ef1ad Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:07 +0000
+Subject: [PATCH 01/10] x86/retpoline: Add initial retpoline support
+
+Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide
+the corresponding thunks. Provide assembler macros for invoking the thunks
+in the same way that GCC does, from native and inline assembler.
+
+This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In
+some circumstances, IBRS microcode features may be used instead, and the
+retpoline can be disabled.
+
+On AMD CPUs if lfence is serialising, the retpoline can be dramatically
+reduced to a simple "lfence; jmp *\reg". A future patch, after it has
+been verified that lfence really is serialising in all circumstances, can
+enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition
+to X86_FEATURE_RETPOLINE.
+
+Do not align the retpoline in the altinstr section, because there is no
+guarantee that it stays aligned when it's copied over the oldinstr during
+alternative patching.
+
+[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks]
+[ tglx: Put actual function CALL/JMP in front of the macros, convert to
+ symbolic labels ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-2-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/Kconfig | 13 ++++
+ arch/x86/Makefile | 10 ++++
+ arch/x86/include/asm/asm-prototypes.h | 25 ++++++++
+ arch/x86/include/asm/cpufeatures.h | 2 +
+ arch/x86/include/asm/nospec-branch.h | 109 ++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 4 ++
+ arch/x86/lib/Makefile | 1 +
+ arch/x86/lib/retpoline.S | 48 +++++++++++++++
+ 8 files changed, 212 insertions(+)
+ create mode 100644 arch/x86/include/asm/nospec-branch.h
+ create mode 100644 arch/x86/lib/retpoline.S
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e23d21ac745a..d1819161cc6c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -429,6 +429,19 @@ config GOLDFISH
+ def_bool y
+ depends on X86_GOLDFISH
+
++config RETPOLINE
++ bool "Avoid speculative indirect branches in kernel"
++ default y
++ help
++ Compile kernel with the retpoline compiler options to guard against
++ kernel-to-user data leaks by avoiding speculative indirect
++ branches. Requires a compiler with -mindirect-branch=thunk-extern
++ support for full protection. The kernel may run slower.
++
++ Without compiler support, at least indirect branches in assembler
++ code are eliminated. Since this includes the syscall entry path,
++ it is not entirely pointless.
++
+ config INTEL_RDT
+ bool "Intel Resource Director Technology support"
+ default n
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index a20eacd9c7e9..974c61864978 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -235,6 +235,16 @@ KBUILD_CFLAGS += -Wno-sign-compare
+ #
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
++# Avoid indirect branches in kernel to deal with Spectre
++ifdef CONFIG_RETPOLINE
++ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
++ ifneq ($(RETPOLINE_CFLAGS),)
++ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
++ else
++ $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.)
++ endif
++endif
++
+ archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index ff700d81e91e..0927cdc4f946 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -11,7 +11,32 @@
+ #include <asm/pgtable.h>
+ #include <asm/special_insns.h>
+ #include <asm/preempt.h>
++#include <asm/asm.h>
+
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+ #endif
++
++#ifdef CONFIG_RETPOLINE
++#ifdef CONFIG_X86_32
++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
++#else
++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
++INDIRECT_THUNK(8)
++INDIRECT_THUNK(9)
++INDIRECT_THUNK(10)
++INDIRECT_THUNK(11)
++INDIRECT_THUNK(12)
++INDIRECT_THUNK(13)
++INDIRECT_THUNK(14)
++INDIRECT_THUNK(15)
++#endif
++INDIRECT_THUNK(ax)
++INDIRECT_THUNK(bx)
++INDIRECT_THUNK(cx)
++INDIRECT_THUNK(dx)
++INDIRECT_THUNK(si)
++INDIRECT_THUNK(di)
++INDIRECT_THUNK(bp)
++INDIRECT_THUNK(sp)
++#endif /* CONFIG_RETPOLINE */
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 1641c2f96363..f275447862f4 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -203,6 +203,8 @@
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+ #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+new file mode 100644
+index 000000000000..7f58713b27c4
+--- /dev/null
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -0,0 +1,109 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#ifndef __NOSPEC_BRANCH_H__
++#define __NOSPEC_BRANCH_H__
++
++#include <asm/alternative.h>
++#include <asm/alternative-asm.h>
++#include <asm/cpufeatures.h>
++
++#ifdef __ASSEMBLY__
++
++/*
++ * These are the bare retpoline primitives for indirect jmp and call.
++ * Do not use these directly; they only exist to make the ALTERNATIVE
++ * invocation below less ugly.
++ */
++.macro RETPOLINE_JMP reg:req
++ call .Ldo_rop_\@
++.Lspec_trap_\@:
++ pause
++ jmp .Lspec_trap_\@
++.Ldo_rop_\@:
++ mov \reg, (%_ASM_SP)
++ ret
++.endm
++
++/*
++ * This is a wrapper around RETPOLINE_JMP so the called function in reg
++ * returns to the instruction after the macro.
++ */
++.macro RETPOLINE_CALL reg:req
++ jmp .Ldo_call_\@
++.Ldo_retpoline_jmp_\@:
++ RETPOLINE_JMP \reg
++.Ldo_call_\@:
++ call .Ldo_retpoline_jmp_\@
++.endm
++
++/*
++ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
++ * indirect jmp/call which may be susceptible to the Spectre variant 2
++ * attack.
++ */
++.macro JMP_NOSPEC reg:req
++#ifdef CONFIG_RETPOLINE
++ ALTERNATIVE_2 __stringify(jmp *\reg), \
++ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
++ __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
++#else
++ jmp *\reg
++#endif
++.endm
++
++.macro CALL_NOSPEC reg:req
++#ifdef CONFIG_RETPOLINE
++ ALTERNATIVE_2 __stringify(call *\reg), \
++ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
++ __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
++#else
++ call *\reg
++#endif
++.endm
++
++#else /* __ASSEMBLY__ */
++
++#if defined(CONFIG_X86_64) && defined(RETPOLINE)
++/*
++ * Since the inline asm uses the %V modifier which is only in newer GCC,
++ * the 64-bit one is dependent on RETPOLINE, not CONFIG_RETPOLINE.
++ */
++# define CALL_NOSPEC ALTERNATIVE( \
++ "call *%[thunk_target]\n", \
++ "call __x86_indirect_thunk_%V[thunk_target]\n", \
++ X86_FEATURE_RETPOLINE)
++# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
++#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
++/*
++ * For i386 we use the original ret-equivalent retpoline, because
++ * otherwise we'll run out of registers. We don't care about CET
++ * here, anyway.
++ */
++# define CALL_NOSPEC ALTERNATIVE( \
++ "call *%[thunk_target]\n", \
++ "" \
++ " jmp do_call%=;\n" \
++ " .align 16\n" \
++ "do_retpoline%=:\n" \
++ " call do_rop%=;\n" \
++ "spec_trap%=:\n" \
++ " pause;\n" \
++ " jmp spec_trap%=;\n" \
++ " .align 16\n" \
++ "do_rop%=:\n" \
++ " addl $4, %%esp;\n" \
++ " pushl %[thunk_target];\n" \
++ " ret;\n" \
++ " .align 16\n" \
++ "do_call%=:\n" \
++ " call do_retpoline%=;\n", \
++ X86_FEATURE_RETPOLINE)
++
++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#else /* No retpoline */
++# define CALL_NOSPEC "call *%[thunk_target]\n"
++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#endif
++
++#endif /* __ASSEMBLY__ */
++#endif /* __NOSPEC_BRANCH_H__ */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 372ba3fb400f..7a671d1ae3cb 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -905,6 +905,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
++#ifdef CONFIG_RETPOLINE
++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++#endif
++
+ fpu__init_system(c);
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 457f681ef379..d435c89875c1 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
++lib-$(CONFIG_RETPOLINE) += retpoline.o
+
+ obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+new file mode 100644
+index 000000000000..cb45c6cb465f
+--- /dev/null
++++ b/arch/x86/lib/retpoline.S
+@@ -0,0 +1,48 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#include <linux/stringify.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/cpufeatures.h>
++#include <asm/alternative-asm.h>
++#include <asm/export.h>
++#include <asm/nospec-branch.h>
++
++.macro THUNK reg
++ .section .text.__x86.indirect_thunk.\reg
++
++ENTRY(__x86_indirect_thunk_\reg)
++ CFI_STARTPROC
++ JMP_NOSPEC %\reg
++ CFI_ENDPROC
++ENDPROC(__x86_indirect_thunk_\reg)
++.endm
++
++/*
++ * Despite being an assembler file we can't just use .irp here
++ * because __KSYM_DEPS__ only uses the C preprocessor and would
++ * only see one instance of "__x86_indirect_thunk_\reg" rather
++ * than one per register with the correct names. So we do it
++ * the simple and nasty way...
++ */
++#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
++#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
++
++GENERATE_THUNK(_ASM_AX)
++GENERATE_THUNK(_ASM_BX)
++GENERATE_THUNK(_ASM_CX)
++GENERATE_THUNK(_ASM_DX)
++GENERATE_THUNK(_ASM_SI)
++GENERATE_THUNK(_ASM_DI)
++GENERATE_THUNK(_ASM_BP)
++GENERATE_THUNK(_ASM_SP)
++#ifdef CONFIG_64BIT
++GENERATE_THUNK(r8)
++GENERATE_THUNK(r9)
++GENERATE_THUNK(r10)
++GENERATE_THUNK(r11)
++GENERATE_THUNK(r12)
++GENERATE_THUNK(r13)
++GENERATE_THUNK(r14)
++GENERATE_THUNK(r15)
++#endif
+--
+2.14.3
+
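[Note: what the new compiler flag does to ordinary C code is worth spelling out. With CONFIG_RETPOLINE and -mindirect-branch=thunk-extern, every indirect call or jump is emitted as a call to one of the __x86_indirect_thunk_<reg> symbols defined in retpoline.S above, instead of a bare indirect instruction. A small illustration (hypothetical code, not from the patch; approximate x86-64 codegen shown in the comment):

	/* With a retpoline-capable compiler this is emitted roughly as
	 *
	 *	movq  (%rdi), %rax		# load ops->handler
	 *	call  __x86_indirect_thunk_rax	# instead of "call *%rax"
	 *
	 * which is why the thunks are exported for use by modules. */
	struct ops {
		int (*handler)(int arg);
	};

	static int dispatch(struct ops *ops, int arg)
	{
		int ret = ops->handler(arg);

		return ret + 1;	/* keep the call out of tail position */
	}
]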
+From 59b6e22f92f9a86dbd0798db72adc97bdb831f86 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 9 Jan 2018 14:43:08 +0000
+Subject: [PATCH 02/10] x86/retpoline: Temporarily disable objtool when
+ CONFIG_RETPOLINE=y
+
+objtool's assembler currently cannot deal with the code generated by the
+retpoline compiler and throws hundreds of warnings, mostly because it sees
+calls that don't have a symbolic target.
+
+Exclude all the options that rely on objtool when RETPOLINE is active.
+
+This mainly means that the kernel has to fall back to using the frame pointer
+unwinder, and livepatch is not supported.
+
+Josh is looking into resolving the issue.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-3-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/Kconfig | 4 ++--
+ arch/x86/Kconfig.debug | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index d1819161cc6c..abeac4b80b74 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -172,8 +172,8 @@ config X86
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
+- select HAVE_STACK_VALIDATION if X86_64
++ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION && !RETPOLINE
++ select HAVE_STACK_VALIDATION if X86_64 && !RETPOLINE
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UNSTABLE_SCHED_CLOCK
+ select HAVE_USER_RETURN_NOTIFIER
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 6293a8768a91..9f3928d744bc 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -359,8 +359,8 @@ config PUNIT_ATOM_DEBUG
+
+ choice
+ prompt "Choose kernel unwinder"
+- default UNWINDER_ORC if X86_64
+- default UNWINDER_FRAME_POINTER if X86_32
++ default UNWINDER_ORC if X86_64 && !RETPOLINE
++ default UNWINDER_FRAME_POINTER if X86_32 || RETPOLINE
+ ---help---
+ This determines which method will be used for unwinding kernel stack
+ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+@@ -368,7 +368,7 @@ choice
+
+ config UNWINDER_ORC
+ bool "ORC unwinder"
+- depends on X86_64
++ depends on X86_64 && !RETPOLINE
+ select STACK_VALIDATION
+ ---help---
+ This option enables the ORC (Oops Rewind Capability) unwinder for
+--
+2.14.3
+
+From 86d057614112971f7d5bbac45f67869adca79852 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:09 +0000
+Subject: [PATCH 03/10] x86/spectre: Add boot time option to select Spectre v2
+ mitigation
+
+Add a spectre_v2= option to select the mitigation used for the indirect
+branch speculation vulnerability.
+
+Currently, the only option available is retpoline, in its various forms.
+This will be expanded to cover the new IBRS/IBPB microcode features.
+
+The RETPOLINE_AMD feature relies on a serializing LFENCE for speculation
+control. For AMD hardware, only set RETPOLINE_AMD if LFENCE is a
+serializing instruction, which is indicated by the LFENCE_RDTSC feature.
+
+[ tglx: Folded back the LFENCE/AMD fixes and reworked it so IBRS
+ integration becomes simple ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-4-git-send-email-dwmw@amazon.co.uk
+---
+ Documentation/admin-guide/kernel-parameters.txt | 28 +++++
+ arch/x86/include/asm/nospec-branch.h | 10 ++
+ arch/x86/kernel/cpu/bugs.c | 158 +++++++++++++++++++++++-
+ arch/x86/kernel/cpu/common.c | 4 -
+ 4 files changed, 195 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 905991745d26..8122b5f98ea1 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2599,6 +2599,11 @@
+ nosmt [KNL,S390] Disable symmetric multithreading (SMT).
+ Equivalent to smt=1.
+
++ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
++ (indirect branch prediction) vulnerability. System may
++ allow data leaks with this option, which is equivalent
++ to spectre_v2=off.
++
+ noxsave [BUGS=X86] Disables x86 extended register state save
+ and restore using xsave. The kernel will fallback to
+ enabling legacy floating-point and sse state.
+@@ -3908,6 +3913,29 @@
+ sonypi.*= [HW] Sony Programmable I/O Control Device driver
+ See Documentation/laptops/sonypi.txt
+
++ spectre_v2= [X86] Control mitigation of Spectre variant 2
++ (indirect branch speculation) vulnerability.
++
++ on - unconditionally enable
++ off - unconditionally disable
++ auto - kernel detects whether your CPU model is
++ vulnerable
++
++ Selecting 'on' will, and 'auto' may, choose a
++ mitigation method at run time according to the
++ CPU, the available microcode, the setting of the
++ CONFIG_RETPOLINE configuration option, and the
++ compiler with which the kernel was built.
++
++ Specific mitigations can also be selected manually:
++
++ retpoline - replace indirect branches
++			retpoline,generic - Google's original retpoline
++ retpoline,amd - AMD-specific minimal thunk
++
++ Not specifying this option is equivalent to
++ spectre_v2=auto.
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 7f58713b27c4..7d70ea977fbe 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -105,5 +105,15 @@
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #endif
+
++/* The Spectre V2 mitigation variants */
++enum spectre_v2_mitigation {
++ SPECTRE_V2_NONE,
++ SPECTRE_V2_RETPOLINE_MINIMAL,
++ SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
++ SPECTRE_V2_RETPOLINE_GENERIC,
++ SPECTRE_V2_RETPOLINE_AMD,
++ SPECTRE_V2_IBRS,
++};
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 76ad6cb44b40..e4dc26185aa7 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,9 @@
+ #include <linux/init.h>
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
++
++#include <asm/nospec-branch.h>
++#include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+ #include <asm/processor-flags.h>
+@@ -21,6 +24,8 @@
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
+
++static void __init spectre_v2_select_mitigation(void);
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -30,6 +35,9 @@ void __init check_bugs(void)
+ print_cpu_info(&boot_cpu_data);
+ }
+
++ /* Select the proper spectre mitigation before patching alternatives */
++ spectre_v2_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -62,6 +70,153 @@ void __init check_bugs(void)
+ #endif
+ }
+
++/* The kernel command line selection */
++enum spectre_v2_mitigation_cmd {
++ SPECTRE_V2_CMD_NONE,
++ SPECTRE_V2_CMD_AUTO,
++ SPECTRE_V2_CMD_FORCE,
++ SPECTRE_V2_CMD_RETPOLINE,
++ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
++ SPECTRE_V2_CMD_RETPOLINE_AMD,
++};
++
++static const char *spectre_v2_strings[] = {
++ [SPECTRE_V2_NONE] = "Vulnerable",
++ [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
++ [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
++ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
++ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
++};
++
++#undef pr_fmt
++#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
++
++static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++
++static void __init spec2_print_if_insecure(const char *reason)
++{
++ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ pr_info("%s\n", reason);
++}
++
++static void __init spec2_print_if_secure(const char *reason)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ pr_info("%s\n", reason);
++}
++
++static inline bool retp_compiler(void)
++{
++ return __is_defined(RETPOLINE);
++}
++
++static inline bool match_option(const char *arg, int arglen, const char *opt)
++{
++ int len = strlen(opt);
++
++ return len == arglen && !strncmp(arg, opt, len);
++}
++
++static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
++{
++ char arg[20];
++ int ret;
++
++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
++ sizeof(arg));
++ if (ret > 0) {
++ if (match_option(arg, ret, "off")) {
++ goto disable;
++ } else if (match_option(arg, ret, "on")) {
++ spec2_print_if_secure("force enabled on command line.");
++ return SPECTRE_V2_CMD_FORCE;
++ } else if (match_option(arg, ret, "retpoline")) {
++ spec2_print_if_insecure("retpoline selected on command line.");
++ return SPECTRE_V2_CMD_RETPOLINE;
++ } else if (match_option(arg, ret, "retpoline,amd")) {
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
++ pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
++ return SPECTRE_V2_CMD_AUTO;
++ }
++ spec2_print_if_insecure("AMD retpoline selected on command line.");
++ return SPECTRE_V2_CMD_RETPOLINE_AMD;
++ } else if (match_option(arg, ret, "retpoline,generic")) {
++ spec2_print_if_insecure("generic retpoline selected on command line.");
++ return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
++ } else if (match_option(arg, ret, "auto")) {
++ return SPECTRE_V2_CMD_AUTO;
++ }
++ }
++
++ if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
++ return SPECTRE_V2_CMD_AUTO;
++disable:
++ spec2_print_if_insecure("disabled on command line.");
++ return SPECTRE_V2_CMD_NONE;
++}
++
++static void __init spectre_v2_select_mitigation(void)
++{
++ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
++ enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
++
++ /*
++ * If the CPU is not affected and the command line mode is NONE or AUTO
++ * then nothing to do.
++ */
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
++ (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
++ return;
++
++ switch (cmd) {
++ case SPECTRE_V2_CMD_NONE:
++ return;
++
++ case SPECTRE_V2_CMD_FORCE:
++		/* FALLTHRU */
++ case SPECTRE_V2_CMD_AUTO:
++ goto retpoline_auto;
++
++ case SPECTRE_V2_CMD_RETPOLINE_AMD:
++ if (IS_ENABLED(CONFIG_RETPOLINE))
++ goto retpoline_amd;
++ break;
++ case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
++ if (IS_ENABLED(CONFIG_RETPOLINE))
++ goto retpoline_generic;
++ break;
++ case SPECTRE_V2_CMD_RETPOLINE:
++ if (IS_ENABLED(CONFIG_RETPOLINE))
++ goto retpoline_auto;
++ break;
++ }
++	pr_err("kernel not compiled with retpoline; no mitigation available!\n");
++ return;
++
++retpoline_auto:
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++ retpoline_amd:
++ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
++ pr_err("LFENCE not serializing. Switching to generic retpoline\n");
++ goto retpoline_generic;
++ }
++ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
++ SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++ } else {
++ retpoline_generic:
++ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
++ SPECTRE_V2_RETPOLINE_MINIMAL;
++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++ }
++
++ spectre_v2_enabled = mode;
++ pr_info("%s\n", spectre_v2_strings[mode]);
++}
++
++#undef pr_fmt
++
+ #ifdef CONFIG_SYSFS
+ ssize_t cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+@@ -86,6 +241,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
+ {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+- return sprintf(buf, "Vulnerable\n");
++
++ return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 7a671d1ae3cb..372ba3fb400f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -905,10 +905,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+-#ifdef CONFIG_RETPOLINE
+- setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+-#endif
+-
+ fpu__init_system(c);
+
+ #ifdef CONFIG_X86_32
+--
+2.14.3
+
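[Note: after this change the selected mode shows up in dmesg ("Spectre V2 mitigation: ...") and through the sysfs file backed by cpu_show_spectre_v2() above. A small userspace sketch for reading it back; the path is the one created by the companion vulnerabilities-folder patch:

	/* Illustrative only; build with: cc -o spectre_v2 spectre_v2.c */
	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: Full generic retpoline" */
		fclose(f);
		return 0;
	}
]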
+From b3a96862283e68914d1f74f160ab980dacf811ee Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:10 +0000
+Subject: [PATCH 04/10] x86/retpoline/crypto: Convert crypto assembler indirect
+ jumps
+
+Convert all indirect jumps in crypto assembler code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-5-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/crypto/aesni-intel_asm.S | 5 +++--
+ arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3 ++-
+ arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++-
+ arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3 ++-
+ 4 files changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 16627fec80b2..3d09e3aca18d 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -32,6 +32,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
+ #include <asm/frame.h>
++#include <asm/nospec-branch.h>
+
+ /*
+ * The following macros are used to move an (un)aligned 16 byte value to/from
+@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
+ pxor INC, STATE4
+ movdqu IV, 0x30(OUTP)
+
+- call *%r11
++ CALL_NOSPEC %r11
+
+ movdqu 0x00(OUTP), INC
+ pxor INC, STATE1
+@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
+ _aesni_gf128mul_x_ble()
+ movups IV, (IVP)
+
+- call *%r11
++ CALL_NOSPEC %r11
+
+ movdqu 0x40(OUTP), INC
+ pxor INC, STATE1
+diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+index f7c495e2863c..a14af6eb09cb 100644
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -17,6 +17,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/nospec-branch.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+- call *%r9;
++ CALL_NOSPEC %r9;
+
+ addq $(16 * 16), %rsp;
+
+diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+index eee5b3982cfd..b66bbfa62f50 100644
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -12,6 +12,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/nospec-branch.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
+ vpxor 14 * 32(%rax), %ymm15, %ymm14;
+ vpxor 15 * 32(%rax), %ymm15, %ymm15;
+
+- call *%r9;
++ CALL_NOSPEC %r9;
+
+ addq $(16 * 32), %rsp;
+
+diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+index 7a7de27c6f41..d9b734d0c8cc 100644
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -45,6 +45,7 @@
+
+ #include <asm/inst.h>
+ #include <linux/linkage.h>
++#include <asm/nospec-branch.h>
+
+ ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+
+@@ -172,7 +173,7 @@ continue_block:
+ movzxw (bufp, %rax, 2), len
+ lea crc_array(%rip), bufp
+ lea (bufp, len, 1), bufp
+- jmp *bufp
++ JMP_NOSPEC bufp
+
+ ################################################################
+ ## 2a) PROCESS FULL BLOCKS:
+--
+2.14.3
+
+From 2558106c7a47e16968a10fa66eea78a096fabfe6 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:11 +0000
+Subject: [PATCH 05/10] x86/retpoline/entry: Convert entry assembler indirect
+ jumps
+
+Convert indirect jumps in core 32/64bit entry assembler code to use
+non-speculative sequences when CONFIG_RETPOLINE is enabled.
+
+Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
+address after the 'call' instruction must be *precisely* at the
+.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
+and the use of alternatives will mess that up unless we play horrid
+games to prepend with NOPs and make the variants the same length. It's
+not worth it; in the case where we ALTERNATIVE out the retpoline, the
+first instruction at __x86.indirect_thunk.rax is going to be a bare
+jmp *%rax anyway.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-6-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/entry/entry_32.S | 5 +++--
+ arch/x86/entry/entry_64.S | 12 +++++++++---
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index ace8f321a5a1..a1f28a54f23a 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -44,6 +44,7 @@
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+ #include <asm/frame.h>
++#include <asm/nospec-branch.h>
+
+ .section .entry.text, "ax"
+
+@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
+
+ /* kernel thread */
+ 1: movl %edi, %eax
+- call *%ebx
++ CALL_NOSPEC %ebx
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+@@ -919,7 +920,7 @@ common_exception:
+ movl %ecx, %es
+ TRACE_IRQS_OFF
+ movl %esp, %eax # pt_regs pointer
+- call *%edi
++ CALL_NOSPEC %edi
+ jmp ret_from_exception
+ END(common_exception)
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index ed31d00dc5ee..59874bc1aed2 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -37,6 +37,7 @@
+ #include <asm/pgtable_types.h>
+ #include <asm/export.h>
+ #include <asm/frame.h>
++#include <asm/nospec-branch.h>
+ #include <linux/err.h>
+
+ #include "calling.h"
+@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
+ */
+ pushq %rdi
+ movq $entry_SYSCALL_64_stage2, %rdi
+- jmp *%rdi
++ JMP_NOSPEC %rdi
+ END(entry_SYSCALL_64_trampoline)
+
+ .popsection
+@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
+ * It might end up jumping to the slow path. If it jumps, RAX
+ * and all argument registers are clobbered.
+ */
++#ifdef CONFIG_RETPOLINE
++ movq sys_call_table(, %rax, 8), %rax
++ call __x86_indirect_thunk_rax
++#else
+ call *sys_call_table(, %rax, 8)
++#endif
+ .Lentry_SYSCALL_64_after_fastpath_call:
+
+ movq %rax, RAX(%rsp)
+@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
+ jmp entry_SYSCALL64_slow_path
+
+ 1:
+- jmp *%rax /* Called from C */
++ JMP_NOSPEC %rax /* Called from C */
+ END(stub_ptregs_64)
+
+ .macro ptregs_stub func
+@@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
+ 1:
+ /* kernel thread */
+ movq %r12, %rdi
+- call *%rbx
++ CALL_NOSPEC %rbx
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+--
+2.14.3
+
+From 42f7c812022441ffba2d5ccca3acf6380201f19e Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:12 +0000
+Subject: [PATCH 06/10] x86/retpoline/ftrace: Convert ftrace assembler indirect
+ jumps
+
+Convert all indirect jumps in ftrace assembler code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-7-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/kernel/ftrace_32.S | 6 ++++--
+ arch/x86/kernel/ftrace_64.S | 8 ++++----
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
+index b6c6468e10bc..4c8440de3355 100644
+--- a/arch/x86/kernel/ftrace_32.S
++++ b/arch/x86/kernel/ftrace_32.S
+@@ -8,6 +8,7 @@
+ #include <asm/segment.h>
+ #include <asm/export.h>
+ #include <asm/ftrace.h>
++#include <asm/nospec-branch.h>
+
+ #ifdef CC_USING_FENTRY
+ # define function_hook __fentry__
+@@ -197,7 +198,8 @@ ftrace_stub:
+ movl 0x4(%ebp), %edx
+ subl $MCOUNT_INSN_SIZE, %eax
+
+- call *ftrace_trace_function
++ movl ftrace_trace_function, %ecx
++ CALL_NOSPEC %ecx
+
+ popl %edx
+ popl %ecx
+@@ -241,5 +243,5 @@ return_to_handler:
+ movl %eax, %ecx
+ popl %edx
+ popl %eax
+- jmp *%ecx
++ JMP_NOSPEC %ecx
+ #endif
+diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
+index c832291d948a..7cb8ba08beb9 100644
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -7,7 +7,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/ftrace.h>
+ #include <asm/export.h>
+-
++#include <asm/nospec-branch.h>
+
+ .code64
+ .section .entry.text, "ax"
+@@ -286,8 +286,8 @@ trace:
+ * ip and parent ip are used and the list function is called when
+ * function tracing is enabled.
+ */
+- call *ftrace_trace_function
+-
++ movq ftrace_trace_function, %r8
++ CALL_NOSPEC %r8
+ restore_mcount_regs
+
+ jmp fgraph_trace
+@@ -329,5 +329,5 @@ GLOBAL(return_to_handler)
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $24, %rsp
+- jmp *%rdi
++ JMP_NOSPEC %rdi
+ #endif
+--
+2.14.3
+
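[Note: one detail in this patch deserves a remark. JMP_NOSPEC and CALL_NOSPEC require the target in a register, because the retpoline thunk stores it over the return address on the stack. A memory-indirect call therefore has to grow an explicit load first, which is exactly what the hunks above do:

	/* Before (memory-indirect; no thunk form exists for this):
	 *	call *ftrace_trace_function
	 * After (load the target, then branch through the thunk):
	 *	movl ftrace_trace_function, %ecx
	 *	CALL_NOSPEC %ecx
	 */
]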
+From f14fd95d2f3e611619756ea3c008aee3b4bd4978 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:13 +0000
+Subject: [PATCH 07/10] x86/retpoline/hyperv: Convert assembler indirect jumps
+
+Convert all indirect jumps in hyperv inline asm code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-8-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/include/asm/mshyperv.h | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index 581bb54dd464..5119e4b555cc 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -7,6 +7,7 @@
+ #include <linux/nmi.h>
+ #include <asm/io.h>
+ #include <asm/hyperv.h>
++#include <asm/nospec-branch.h>
+
+ /*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
+ return U64_MAX;
+
+ __asm__ __volatile__("mov %4, %%r8\n"
+- "call *%5"
++ CALL_NOSPEC
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input_address)
+- : "r" (output_address), "m" (hv_hypercall_pg)
++ : "r" (output_address),
++ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
+ #else
+ u32 input_address_hi = upper_32_bits(input_address);
+@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
+ if (!hv_hypercall_pg)
+ return U64_MAX;
+
+- __asm__ __volatile__("call *%7"
++ __asm__ __volatile__(CALL_NOSPEC
+ : "=A" (hv_status),
+ "+c" (input_address_lo), ASM_CALL_CONSTRAINT
+ : "A" (control),
+ "b" (input_address_hi),
+ "D"(output_address_hi), "S"(output_address_lo),
+- "m" (hv_hypercall_pg)
++ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "memory");
+ #endif /* !x86_64 */
+ return hv_status;
+@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
+
+ #ifdef CONFIG_X86_64
+ {
+- __asm__ __volatile__("call *%4"
++ __asm__ __volatile__(CALL_NOSPEC
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+- : "m" (hv_hypercall_pg)
++ : THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "r8", "r9", "r10", "r11");
+ }
+ #else
+@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
+ u32 input1_hi = upper_32_bits(input1);
+ u32 input1_lo = lower_32_bits(input1);
+
+- __asm__ __volatile__ ("call *%5"
++ __asm__ __volatile__ (CALL_NOSPEC
+ : "=A"(hv_status),
+ "+c"(input1_lo),
+ ASM_CALL_CONSTRAINT
+ : "A" (control),
+ "b" (input1_hi),
+- "m" (hv_hypercall_pg)
++ THUNK_TARGET(hv_hypercall_pg)
+ : "cc", "edi", "esi");
+ }
+ #endif
+--
+2.14.3
+
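[Note: the inline-asm conversions here, and in the Xen and irq32 patches that follow, share one shape: the "call *%N" template becomes CALL_NOSPEC, and the function-pointer operand is renamed to [thunk_target] via THUNK_TARGET(), whose constraint is "r" on 64-bit (the thunk symbol name is built from the register) and "rm" otherwise. A minimal sketch of the pattern, assuming x86-64; the clobber list names the caller-saved registers a call may touch:

	#include <asm/asm.h>		/* ASM_CALL_CONSTRAINT */
	#include <asm/nospec-branch.h>	/* CALL_NOSPEC, THUNK_TARGET */

	/* Hypothetical helper showing the conversion pattern. */
	static unsigned long indirect_call(unsigned long (*fn)(void))
	{
		unsigned long ret;

		asm volatile(CALL_NOSPEC
			     : "=a" (ret), ASM_CALL_CONSTRAINT
			     : THUNK_TARGET(fn)
			     : "rcx", "rdx", "rsi", "rdi",
			       "r8", "r9", "r10", "r11", "cc", "memory");
		return ret;
	}
]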
+From b569cb1e72bda00e7e6245519fe7d0d0ab13898e Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:14 +0000
+Subject: [PATCH 08/10] x86/retpoline/xen: Convert Xen hypercall indirect jumps
+
+Convert indirect call in Xen hypercall to use non-speculative sequence,
+when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-9-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/include/asm/xen/hypercall.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index 7cb282e9e587..bfd882617613 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -44,6 +44,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/smap.h>
++#include <asm/nospec-branch.h>
+
+ #include <xen/interface/xen.h>
+ #include <xen/interface/sched.h>
+@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+ stac();
+- asm volatile("call *%[call]"
++ asm volatile(CALL_NOSPEC
+ : __HYPERCALL_5PARAM
+- : [call] "a" (&hypercall_page[call])
++ : [thunk_target] "a" (&hypercall_page[call])
+ : __HYPERCALL_CLOBBER5);
+ clac();
+
+--
+2.14.3
+
+From 96f71b3a482e918991d165eb7a6b42eb9a9ef735 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 14:43:15 +0000
+Subject: [PATCH 09/10] x86/retpoline/checksum32: Convert assembler indirect
+ jumps
+
+Convert all indirect jumps in 32bit checksum assembler code to use
+non-speculative sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-10-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/lib/checksum_32.S | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index 4d34bb548b41..46e71a74e612 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -29,7 +29,8 @@
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+ #include <asm/export.h>
+-
++#include <asm/nospec-branch.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -156,7 +157,7 @@ ENTRY(csum_partial)
+ negl %ebx
+ lea 45f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+- jmp *%ebx
++ JMP_NOSPEC %ebx
+
+ # Handle 2-byte-aligned regions
+ 20: addw (%esi), %ax
+@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
+ andl $-32,%edx
+ lea 3f(%ebx,%ebx), %ebx
+ testl %esi, %esi
+- jmp *%ebx
++ JMP_NOSPEC %ebx
+ 1: addl $64,%esi
+ addl $64,%edi
+ SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
+--
+2.14.3
+
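[Note: unlike the other conversions, the two sites here are computed jumps into an unrolled loop rather than function-pointer calls: the target is derived from a length with lea just before the branch. JMP_NOSPEC handles them all the same, since the macro only requires the destination in a register:

	# Restating the shape of the converted sites above:
	#	lea 45f(%ebx,%ebx,2), %ebx	# compute the jump target
	#	JMP_NOSPEC %ebx			# was: jmp *%ebx
]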
+From 9080a45e302772c068f73bc24b3304a416fe2daf Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 9 Jan 2018 14:43:16 +0000
+Subject: [PATCH 10/10] x86/retpoline/irq32: Convert assembler indirect jumps
+
+Convert all indirect jumps in 32bit irq inline asm code to use
+non-speculative sequences.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-11-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/kernel/irq_32.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index a83b3346a0e1..c1bdbd3d3232 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -20,6 +20,7 @@
+ #include <linux/mm.h>
+
+ #include <asm/apic.h>
++#include <asm/nospec-branch.h>
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+
+@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+ static void call_on_stack(void *func, void *stack)
+ {
+ asm volatile("xchgl %%ebx,%%esp \n"
+- "call *%%edi \n"
++ CALL_NOSPEC
+ "movl %%ebx,%%esp \n"
+ : "=b" (stack)
+ : "0" (stack),
+- "D"(func)
++ [thunk_target] "D"(func)
+ : "memory", "cc", "edx", "ecx", "eax");
+ }
+
+@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+ call_on_stack(print_stack_overflow, isp);
+
+ asm volatile("xchgl %%ebx,%%esp \n"
+- "call *%%edi \n"
++ CALL_NOSPEC
+ "movl %%ebx,%%esp \n"
+ : "=a" (arg1), "=b" (isp)
+ : "0" (desc), "1" (isp),
+- "D" (desc->handle_irq)
++ [thunk_target] "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
+ return 1;
+ }
+--
+2.14.3
+
diff --git a/sources b/sources
index 37c0dd367..56436f43b 100644
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
SHA512 (linux-4.14.tar.xz) = 77e43a02d766c3d73b7e25c4aafb2e931d6b16e870510c22cef0cdb05c3acb7952b8908ebad12b10ef982c6efbe286364b1544586e715cf38390e483927904d8
SHA512 (perf-man-4.14.tar.gz) = 76a9d8adc284cdffd4b3fbb060e7f9a14109267707ce1d03f4c3239cd70d8d164f697da3a0f90a363fbcac42a61d3c378afbcc2a86f112c501b9cb5ce74ef9f8
-SHA512 (patch-4.14.12.xz) = b11b91503c9eb879b79cb16683204f5dbb467aac62dcfc1b025f889dc38016d990c0fd1879210226430e9f9ac6e168439b13603781188d67d213b12a334b4e5b
+SHA512 (patch-4.14.13.xz) = 6ae473fbed193a2997e9d3f02ef9c1b5a1bc6f2464ef32a4bc22306659f5d978ab64e531b3488bf8266732043868f1b14183e463c17020d1dc95c8cf70343415