author    | Justin M. Forbes <jforbes@fedoraproject.org> | 2018-01-11 10:28:58 -0600
committer | Justin M. Forbes <jforbes@fedoraproject.org> | 2018-01-11 10:28:58 -0600
commit    | 78b277bd7298981604c85b30d83fd3022674292d (patch)
tree      | 1cc3cd96f8fbef74a67f3075065d0e4da87a20df
parent    | d496f759f1fe1234d0ba3e125e69f5547a26dbc5 (diff)
Initial retpoline patches for Spectre v2
-rw-r--r-- | 0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch    |   66
-rw-r--r-- | 0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch             |   58
-rw-r--r-- | 0002-sysfs-cpu-Add-vulnerability-folder.patch                   |  154
-rw-r--r-- | 0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch |   82
-rw-r--r-- | configs/fedora/generic/x86/CONFIG_RETPOLINE                     |    1
-rw-r--r-- | kernel-i686-PAE.config                                          |    1
-rw-r--r-- | kernel-i686-PAEdebug.config                                     |    1
-rw-r--r-- | kernel-i686-debug.config                                        |    1
-rw-r--r-- | kernel-i686.config                                              |    1
-rw-r--r-- | kernel-x86_64-debug.config                                      |    1
-rw-r--r-- | kernel-x86_64.config                                            |    1
-rw-r--r-- | kernel.spec                                                     |    9
-rw-r--r-- | retpoline.patch                                                 | 1480
13 files changed, 1856 insertions, 0 deletions
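
For orientation before the patches themselves: a retpoline ("return trampoline") replaces a speculation-prone indirect branch with a call/return pair that captures the CPU's speculative execution in a harmless spin loop, while the architectural execution path still reaches the real target. Below is a minimal standalone sketch of the generic thunk for %rax, mirroring the RETPOLINE_JMP macro that retpoline.patch adds further down — simplified for illustration; the kernel version is an assembler macro parameterized over registers and selected at boot via alternatives patching:

    /* Speculation-safe replacement for "jmp *%rax" (sketch) */
    __x86_indirect_thunk_rax:
            call    .Ldo_rop        /* push a return address pointing at the trap loop */
    .Lspec_trap:
            pause                   /* speculation following the pushed address spins here */
            jmp     .Lspec_trap
    .Ldo_rop:
            mov     %rax, (%rsp)    /* overwrite the return address with the real target */
            ret                     /* architectural execution jumps to *%rax */

The return predictor assumes the final ret goes back to the instruction after the call, so any speculative execution is confined to the pause/jmp loop; only the architectural ret, which reads the rewritten stack slot, reaches the attacker-uncontrollable real target.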
diff --git a/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch b/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch new file mode 100644 index 000000000..b44c184d9 --- /dev/null +++ b/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch @@ -0,0 +1,66 @@ +From e4d0e84e490790798691aaa0f2e598637f1867ec Mon Sep 17 00:00:00 2001 +From: Tom Lendacky <thomas.lendacky@amd.com> +Date: Mon, 8 Jan 2018 16:09:21 -0600 +Subject: [PATCH 1/2] x86/cpu/AMD: Make LFENCE a serializing instruction + +To aid in speculation control, make LFENCE a serializing instruction +since it has less overhead than MFENCE. This is done by setting bit 1 +of MSR 0xc0011029 (DE_CFG). Some families that support LFENCE do not +have this MSR. For these families, the LFENCE instruction is already +serializing. + +Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Reviewed-by: Borislav Petkov <bp@suse.de> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Borislav Petkov <bp@alien8.de> +Cc: Dan Williams <dan.j.williams@intel.com> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: David Woodhouse <dwmw@amazon.co.uk> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net +--- + arch/x86/include/asm/msr-index.h | 2 ++ + arch/x86/kernel/cpu/amd.c | 10 ++++++++++ + 2 files changed, 12 insertions(+) + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index ab022618a50a..1e7d710fef43 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -352,6 +352,8 @@ + #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL + #define FAM10H_MMIO_CONF_BASE_SHIFT 20 + #define MSR_FAM10H_NODE_ID 0xc001100c ++#define MSR_F10H_DECFG 0xc0011029 ++#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 + + /* K8 MSRs */ + #define MSR_K8_TOP_MEM1 0xc001001a +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index bcb75dc97d44..5b438d81beb2 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -829,6 +829,16 @@ static void init_amd(struct cpuinfo_x86 *c) + set_cpu_cap(c, X86_FEATURE_K8); + + if (cpu_has(c, X86_FEATURE_XMM2)) { ++ /* ++ * A serializing LFENCE has less overhead than MFENCE, so ++ * use it for execution serialization. On families which ++ * don't have that MSR, LFENCE is already serializing. ++ * msr_set_bit() uses the safe accessors, too, even if the MSR ++ * is not present. ++ */ ++ msr_set_bit(MSR_F10H_DECFG, ++ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); ++ + /* MFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + } +-- +2.14.3 + diff --git a/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch b/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch new file mode 100644 index 000000000..e358c16f9 --- /dev/null +++ b/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch @@ -0,0 +1,58 @@ +From 99c6fa2511d8a683e61468be91b83f85452115fa Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Sat, 6 Jan 2018 11:49:23 +0000 +Subject: [PATCH 1/2] x86/cpufeatures: Add X86_BUG_SPECTRE_V[12] + +Add the bug bits for spectre v1/2 and force them unconditionally for all +cpus. 
+ +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/1515239374-23361-2-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/kernel/cpu/common.c | 3 +++ + 2 files changed, 5 insertions(+) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 21ac898df2d8..1641c2f96363 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -342,5 +342,7 @@ + #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ + #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ + #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 2d3bd2215e5b..372ba3fb400f 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -902,6 +902,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) + if (c->x86_vendor != X86_VENDOR_AMD) + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1); ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2); ++ + fpu__init_system(c); + + #ifdef CONFIG_X86_32 +-- +2.14.3 + diff --git a/0002-sysfs-cpu-Add-vulnerability-folder.patch b/0002-sysfs-cpu-Add-vulnerability-folder.patch new file mode 100644 index 000000000..8f1ae3a6a --- /dev/null +++ b/0002-sysfs-cpu-Add-vulnerability-folder.patch @@ -0,0 +1,154 @@ +From 87590ce6e373d1a5401f6539f0c59ef92dd924a9 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Sun, 7 Jan 2018 22:48:00 +0100 +Subject: [PATCH 2/2] sysfs/cpu: Add vulnerability folder + +As the meltdown/spectre problem affects several CPU architectures, it makes +sense to have common way to express whether a system is affected by a +particular vulnerability or not. If affected the way to express the +mitigation should be common as well. + +Create /sys/devices/system/cpu/vulnerabilities folder and files for +meltdown, spectre_v1 and spectre_v2. + +Allow architectures to override the show function. 
+ +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Will Deacon <will.deacon@arm.com> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Linus Torvalds <torvalds@linuxfoundation.org> +Cc: Borislav Petkov <bp@alien8.de> +Cc: David Woodhouse <dwmw@amazon.co.uk> +Link: https://lkml.kernel.org/r/20180107214913.096657732@linutronix.de +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 16 ++++++++ + drivers/base/Kconfig | 3 ++ + drivers/base/cpu.c | 48 ++++++++++++++++++++++ + include/linux/cpu.h | 7 ++++ + 4 files changed, 74 insertions(+) + +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index f3d5817c4ef0..bd3a88e16d8b 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -373,3 +373,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> + Description: information about CPUs heterogeneity. + + cpu_capacity: capacity of cpu#. ++ ++What: /sys/devices/system/cpu/vulnerabilities ++ /sys/devices/system/cpu/vulnerabilities/meltdown ++ /sys/devices/system/cpu/vulnerabilities/spectre_v1 ++ /sys/devices/system/cpu/vulnerabilities/spectre_v2 ++Date: Januar 2018 ++Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> ++Description: Information about CPU vulnerabilities ++ ++ The files are named after the code names of CPU ++ vulnerabilities. The output of those files reflects the ++ state of the CPUs in the system. Possible output values: ++ ++ "Not affected" CPU is not affected by the vulnerability ++ "Vulnerable" CPU is affected and no mitigation in effect ++ "Mitigation: $M" CPU is affetcted and mitigation $M is in effect +diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig +index 2f6614c9a229..37a71fd9043f 100644 +--- a/drivers/base/Kconfig ++++ b/drivers/base/Kconfig +@@ -235,6 +235,9 @@ config GENERIC_CPU_DEVICES + config GENERIC_CPU_AUTOPROBE + bool + ++config GENERIC_CPU_VULNERABILITIES ++ bool ++ + config SOC_BUS + bool + select GLOB +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index 321cd7b4d817..825964efda1d 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -501,10 +501,58 @@ static void __init cpu_dev_register_generic(void) + #endif + } + ++#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES ++ ++ssize_t __weak cpu_show_meltdown(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ ++ssize_t __weak cpu_show_spectre_v1(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ ++ssize_t __weak cpu_show_spectre_v2(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ ++static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); ++static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); ++static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); ++ ++static struct attribute *cpu_root_vulnerabilities_attrs[] = { ++ &dev_attr_meltdown.attr, ++ &dev_attr_spectre_v1.attr, ++ &dev_attr_spectre_v2.attr, ++ NULL ++}; ++ ++static const struct attribute_group cpu_root_vulnerabilities_group = { ++ .name = "vulnerabilities", ++ .attrs = cpu_root_vulnerabilities_attrs, ++}; ++ ++static void __init cpu_register_vulnerabilities(void) ++{ ++ if 
(sysfs_create_group(&cpu_subsys.dev_root->kobj, ++ &cpu_root_vulnerabilities_group)) ++ pr_err("Unable to register CPU vulnerabilities\n"); ++} ++ ++#else ++static inline void cpu_register_vulnerabilities(void) { } ++#endif ++ + void __init cpu_dev_init(void) + { + if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) + panic("Failed to register CPU subsystem"); + + cpu_dev_register_generic(); ++ cpu_register_vulnerabilities(); + } +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 938ea8ae0ba4..c816e6f2730c 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); + extern int cpu_add_dev_attr_group(struct attribute_group *attrs); + extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); + ++extern ssize_t cpu_show_meltdown(struct device *dev, ++ struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_spectre_v1(struct device *dev, ++ struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_spectre_v2(struct device *dev, ++ struct device_attribute *attr, char *buf); ++ + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, +-- +2.14.3 + diff --git a/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch b/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch new file mode 100644 index 000000000..8676c732f --- /dev/null +++ b/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch @@ -0,0 +1,82 @@ +From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001 +From: Tom Lendacky <thomas.lendacky@amd.com> +Date: Mon, 8 Jan 2018 16:09:32 -0600 +Subject: [PATCH 2/2] x86/cpu/AMD: Use LFENCE_RDTSC in preference to + MFENCE_RDTSC + +With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference +to MFENCE_RDTSC. However, since the kernel could be running under a +hypervisor that does not support writing that MSR, read the MSR back and +verify that the bit has been set successfully. If the MSR can be read +and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the +MFENCE_RDTSC feature. 
+ +Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Reviewed-by: Borislav Petkov <bp@suse.de> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Borislav Petkov <bp@alien8.de> +Cc: Dan Williams <dan.j.williams@intel.com> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: David Woodhouse <dwmw@amazon.co.uk> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net +--- + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++-- + 2 files changed, 17 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 1e7d710fef43..fa11fb1fa570 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -354,6 +354,7 @@ + #define MSR_FAM10H_NODE_ID 0xc001100c + #define MSR_F10H_DECFG 0xc0011029 + #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 ++#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) + + /* K8 MSRs */ + #define MSR_K8_TOP_MEM1 0xc001001a +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 5b438d81beb2..ea831c858195 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86 *c) + set_cpu_cap(c, X86_FEATURE_K8); + + if (cpu_has(c, X86_FEATURE_XMM2)) { ++ unsigned long long val; ++ int ret; ++ + /* + * A serializing LFENCE has less overhead than MFENCE, so + * use it for execution serialization. On families which +@@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86 *c) + msr_set_bit(MSR_F10H_DECFG, + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + +- /* MFENCE stops RDTSC speculation */ +- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); ++ /* ++ * Verify that the MSR write was successful (could be running ++ * under a hypervisor) and only then assume that LFENCE is ++ * serializing. 
++ */ ++ ret = rdmsrl_safe(MSR_F10H_DECFG, &val); ++ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { ++ /* A serializing LFENCE stops RDTSC speculation */ ++ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); ++ } else { ++ /* MFENCE stops RDTSC speculation */ ++ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); ++ } + } + + /* +-- +2.14.3 + diff --git a/configs/fedora/generic/x86/CONFIG_RETPOLINE b/configs/fedora/generic/x86/CONFIG_RETPOLINE new file mode 100644 index 000000000..c46e12644 --- /dev/null +++ b/configs/fedora/generic/x86/CONFIG_RETPOLINE @@ -0,0 +1 @@ +CONFIG_RETPOLINE=y diff --git a/kernel-i686-PAE.config b/kernel-i686-PAE.config index 34127ec22..e8179c6d2 100644 --- a/kernel-i686-PAE.config +++ b/kernel-i686-PAE.config @@ -4184,6 +4184,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel-i686-PAEdebug.config b/kernel-i686-PAEdebug.config index 6483d7993..dd5b99fab 100644 --- a/kernel-i686-PAEdebug.config +++ b/kernel-i686-PAEdebug.config @@ -4205,6 +4205,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel-i686-debug.config b/kernel-i686-debug.config index f9838b329..41d1907b3 100644 --- a/kernel-i686-debug.config +++ b/kernel-i686-debug.config @@ -4205,6 +4205,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel-i686.config b/kernel-i686.config index 5066de5f7..6e3f54b3e 100644 --- a/kernel-i686.config +++ b/kernel-i686.config @@ -4184,6 +4184,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel-x86_64-debug.config b/kernel-x86_64-debug.config index dfccfed98..fb8159c08 100644 --- a/kernel-x86_64-debug.config +++ b/kernel-x86_64-debug.config @@ -4283,6 +4283,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel-x86_64.config b/kernel-x86_64.config index a30c9a5bc..aebcd4086 100644 --- a/kernel-x86_64.config +++ b/kernel-x86_64.config @@ -4262,6 +4262,7 @@ CONFIG_RENESAS_PHY=m # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_RESET_HSDK_V1 is not set # CONFIG_RESET_TI_SYSCON is not set +CONFIG_RETPOLINE=y # CONFIG_RFD77402 is not set # CONFIG_RFD_FTL is not set CONFIG_RFKILL_GPIO=m diff --git a/kernel.spec b/kernel.spec index ebbf8f978..7a9b7936c 100644 --- a/kernel.spec +++ b/kernel.spec @@ -635,6 +635,12 @@ Patch641: 0001-Bluetooth-btusb-Disable-autosuspend-on-QCA-Rome-devi.patch # Speculative Execution patches Patch642: prevent-bounds-check-bypass-via-speculative-execution.patch +Patch643: 0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch +Patch644: 0002-sysfs-cpu-Add-vulnerability-folder.patch +Patch645: 0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch 
+Patch646: 0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch +Patch647: retpoline.patch + # END OF PATCH DEFINITIONS @@ -1894,6 +1900,9 @@ fi # # %changelog +* Thu Jan 11 2018 Justin M. Forbes <jforbes@fedoraproject.org> +- Initial retpoline patches for Spectre v2 + * Wed Jan 10 2018 Laura Abbott <labbott@redhat.com> - 4.15.0-0.rc7.git2.1 - Linux v4.15-rc7-102-gcf1fb158230e diff --git a/retpoline.patch b/retpoline.patch new file mode 100644 index 000000000..005325d54 --- /dev/null +++ b/retpoline.patch @@ -0,0 +1,1480 @@ +From 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner <tglx@linutronix.de> +Date: Sun, 7 Jan 2018 22:48:01 +0100 +Subject: [PATCH] x86/cpu: Implement CPU vulnerabilites sysfs functions + +Implement the CPU vulnerabilty show functions for meltdown, spectre_v1 and +spectre_v2. + +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Will Deacon <will.deacon@arm.com> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Linus Torvalds <torvalds@linuxfoundation.org> +Cc: Borislav Petkov <bp@alien8.de> +Cc: David Woodhouse <dwmw@amazon.co.uk> +Link: https://lkml.kernel.org/r/20180107214913.177414879@linutronix.de +--- + arch/x86/Kconfig | 1 + + arch/x86/kernel/cpu/bugs.c | 29 +++++++++++++++++++++++++++++ + 2 files changed, 30 insertions(+) + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index cd5199de231e..e23d21ac745a 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -89,6 +89,7 @@ config X86 + select GENERIC_CLOCKEVENTS_MIN_ADJUST + select GENERIC_CMOS_UPDATE + select GENERIC_CPU_AUTOPROBE ++ select GENERIC_CPU_VULNERABILITIES + select GENERIC_EARLY_IOREMAP + select GENERIC_FIND_FIRST_BIT + select GENERIC_IOMAP +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index ba0b2424c9b0..76ad6cb44b40 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -10,6 +10,7 @@ + */ + #include <linux/init.h> + #include <linux/utsname.h> ++#include <linux/cpu.h> + #include <asm/bugs.h> + #include <asm/processor.h> + #include <asm/processor-flags.h> +@@ -60,3 +61,31 @@ void __init check_bugs(void) + set_memory_4k((unsigned long)__va(0), 1); + #endif + } ++ ++#ifdef CONFIG_SYSFS ++ssize_t cpu_show_meltdown(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) ++ return sprintf(buf, "Not affected\n"); ++ if (boot_cpu_has(X86_FEATURE_PTI)) ++ return sprintf(buf, "Mitigation: PTI\n"); ++ return sprintf(buf, "Vulnerable\n"); ++} ++ ++ssize_t cpu_show_spectre_v1(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) ++ return sprintf(buf, "Not affected\n"); ++ return sprintf(buf, "Vulnerable\n"); ++} ++ ++ssize_t cpu_show_spectre_v2(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) ++ return sprintf(buf, "Not affected\n"); ++ return sprintf(buf, "Vulnerable\n"); ++} ++#endif +-- +2.14.3 + +From d46717c610dcfa2cba5c87500c928993371ef1ad Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:07 +0000 +Subject: [PATCH 01/10] x86/retpoline: Add initial retpoline support + +Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide +the corresponding thunks. 
Provide assembler macros for invoking the thunks +in the same way that GCC does, from native and inline assembler. + +This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In +some circumstances, IBRS microcode features may be used instead, and the +retpoline can be disabled. + +On AMD CPUs if lfence is serialising, the retpoline can be dramatically +simplified to a simple "lfence; jmp *\reg". A future patch, after it has +been verified that lfence really is serialising in all circumstances, can +enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition +to X86_FEATURE_RETPOLINE. + +Do not align the retpoline in the altinstr section, because there is no +guarantee that it stays aligned when it's copied over the oldinstr during +alternative patching. + +[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks] +[ tglx: Put actual function CALL/JMP in front of the macros, convert to + symbolic labels ] + +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-2-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/Kconfig | 13 ++++ + arch/x86/Makefile | 10 ++++ + arch/x86/include/asm/asm-prototypes.h | 25 ++++++++ + arch/x86/include/asm/cpufeatures.h | 2 + + arch/x86/include/asm/nospec-branch.h | 109 ++++++++++++++++++++++++++++++++++ + arch/x86/kernel/cpu/common.c | 4 ++ + arch/x86/lib/Makefile | 1 + + arch/x86/lib/retpoline.S | 48 +++++++++++++++ + 8 files changed, 212 insertions(+) + create mode 100644 arch/x86/include/asm/nospec-branch.h + create mode 100644 arch/x86/lib/retpoline.S + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index e23d21ac745a..d1819161cc6c 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -429,6 +429,19 @@ config GOLDFISH + def_bool y + depends on X86_GOLDFISH + ++config RETPOLINE ++ bool "Avoid speculative indirect branches in kernel" ++ default y ++ help ++ Compile kernel with the retpoline compiler options to guard against ++ kernel-to-user data leaks by avoiding speculative indirect ++ branches. Requires a compiler with -mindirect-branch=thunk-extern ++ support for full protection. The kernel may run slower. ++ ++ Without compiler support, at least indirect branches in assembler ++ code are eliminated. Since this includes the syscall entry path, ++ it is not entirely pointless. 
++ + config INTEL_RDT + bool "Intel Resource Director Technology support" + default n +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index a20eacd9c7e9..974c61864978 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -235,6 +235,16 @@ KBUILD_CFLAGS += -Wno-sign-compare + # + KBUILD_CFLAGS += -fno-asynchronous-unwind-tables + ++# Avoid indirect branches in kernel to deal with Spectre ++ifdef CONFIG_RETPOLINE ++ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) ++ ifneq ($(RETPOLINE_CFLAGS),) ++ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE ++ else ++ $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.) ++ endif ++endif ++ + archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/x86/tools relocs + +diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h +index ff700d81e91e..0927cdc4f946 100644 +--- a/arch/x86/include/asm/asm-prototypes.h ++++ b/arch/x86/include/asm/asm-prototypes.h +@@ -11,7 +11,32 @@ + #include <asm/pgtable.h> + #include <asm/special_insns.h> + #include <asm/preempt.h> ++#include <asm/asm.h> + + #ifndef CONFIG_X86_CMPXCHG64 + extern void cmpxchg8b_emu(void); + #endif ++ ++#ifdef CONFIG_RETPOLINE ++#ifdef CONFIG_X86_32 ++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); ++#else ++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); ++INDIRECT_THUNK(8) ++INDIRECT_THUNK(9) ++INDIRECT_THUNK(10) ++INDIRECT_THUNK(11) ++INDIRECT_THUNK(12) ++INDIRECT_THUNK(13) ++INDIRECT_THUNK(14) ++INDIRECT_THUNK(15) ++#endif ++INDIRECT_THUNK(ax) ++INDIRECT_THUNK(bx) ++INDIRECT_THUNK(cx) ++INDIRECT_THUNK(dx) ++INDIRECT_THUNK(si) ++INDIRECT_THUNK(di) ++INDIRECT_THUNK(bp) ++INDIRECT_THUNK(sp) ++#endif /* CONFIG_RETPOLINE */ +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 1641c2f96363..f275447862f4 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -203,6 +203,8 @@ + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ + #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ + #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ ++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ + #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ + #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +new file mode 100644 +index 000000000000..7f58713b27c4 +--- /dev/null ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -0,0 +1,109 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#ifndef __NOSPEC_BRANCH_H__ ++#define __NOSPEC_BRANCH_H__ ++ ++#include <asm/alternative.h> ++#include <asm/alternative-asm.h> ++#include <asm/cpufeatures.h> ++ ++#ifdef __ASSEMBLY__ ++ ++/* ++ * These are the bare retpoline primitives for indirect jmp and call. ++ * Do not use these directly; they only exist to make the ALTERNATIVE ++ * invocation below less ugly. 
++ */ ++.macro RETPOLINE_JMP reg:req ++ call .Ldo_rop_\@ ++.Lspec_trap_\@: ++ pause ++ jmp .Lspec_trap_\@ ++.Ldo_rop_\@: ++ mov \reg, (%_ASM_SP) ++ ret ++.endm ++ ++/* ++ * This is a wrapper around RETPOLINE_JMP so the called function in reg ++ * returns to the instruction after the macro. ++ */ ++.macro RETPOLINE_CALL reg:req ++ jmp .Ldo_call_\@ ++.Ldo_retpoline_jmp_\@: ++ RETPOLINE_JMP \reg ++.Ldo_call_\@: ++ call .Ldo_retpoline_jmp_\@ ++.endm ++ ++/* ++ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple ++ * indirect jmp/call which may be susceptible to the Spectre variant 2 ++ * attack. ++ */ ++.macro JMP_NOSPEC reg:req ++#ifdef CONFIG_RETPOLINE ++ ALTERNATIVE_2 __stringify(jmp *\reg), \ ++ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ ++ __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD ++#else ++ jmp *\reg ++#endif ++.endm ++ ++.macro CALL_NOSPEC reg:req ++#ifdef CONFIG_RETPOLINE ++ ALTERNATIVE_2 __stringify(call *\reg), \ ++ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ ++ __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD ++#else ++ call *\reg ++#endif ++.endm ++ ++#else /* __ASSEMBLY__ */ ++ ++#if defined(CONFIG_X86_64) && defined(RETPOLINE) ++/* ++ * Since the inline asm uses the %V modifier which is only in newer GCC, ++ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. ++ */ ++# define CALL_NOSPEC ALTERNATIVE( \ ++ "call *%[thunk_target]\n", \ ++ "call __x86_indirect_thunk_%V[thunk_target]\n", \ ++ X86_FEATURE_RETPOLINE) ++# define THUNK_TARGET(addr) [thunk_target] "r" (addr) ++#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) ++/* ++ * For i386 we use the original ret-equivalent retpoline, because ++ * otherwise we'll run out of registers. We don't care about CET ++ * here, anyway. 
++ */ ++# define CALL_NOSPEC ALTERNATIVE( \ ++ "call *%[thunk_target]\n", \ ++ "" \ ++ " jmp do_call%=;\n" \ ++ " .align 16\n" \ ++ "do_retpoline%=:\n" \ ++ " call do_rop%=;\n" \ ++ "spec_trap%=:\n" \ ++ " pause;\n" \ ++ " jmp spec_trap%=;\n" \ ++ " .align 16\n" \ ++ "do_rop%=:\n" \ ++ " addl $4, %%esp;\n" \ ++ " pushl %[thunk_target];\n" \ ++ " ret;\n" \ ++ " .align 16\n" \ ++ "do_call%=:\n" \ ++ " call do_retpoline%=;\n", \ ++ X86_FEATURE_RETPOLINE) ++ ++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) ++#else /* No retpoline */ ++# define CALL_NOSPEC "call *%[thunk_target]\n" ++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) ++#endif ++ ++#endif /* __ASSEMBLY__ */ ++#endif /* __NOSPEC_BRANCH_H__ */ +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 372ba3fb400f..7a671d1ae3cb 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -905,6 +905,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + ++#ifdef CONFIG_RETPOLINE ++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE); ++#endif ++ + fpu__init_system(c); + + #ifdef CONFIG_X86_32 +diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile +index 457f681ef379..d435c89875c1 100644 +--- a/arch/x86/lib/Makefile ++++ b/arch/x86/lib/Makefile +@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o + lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o + lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o + lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o ++lib-$(CONFIG_RETPOLINE) += retpoline.o + + obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o + +diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S +new file mode 100644 +index 000000000000..cb45c6cb465f +--- /dev/null ++++ b/arch/x86/lib/retpoline.S +@@ -0,0 +1,48 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#include <linux/stringify.h> ++#include <linux/linkage.h> ++#include <asm/dwarf2.h> ++#include <asm/cpufeatures.h> ++#include <asm/alternative-asm.h> ++#include <asm/export.h> ++#include <asm/nospec-branch.h> ++ ++.macro THUNK reg ++ .section .text.__x86.indirect_thunk.\reg ++ ++ENTRY(__x86_indirect_thunk_\reg) ++ CFI_STARTPROC ++ JMP_NOSPEC %\reg ++ CFI_ENDPROC ++ENDPROC(__x86_indirect_thunk_\reg) ++.endm ++ ++/* ++ * Despite being an assembler file we can't just use .irp here ++ * because __KSYM_DEPS__ only uses the C preprocessor and would ++ * only see one instance of "__x86_indirect_thunk_\reg" rather ++ * than one per register with the correct names. So we do it ++ * the simple and nasty way... 
++ */ ++#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg) ++#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) ++ ++GENERATE_THUNK(_ASM_AX) ++GENERATE_THUNK(_ASM_BX) ++GENERATE_THUNK(_ASM_CX) ++GENERATE_THUNK(_ASM_DX) ++GENERATE_THUNK(_ASM_SI) ++GENERATE_THUNK(_ASM_DI) ++GENERATE_THUNK(_ASM_BP) ++GENERATE_THUNK(_ASM_SP) ++#ifdef CONFIG_64BIT ++GENERATE_THUNK(r8) ++GENERATE_THUNK(r9) ++GENERATE_THUNK(r10) ++GENERATE_THUNK(r11) ++GENERATE_THUNK(r12) ++GENERATE_THUNK(r13) ++GENERATE_THUNK(r14) ++GENERATE_THUNK(r15) ++#endif +-- +2.14.3 + +From 59b6e22f92f9a86dbd0798db72adc97bdb831f86 Mon Sep 17 00:00:00 2001 +From: Andi Kleen <ak@linux.intel.com> +Date: Tue, 9 Jan 2018 14:43:08 +0000 +Subject: [PATCH 02/10] x86/retpoline: Temporarily disable objtool when + CONFIG_RETPOLINE=y + +objtool's assembler currently cannot deal with the code generated by the +retpoline compiler and throws hundreds of warnings, mostly because it sees +calls that don't have a symbolic target. + +Exclude all the options that rely on objtool when RETPOLINE is active. + +This mainly means that the kernel has to fallback to use the frame pointer +unwinder and livepatch is not supported. + +Josh is looking into resolving the issue. + +Signed-off-by: Andi Kleen <ak@linux.intel.com> +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-3-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/Kconfig | 4 ++-- + arch/x86/Kconfig.debug | 6 +++--- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index d1819161cc6c..abeac4b80b74 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -172,8 +172,8 @@ config X86 + select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE + select HAVE_REGS_AND_STACK_ACCESS_API +- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION +- select HAVE_STACK_VALIDATION if X86_64 ++ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION && !RETPOLINE ++ select HAVE_STACK_VALIDATION if X86_64 && !RETPOLINE + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_UNSTABLE_SCHED_CLOCK + select HAVE_USER_RETURN_NOTIFIER +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug +index 6293a8768a91..9f3928d744bc 100644 +--- a/arch/x86/Kconfig.debug ++++ b/arch/x86/Kconfig.debug +@@ -359,8 +359,8 @@ config PUNIT_ATOM_DEBUG + + choice + prompt "Choose kernel unwinder" +- default UNWINDER_ORC if X86_64 +- default UNWINDER_FRAME_POINTER if X86_32 ++ default UNWINDER_ORC if X86_64 && !RETPOLINE ++ default UNWINDER_FRAME_POINTER if X86_32 || RETPOLINE + ---help--- + This determines which method will be used for unwinding kernel stack + traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack, +@@ -368,7 +368,7 @@ choice + + config UNWINDER_ORC + bool "ORC unwinder" +- depends on X86_64 ++ depends on X86_64 && !RETPOLINE + select 
STACK_VALIDATION + ---help--- + This option enables the ORC (Oops Rewind Capability) unwinder for +-- +2.14.3 + +From 86d057614112971f7d5bbac45f67869adca79852 Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:09 +0000 +Subject: [PATCH 03/10] x86/spectre: Add boot time option to select Spectre v2 + mitigation + +Add a spectre_v2= option to select the mitigation used for the indirect +branch speculation vulnerability. + +Currently, the only option available is retpoline, in its various forms. +This will be expanded to cover the new IBRS/IBPB microcode features. + +The RETPOLINE_AMD feature relies on a serializing LFENCE for speculation +control. For AMD hardware, only set RETPOLINE_AMD if LFENCE is a +serializing instruction, which is indicated by the LFENCE_RDTSC feature. + +[ tglx: Folded back the LFENCE/AMD fixes and reworked it so IBRS + integration becomes simple ] + +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Cc: Tom Lendacky <thomas.lendacky@amd.com> +Link: https://lkml.kernel.org/r/1515508997-6154-4-git-send-email-dwmw@amazon.co.uk +--- + Documentation/admin-guide/kernel-parameters.txt | 28 +++++ + arch/x86/include/asm/nospec-branch.h | 10 ++ + arch/x86/kernel/cpu/bugs.c | 158 +++++++++++++++++++++++- + arch/x86/kernel/cpu/common.c | 4 - + 4 files changed, 195 insertions(+), 5 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 905991745d26..8122b5f98ea1 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2599,6 +2599,11 @@ + nosmt [KNL,S390] Disable symmetric multithreading (SMT). + Equivalent to smt=1. + ++ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 ++ (indirect branch prediction) vulnerability. System may ++ allow data leaks with this option, which is equivalent ++ to spectre_v2=off. ++ + noxsave [BUGS=X86] Disables x86 extended register state save + and restore using xsave. The kernel will fallback to + enabling legacy floating-point and sse state. +@@ -3908,6 +3913,29 @@ + sonypi.*= [HW] Sony Programmable I/O Control Device driver + See Documentation/laptops/sonypi.txt + ++ spectre_v2= [X86] Control mitigation of Spectre variant 2 ++ (indirect branch speculation) vulnerability. ++ ++ on - unconditionally enable ++ off - unconditionally disable ++ auto - kernel detects whether your CPU model is ++ vulnerable ++ ++ Selecting 'on' will, and 'auto' may, choose a ++ mitigation method at run time according to the ++ CPU, the available microcode, the setting of the ++ CONFIG_RETPOLINE configuration option, and the ++ compiler with which the kernel was built. ++ ++ Specific mitigations can also be selected manually: ++ ++ retpoline - replace indirect branches ++ retpoline,generic - google's original retpoline ++ retpoline,amd - AMD-specific minimal thunk ++ ++ Not specifying this option is equivalent to ++ spectre_v2=auto. 
++ + spia_io_base= [HW,MTD] + spia_fio_base= + spia_pedr= +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 7f58713b27c4..7d70ea977fbe 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -105,5 +105,15 @@ + # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) + #endif + ++/* The Spectre V2 mitigation variants */ ++enum spectre_v2_mitigation { ++ SPECTRE_V2_NONE, ++ SPECTRE_V2_RETPOLINE_MINIMAL, ++ SPECTRE_V2_RETPOLINE_MINIMAL_AMD, ++ SPECTRE_V2_RETPOLINE_GENERIC, ++ SPECTRE_V2_RETPOLINE_AMD, ++ SPECTRE_V2_IBRS, ++}; ++ + #endif /* __ASSEMBLY__ */ + #endif /* __NOSPEC_BRANCH_H__ */ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 76ad6cb44b40..e4dc26185aa7 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -11,6 +11,9 @@ + #include <linux/init.h> + #include <linux/utsname.h> + #include <linux/cpu.h> ++ ++#include <asm/nospec-branch.h> ++#include <asm/cmdline.h> + #include <asm/bugs.h> + #include <asm/processor.h> + #include <asm/processor-flags.h> +@@ -21,6 +24,8 @@ + #include <asm/pgtable.h> + #include <asm/set_memory.h> + ++static void __init spectre_v2_select_mitigation(void); ++ + void __init check_bugs(void) + { + identify_boot_cpu(); +@@ -30,6 +35,9 @@ void __init check_bugs(void) + print_cpu_info(&boot_cpu_data); + } + ++ /* Select the proper spectre mitigation before patching alternatives */ ++ spectre_v2_select_mitigation(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. +@@ -62,6 +70,153 @@ void __init check_bugs(void) + #endif + } + ++/* The kernel command line selection */ ++enum spectre_v2_mitigation_cmd { ++ SPECTRE_V2_CMD_NONE, ++ SPECTRE_V2_CMD_AUTO, ++ SPECTRE_V2_CMD_FORCE, ++ SPECTRE_V2_CMD_RETPOLINE, ++ SPECTRE_V2_CMD_RETPOLINE_GENERIC, ++ SPECTRE_V2_CMD_RETPOLINE_AMD, ++}; ++ ++static const char *spectre_v2_strings[] = { ++ [SPECTRE_V2_NONE] = "Vulnerable", ++ [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", ++ [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", ++ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", ++ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", ++}; ++ ++#undef pr_fmt ++#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt ++ ++static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; ++ ++static void __init spec2_print_if_insecure(const char *reason) ++{ ++ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) ++ pr_info("%s\n", reason); ++} ++ ++static void __init spec2_print_if_secure(const char *reason) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) ++ pr_info("%s\n", reason); ++} ++ ++static inline bool retp_compiler(void) ++{ ++ return __is_defined(RETPOLINE); ++} ++ ++static inline bool match_option(const char *arg, int arglen, const char *opt) ++{ ++ int len = strlen(opt); ++ ++ return len == arglen && !strncmp(arg, opt, len); ++} ++ ++static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) ++{ ++ char arg[20]; ++ int ret; ++ ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, ++ sizeof(arg)); ++ if (ret > 0) { ++ if (match_option(arg, ret, "off")) { ++ goto disable; ++ } else if (match_option(arg, ret, "on")) { ++ spec2_print_if_secure("force enabled on command line."); ++ return SPECTRE_V2_CMD_FORCE; ++ } else if (match_option(arg, ret, "retpoline")) { ++ spec2_print_if_insecure("retpoline selected on command line."); ++ 
return SPECTRE_V2_CMD_RETPOLINE; ++ } else if (match_option(arg, ret, "retpoline,amd")) { ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { ++ pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); ++ return SPECTRE_V2_CMD_AUTO; ++ } ++ spec2_print_if_insecure("AMD retpoline selected on command line."); ++ return SPECTRE_V2_CMD_RETPOLINE_AMD; ++ } else if (match_option(arg, ret, "retpoline,generic")) { ++ spec2_print_if_insecure("generic retpoline selected on command line."); ++ return SPECTRE_V2_CMD_RETPOLINE_GENERIC; ++ } else if (match_option(arg, ret, "auto")) { ++ return SPECTRE_V2_CMD_AUTO; ++ } ++ } ++ ++ if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) ++ return SPECTRE_V2_CMD_AUTO; ++disable: ++ spec2_print_if_insecure("disabled on command line."); ++ return SPECTRE_V2_CMD_NONE; ++} ++ ++static void __init spectre_v2_select_mitigation(void) ++{ ++ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); ++ enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; ++ ++ /* ++ * If the CPU is not affected and the command line mode is NONE or AUTO ++ * then nothing to do. ++ */ ++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && ++ (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) ++ return; ++ ++ switch (cmd) { ++ case SPECTRE_V2_CMD_NONE: ++ return; ++ ++ case SPECTRE_V2_CMD_FORCE: ++ /* FALLTRHU */ ++ case SPECTRE_V2_CMD_AUTO: ++ goto retpoline_auto; ++ ++ case SPECTRE_V2_CMD_RETPOLINE_AMD: ++ if (IS_ENABLED(CONFIG_RETPOLINE)) ++ goto retpoline_amd; ++ break; ++ case SPECTRE_V2_CMD_RETPOLINE_GENERIC: ++ if (IS_ENABLED(CONFIG_RETPOLINE)) ++ goto retpoline_generic; ++ break; ++ case SPECTRE_V2_CMD_RETPOLINE: ++ if (IS_ENABLED(CONFIG_RETPOLINE)) ++ goto retpoline_auto; ++ break; ++ } ++ pr_err("kernel not compiled with retpoline; no mitigation available!"); ++ return; ++ ++retpoline_auto: ++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { ++ retpoline_amd: ++ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { ++ pr_err("LFENCE not serializing. Switching to generic retpoline\n"); ++ goto retpoline_generic; ++ } ++ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : ++ SPECTRE_V2_RETPOLINE_MINIMAL_AMD; ++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); ++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE); ++ } else { ++ retpoline_generic: ++ mode = retp_compiler() ? 
SPECTRE_V2_RETPOLINE_GENERIC : ++ SPECTRE_V2_RETPOLINE_MINIMAL; ++ setup_force_cpu_cap(X86_FEATURE_RETPOLINE); ++ } ++ ++ spectre_v2_enabled = mode; ++ pr_info("%s\n", spectre_v2_strings[mode]); ++} ++ ++#undef pr_fmt ++ + #ifdef CONFIG_SYSFS + ssize_t cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +@@ -86,6 +241,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, + { + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); +- return sprintf(buf, "Vulnerable\n"); ++ ++ return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]); + } + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 7a671d1ae3cb..372ba3fb400f 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -905,10 +905,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + +-#ifdef CONFIG_RETPOLINE +- setup_force_cpu_cap(X86_FEATURE_RETPOLINE); +-#endif +- + fpu__init_system(c); + + #ifdef CONFIG_X86_32 +-- +2.14.3 + +From b3a96862283e68914d1f74f160ab980dacf811ee Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:10 +0000 +Subject: [PATCH 04/10] x86/retpoline/crypto: Convert crypto assembler indirect + jumps + +Convert all indirect jumps in crypto assembler code to use non-speculative +sequences when CONFIG_RETPOLINE is enabled. + +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-5-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/crypto/aesni-intel_asm.S | 5 +++-- + arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3 ++- + arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++- + arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3 ++- + 4 files changed, 9 insertions(+), 5 deletions(-) + +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index 16627fec80b2..3d09e3aca18d 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -32,6 +32,7 @@ + #include <linux/linkage.h> + #include <asm/inst.h> + #include <asm/frame.h> ++#include <asm/nospec-branch.h> + + /* + * The following macros are used to move an (un)aligned 16 byte value to/from +@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8) + pxor INC, STATE4 + movdqu IV, 0x30(OUTP) + +- call *%r11 ++ CALL_NOSPEC %r11 + + movdqu 0x00(OUTP), INC + pxor INC, STATE1 +@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8) + _aesni_gf128mul_x_ble() + movups IV, (IVP) + +- call *%r11 ++ CALL_NOSPEC %r11 + + movdqu 0x40(OUTP), INC + pxor INC, STATE1 +diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S +index f7c495e2863c..a14af6eb09cb 100644 +--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S +@@ -17,6 
+17,7 @@ + + #include <linux/linkage.h> + #include <asm/frame.h> ++#include <asm/nospec-branch.h> + + #define CAMELLIA_TABLE_BYTE_LEN 272 + +@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way: + vpxor 14 * 16(%rax), %xmm15, %xmm14; + vpxor 15 * 16(%rax), %xmm15, %xmm15; + +- call *%r9; ++ CALL_NOSPEC %r9; + + addq $(16 * 16), %rsp; + +diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +index eee5b3982cfd..b66bbfa62f50 100644 +--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +@@ -12,6 +12,7 @@ + + #include <linux/linkage.h> + #include <asm/frame.h> ++#include <asm/nospec-branch.h> + + #define CAMELLIA_TABLE_BYTE_LEN 272 + +@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way: + vpxor 14 * 32(%rax), %ymm15, %ymm14; + vpxor 15 * 32(%rax), %ymm15, %ymm15; + +- call *%r9; ++ CALL_NOSPEC %r9; + + addq $(16 * 32), %rsp; + +diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +index 7a7de27c6f41..d9b734d0c8cc 100644 +--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S ++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +@@ -45,6 +45,7 @@ + + #include <asm/inst.h> + #include <linux/linkage.h> ++#include <asm/nospec-branch.h> + + ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction + +@@ -172,7 +173,7 @@ continue_block: + movzxw (bufp, %rax, 2), len + lea crc_array(%rip), bufp + lea (bufp, len, 1), bufp +- jmp *bufp ++ JMP_NOSPEC bufp + + ################################################################ + ## 2a) PROCESS FULL BLOCKS: +-- +2.14.3 + +From 2558106c7a47e16968a10fa66eea78a096fabfe6 Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:11 +0000 +Subject: [PATCH 05/10] x86/retpoline/entry: Convert entry assembler indirect + jumps + +Convert indirect jumps in core 32/64bit entry assembler code to use +non-speculative sequences when CONFIG_RETPOLINE is enabled. + +Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return +address after the 'call' instruction must be *precisely* at the +.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work, +and the use of alternatives will mess that up unless we play horrid +games to prepend with NOPs and make the variants the same length. It's +not worth it; in the case where we ALTERNATIVE out the retpoline, the +first instruction at __x86.indirect_thunk.rax is going to be a bare +jmp *%rax anyway. 
+ +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Ingo Molnar <mingo@kernel.org> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-6-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/entry/entry_32.S | 5 +++-- + arch/x86/entry/entry_64.S | 12 +++++++++--- + 2 files changed, 12 insertions(+), 5 deletions(-) + +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index ace8f321a5a1..a1f28a54f23a 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -44,6 +44,7 @@ + #include <asm/asm.h> + #include <asm/smap.h> + #include <asm/frame.h> ++#include <asm/nospec-branch.h> + + .section .entry.text, "ax" + +@@ -290,7 +291,7 @@ ENTRY(ret_from_fork) + + /* kernel thread */ + 1: movl %edi, %eax +- call *%ebx ++ CALL_NOSPEC %ebx + /* + * A kernel thread is allowed to return here after successfully + * calling do_execve(). Exit to userspace to complete the execve() +@@ -919,7 +920,7 @@ common_exception: + movl %ecx, %es + TRACE_IRQS_OFF + movl %esp, %eax # pt_regs pointer +- call *%edi ++ CALL_NOSPEC %edi + jmp ret_from_exception + END(common_exception) + +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index ed31d00dc5ee..59874bc1aed2 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -37,6 +37,7 @@ + #include <asm/pgtable_types.h> + #include <asm/export.h> + #include <asm/frame.h> ++#include <asm/nospec-branch.h> + #include <linux/err.h> + + #include "calling.h" +@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline) + */ + pushq %rdi + movq $entry_SYSCALL_64_stage2, %rdi +- jmp *%rdi ++ JMP_NOSPEC %rdi + END(entry_SYSCALL_64_trampoline) + + .popsection +@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath: + * It might end up jumping to the slow path. If it jumps, RAX + * and all argument registers are clobbered. + */ ++#ifdef CONFIG_RETPOLINE ++ movq sys_call_table(, %rax, 8), %rax ++ call __x86_indirect_thunk_rax ++#else + call *sys_call_table(, %rax, 8) ++#endif + .Lentry_SYSCALL_64_after_fastpath_call: + + movq %rax, RAX(%rsp) +@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64) + jmp entry_SYSCALL64_slow_path + + 1: +- jmp *%rax /* Called from C */ ++ JMP_NOSPEC %rax /* Called from C */ + END(stub_ptregs_64) + + .macro ptregs_stub func +@@ -517,7 +523,7 @@ ENTRY(ret_from_fork) + 1: + /* kernel thread */ + movq %r12, %rdi +- call *%rbx ++ CALL_NOSPEC %rbx + /* + * A kernel thread is allowed to return here after successfully + * calling do_execve(). Exit to userspace to complete the execve() +-- +2.14.3 + +From 42f7c812022441ffba2d5ccca3acf6380201f19e Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:12 +0000 +Subject: [PATCH 06/10] x86/retpoline/ftrace: Convert ftrace assembler indirect + jumps + +Convert all indirect jumps in ftrace assembler code to use non-speculative +sequences when CONFIG_RETPOLINE is enabled. 
+ +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-7-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/kernel/ftrace_32.S | 6 ++++-- + arch/x86/kernel/ftrace_64.S | 8 ++++---- + 2 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S +index b6c6468e10bc..4c8440de3355 100644 +--- a/arch/x86/kernel/ftrace_32.S ++++ b/arch/x86/kernel/ftrace_32.S +@@ -8,6 +8,7 @@ + #include <asm/segment.h> + #include <asm/export.h> + #include <asm/ftrace.h> ++#include <asm/nospec-branch.h> + + #ifdef CC_USING_FENTRY + # define function_hook __fentry__ +@@ -197,7 +198,8 @@ ftrace_stub: + movl 0x4(%ebp), %edx + subl $MCOUNT_INSN_SIZE, %eax + +- call *ftrace_trace_function ++ movl ftrace_trace_function, %ecx ++ CALL_NOSPEC %ecx + + popl %edx + popl %ecx +@@ -241,5 +243,5 @@ return_to_handler: + movl %eax, %ecx + popl %edx + popl %eax +- jmp *%ecx ++ JMP_NOSPEC %ecx + #endif +diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S +index c832291d948a..7cb8ba08beb9 100644 +--- a/arch/x86/kernel/ftrace_64.S ++++ b/arch/x86/kernel/ftrace_64.S +@@ -7,7 +7,7 @@ + #include <asm/ptrace.h> + #include <asm/ftrace.h> + #include <asm/export.h> +- ++#include <asm/nospec-branch.h> + + .code64 + .section .entry.text, "ax" +@@ -286,8 +286,8 @@ trace: + * ip and parent ip are used and the list function is called when + * function tracing is enabled. + */ +- call *ftrace_trace_function +- ++ movq ftrace_trace_function, %r8 ++ CALL_NOSPEC %r8 + restore_mcount_regs + + jmp fgraph_trace +@@ -329,5 +329,5 @@ GLOBAL(return_to_handler) + movq 8(%rsp), %rdx + movq (%rsp), %rax + addq $24, %rsp +- jmp *%rdi ++ JMP_NOSPEC %rdi + #endif +-- +2.14.3 + +From f14fd95d2f3e611619756ea3c008aee3b4bd4978 Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:13 +0000 +Subject: [PATCH 07/10] x86/retpoline/hyperv: Convert assembler indirect jumps + +Convert all indirect jumps in hyperv inline asm code to use non-speculative +sequences when CONFIG_RETPOLINE is enabled. 
+ +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-8-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/include/asm/mshyperv.h | 18 ++++++++++-------- + 1 file changed, 10 insertions(+), 8 deletions(-) + +diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h +index 581bb54dd464..5119e4b555cc 100644 +--- a/arch/x86/include/asm/mshyperv.h ++++ b/arch/x86/include/asm/mshyperv.h +@@ -7,6 +7,7 @@ + #include <linux/nmi.h> + #include <asm/io.h> + #include <asm/hyperv.h> ++#include <asm/nospec-branch.h> + + /* + * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent +@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) + return U64_MAX; + + __asm__ __volatile__("mov %4, %%r8\n" +- "call *%5" ++ CALL_NOSPEC + : "=a" (hv_status), ASM_CALL_CONSTRAINT, + "+c" (control), "+d" (input_address) +- : "r" (output_address), "m" (hv_hypercall_pg) ++ : "r" (output_address), ++ THUNK_TARGET(hv_hypercall_pg) + : "cc", "memory", "r8", "r9", "r10", "r11"); + #else + u32 input_address_hi = upper_32_bits(input_address); +@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) + if (!hv_hypercall_pg) + return U64_MAX; + +- __asm__ __volatile__("call *%7" ++ __asm__ __volatile__(CALL_NOSPEC + : "=A" (hv_status), + "+c" (input_address_lo), ASM_CALL_CONSTRAINT + : "A" (control), + "b" (input_address_hi), + "D"(output_address_hi), "S"(output_address_lo), +- "m" (hv_hypercall_pg) ++ THUNK_TARGET(hv_hypercall_pg) + : "cc", "memory"); + #endif /* !x86_64 */ + return hv_status; +@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) + + #ifdef CONFIG_X86_64 + { +- __asm__ __volatile__("call *%4" ++ __asm__ __volatile__(CALL_NOSPEC + : "=a" (hv_status), ASM_CALL_CONSTRAINT, + "+c" (control), "+d" (input1) +- : "m" (hv_hypercall_pg) ++ : THUNK_TARGET(hv_hypercall_pg) + : "cc", "r8", "r9", "r10", "r11"); + } + #else +@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) + u32 input1_hi = upper_32_bits(input1); + u32 input1_lo = lower_32_bits(input1); + +- __asm__ __volatile__ ("call *%5" ++ __asm__ __volatile__ (CALL_NOSPEC + : "=A"(hv_status), + "+c"(input1_lo), + ASM_CALL_CONSTRAINT + : "A" (control), + "b" (input1_hi), +- "m" (hv_hypercall_pg) ++ THUNK_TARGET(hv_hypercall_pg) + : "cc", "edi", "esi"); + } + #endif +-- +2.14.3 + +From b569cb1e72bda00e7e6245519fe7d0d0ab13898e Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:14 +0000 +Subject: [PATCH 08/10] x86/retpoline/xen: Convert Xen hypercall indirect jumps + +Convert indirect call in Xen hypercall to use non-speculative sequence, +when CONFIG_RETPOLINE is enabled. 
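Mechanically, a conversion like this Xen one needs only two changes: swap the "call *%[name]" template for CALL_NOSPEC, and rename the asm operand while keeping its constraint, because the macro refers to its target solely by the symbolic name thunk_target. In miniature, using the non-retpoline fallback flavour so the snippet also builds in ordinary user space (five and dest are illustrative names):

#include <stdio.h>

/* Fallback flavour only; under CONFIG_RETPOLINE this string instead
 * targets the per-register thunks. */
#define CALL_NOSPEC "call *%[thunk_target]\n"

static long five(void)
{
	return 5;
}

int main(void)
{
	long (*dest)(void) = five;
	long ret;

	/* Before: asm volatile("call *%[call]" : ... : [call] "a" (dest));
	 * After:  same "a" constraint, operand renamed for the macro. */
	asm volatile(CALL_NOSPEC
		     : "=a" (ret)
		     : [thunk_target] "a" (dest)
		     : "memory", "cc", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11");

	printf("%ld\n", ret);
	return 0;
}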
+ +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Reviewed-by: Juergen Gross <jgross@suse.com> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-9-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/include/asm/xen/hypercall.h | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h +index 7cb282e9e587..bfd882617613 100644 +--- a/arch/x86/include/asm/xen/hypercall.h ++++ b/arch/x86/include/asm/xen/hypercall.h +@@ -44,6 +44,7 @@ + #include <asm/page.h> + #include <asm/pgtable.h> + #include <asm/smap.h> ++#include <asm/nospec-branch.h> + + #include <xen/interface/xen.h> + #include <xen/interface/sched.h> +@@ -217,9 +218,9 @@ privcmd_call(unsigned call, + __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + + stac(); +- asm volatile("call *%[call]" ++ asm volatile(CALL_NOSPEC + : __HYPERCALL_5PARAM +- : [call] "a" (&hypercall_page[call]) ++ : [thunk_target] "a" (&hypercall_page[call]) + : __HYPERCALL_CLOBBER5); + clac(); + +-- +2.14.3 + +From 96f71b3a482e918991d165eb7a6b42eb9a9ef735 Mon Sep 17 00:00:00 2001 +From: David Woodhouse <dwmw@amazon.co.uk> +Date: Tue, 9 Jan 2018 14:43:15 +0000 +Subject: [PATCH 09/10] x86/retpoline/checksum32: Convert assembler indirect + jumps + +Convert all indirect jumps in 32bit checksum assembler code to use +non-speculative sequences when CONFIG_RETPOLINE is enabled. + +Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Acked-by: Arjan van de Ven <arjan@linux.intel.com> +Acked-by: Ingo Molnar <mingo@kernel.org> +Cc: gnomes@lxorguk.ukuu.org.uk +Cc: Rik van Riel <riel@redhat.com> +Cc: Andi Kleen <ak@linux.intel.com> +Cc: Peter Zijlstra <peterz@infradead.org> +Cc: Linus Torvalds <torvalds@linux-foundation.org> +Cc: Jiri Kosina <jikos@kernel.org> +Cc: Andy Lutomirski <luto@amacapital.net> +Cc: Dave Hansen <dave.hansen@intel.com> +Cc: Kees Cook <keescook@google.com> +Cc: Tim Chen <tim.c.chen@linux.intel.com> +Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org> +Cc: Paul Turner <pjt@google.com> +Link: https://lkml.kernel.org/r/1515508997-6154-10-git-send-email-dwmw@amazon.co.uk +--- + arch/x86/lib/checksum_32.S | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S +index 4d34bb548b41..46e71a74e612 100644 +--- a/arch/x86/lib/checksum_32.S ++++ b/arch/x86/lib/checksum_32.S +@@ -29,7 +29,8 @@ + #include <asm/errno.h> + #include <asm/asm.h> + #include <asm/export.h> +- ++#include <asm/nospec-branch.h> ++ + /* + * computes a partial checksum, e.g. 
for TCP/UDP fragments
+ */
+@@ -156,7 +157,7 @@ ENTRY(csum_partial)
+ negl %ebx
+ lea 45f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+- jmp *%ebx
++ JMP_NOSPEC %ebx
+
+ # Handle 2-byte-aligned regions
+ 20: addw (%esi), %ax
+@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
+ andl $-32,%edx
+ lea 3f(%ebx,%ebx), %ebx
+ testl %esi, %esi
+- jmp *%ebx
++ JMP_NOSPEC %ebx
+ 1: addl $64,%esi
+ addl $64,%edi
+ SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
+--
+2.14.3
+
+From 9080a45e302772c068f73bc24b3304a416fe2daf Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Tue, 9 Jan 2018 14:43:16 +0000
+Subject: [PATCH 10/10] x86/retpoline/irq32: Convert assembler indirect jumps
+
+Convert all indirect jumps in 32bit irq inline asm code to use
+non-speculative sequences.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515508997-6154-11-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/kernel/irq_32.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index a83b3346a0e1..c1bdbd3d3232 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -20,6 +20,7 @@
+ #include <linux/mm.h>
+
+ #include <asm/apic.h>
++#include <asm/nospec-branch.h>
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+
+@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+ static void call_on_stack(void *func, void *stack)
+ {
+ asm volatile("xchgl %%ebx,%%esp \n"
+- "call *%%edi \n"
++ CALL_NOSPEC
+ "movl %%ebx,%%esp \n"
+ : "=b" (stack)
+ : "0" (stack),
+- "D"(func)
++ [thunk_target] "D"(func)
+ : "memory", "cc", "edx", "ecx", "eax");
+ }
+
+@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+ call_on_stack(print_stack_overflow, isp);
+
+ asm volatile("xchgl %%ebx,%%esp \n"
+- "call *%%edi \n"
++ CALL_NOSPEC
+ "movl %%ebx,%%esp \n"
+ : "=a" (arg1), "=b" (isp)
+ : "0" (desc), "1" (isp),
+- "D" (desc->handle_irq)
++ [thunk_target] "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
+ return 1;
+ }
+--
+2.14.3
+
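Once a retpoline-enabled kernel built from these patches is booted, the state of the mitigation can be read back at runtime: kernels from this era onward report it under /sys/devices/system/cpu/vulnerabilities. A small check, assuming the running kernel exposes that directory:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		perror("spectre_v2");	/* older kernels lack this file */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: Full generic retpoline" */
	fclose(f);
	return 0;
}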