authorJosh Boyer <jwboyer@fedoraproject.org>2015-01-12 08:17:01 -0500
committerJosh Boyer <jwboyer@fedoraproject.org>2015-01-12 08:20:12 -0500
commitfe651573b718ba19bfd5e68365103f8747c9d9d1 (patch)
treec981a9a9f974788a406327eb02f1e49ece5e7f6d
parentc60911c9944bb06d31b0fc5d9aeaf1731700d517 (diff)
Add patch to fix loop in VDSO (rhbz 1178975)
-rw-r--r--  kernel.spec                                  |  9
-rw-r--r--  x86-vdso-Use-asm-volatile-in-__getcpu.patch  | 55
2 files changed, 64 insertions(+), 0 deletions(-)
diff --git a/kernel.spec b/kernel.spec
index a4c08e9b4..84eb6dacb 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -614,6 +614,9 @@ Patch26092: xhci-Add-broken-streams-quirk-for-Fresco-Logic-FL100.patch
Patch26093: uas-Add-US_FL_NO_ATA_1X-for-Seagate-devices-with-usb.patch
Patch26094: uas-Add-US_FL_NO_REPORT_OPCODES-for-JMicron-JMS566-w.patch
+#rhbz 1178975
+Patch26125: x86-vdso-Use-asm-volatile-in-__getcpu.patch
+
# git clone ssh://git.fedorahosted.org/git/kernel-arm64.git, git diff master...devel
Patch30000: kernel-arm64.patch
@@ -1336,6 +1339,9 @@ ApplyPatch xhci-Add-broken-streams-quirk-for-Fresco-Logic-FL100.patch
ApplyPatch uas-Add-US_FL_NO_ATA_1X-for-Seagate-devices-with-usb.patch
ApplyPatch uas-Add-US_FL_NO_REPORT_OPCODES-for-JMicron-JMS566-w.patch
+#rhbz 1178975
+ApplyPatch x86-vdso-Use-asm-volatile-in-__getcpu.patch
+
%if 0%{?aarch64patches}
ApplyPatch kernel-arm64.patch
%ifnarch aarch64 # this is stupid, but i want to notice before secondary koji does.
@@ -2202,6 +2208,9 @@ fi
# ||----w |
# || ||
%changelog
+* Mon Jan 12 2015 Josh Boyer <jwboyer@fedoraproject.org>
+- Add patch to fix loop in VDSO (rhbz 1178975)
+
* Fri Jan 09 2015 Josh Boyer <jwboyer@fedoraproject.org> - 3.19.0-0.rc3.git2.1
- Linux v3.19-rc3-69-g11c8f01b423b
diff --git a/x86-vdso-Use-asm-volatile-in-__getcpu.patch b/x86-vdso-Use-asm-volatile-in-__getcpu.patch
new file mode 100644
index 000000000..8cdf8e7c6
--- /dev/null
+++ b/x86-vdso-Use-asm-volatile-in-__getcpu.patch
@@ -0,0 +1,55 @@
+From 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Sun, 21 Dec 2014 08:57:46 -0800
+Subject: x86, vdso: Use asm volatile in __getcpu
+
+In Linux 3.18 and below, GCC hoists the lsl instructions in the
+pvclock code all the way to the beginning of __vdso_clock_gettime,
+slowing the non-paravirt case significantly. For unknown reasons,
+presumably related to the removal of a branch, the performance issue
+is gone as of
+
+e76b027e6408 x86,vdso: Use LSL unconditionally for vgetcpu
+
+but I don't trust GCC enough to expect the problem to stay fixed.
+
+There should be no correctness issue, because the __getcpu calls in
+__vdso_clock_gettime were never necessary in the first place.
+
+Note to stable maintainers: In 3.18 and below, depending on
+configuration, gcc 4.9.2 generates code like this:
+
+ 9c3: 44 0f 03 e8 lsl %ax,%r13d
+ 9c7: 45 89 eb mov %r13d,%r11d
+ 9ca: 0f 03 d8 lsl %ax,%ebx
+
+This patch won't apply as is to any released kernel, but I'll send a
+trivial backported version if needed.
+
+Fixes: 51c19b4f5927 x86: vdso: pvclock gettime support
+Cc: stable@vger.kernel.org # 3.8+
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+
+diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
+index e7e9682..f556c48 100644
+--- a/arch/x86/include/asm/vgtod.h
++++ b/arch/x86/include/asm/vgtod.h
+@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
+
+ /*
+ * Load per CPU data from GDT. LSL is faster than RDTSCP and
+- * works on all CPUs.
++ * works on all CPUs. This is volatile so that it orders
++ * correctly wrt barrier() and to keep gcc from cleverly
++ * hoisting it out of the calling function.
+ */
+- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+ return p;
+ }
+--
+cgit v0.10.2
+
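A minimal standalone sketch of the compiler behavior the commit message describes, for context: GCC treats an asm statement that has output operands as a pure function of its inputs, so it may merge duplicate copies, hoist one out of a loop, or move it ahead of a branch; marking it "asm volatile" tells the compiler the statement has side effects it cannot see, so it must be emitted exactly where it is written. The file name, function names, and the use of rdtsc (instead of the kernel's lsl-based __getcpu) below are illustrative assumptions, not taken from the patch.

/* asm_volatile_sketch.c -- illustrative userspace example, not kernel code.
 * Build with "gcc -O2 -S asm_volatile_sketch.c" and compare the two loops.
 */

static inline unsigned int read_tsc_plain(void)
{
	unsigned int lo;
	/* No volatile: GCC may assume this asm always yields the same value
	 * (it has no inputs), so it can compute it once and reuse the result,
	 * including hoisting it out of a calling loop or function. */
	asm("rdtsc" : "=a" (lo) : : "edx");
	return lo;
}

static inline unsigned int read_tsc_volatile(void)
{
	unsigned int lo;
	/* volatile: the asm has side effects the compiler cannot see, so it
	 * must be executed exactly where it appears in the source. */
	asm volatile("rdtsc" : "=a" (lo) : : "edx");
	return lo;
}

unsigned int sum_plain(int n)
{
	unsigned int s = 0;
	for (int i = 0; i < n; i++)
		s += read_tsc_plain();      /* rdtsc may be hoisted out of the loop */
	return s;
}

unsigned int sum_volatile(int n)
{
	unsigned int s = 0;
	for (int i = 0; i < n; i++)
		s += read_tsc_volatile();   /* one rdtsc per iteration */
	return s;
}

The same reasoning applies to the lsl in __getcpu: without volatile, GCC hoisted it to the top of __vdso_clock_gettime even on paths that never needed it, which is the slowdown the patch guards against.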