author     Jeremy Fitzhardinge <jeremy@goop.org>       2008-07-08 15:06:42 -0700
committer  Ingo Molnar <mingo@elte.hu>                 2008-07-16 10:58:13 +0200
commit     5b09b2876ed1a8e34a0da8f069575fc6174e2077 (patch)
tree       370750e5c1d4073ed4a7525ccd0348e4154ba0d4 /include/asm-x86/percpu.h
parent     a9e7062d7339f1a1df2b6d7e5d595c7d55b56bfb (diff)
x86_64: add workaround for no %gs-based percpu
As a stopgap until Mike Travis's x86-64 gs-based percpu patches are
ready, provide workaround functions for x86_read/write_percpu for
Xen's use.

Specifically, this means that we can't really make use of vcpu
placement, because we can't use a single gs-based memory access to
get to vcpu fields.  So disable all that for now.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
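[Editor's note] For context, a minimal sketch of how a caller might use the
accessors this patch adds, assuming a Xen-style per-cpu pointer.  The variable
and helper names below are illustrative, not call sites from this series:

	#include <linux/percpu.h>
	#include <xen/interface/xen.h>	/* struct vcpu_info */

	/* Hypothetical per-cpu pointer, standing in for the vcpu fields
	 * the commit message refers to. */
	static DEFINE_PER_CPU(struct vcpu_info *, example_vcpu);

	static struct vcpu_info *example_get_vcpu(void)
	{
		/*
		 * On 32-bit this is a single segment-relative load; with
		 * the workaround added below on 64-bit it expands to
		 * preempt_disable()/load/preempt_enable().
		 */
		return x86_read_percpu(example_vcpu);
	}

	static void example_set_vcpu(struct vcpu_info *v)
	{
		x86_write_percpu(example_vcpu, v);
	}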
Diffstat (limited to 'include/asm-x86/percpu.h')
-rw-r--r--  include/asm-x86/percpu.h  26
1 files changed, 26 insertions, 0 deletions
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 912a3a17b9d..4e91ee1e37a 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -22,6 +22,32 @@
DECLARE_PER_CPU(struct x8664_pda, pda);
+/*
+ * These are supposed to be implemented as a single instruction which
+ * operates on the per-cpu data base segment. x86-64 doesn't have
+ * that yet, so this is a fairly inefficient workaround for the
+ * meantime. The single instruction is atomic with respect to
+ * preemption and interrupts, so we need to explicitly disable
+ * interrupts here to achieve the same effect. However, because it
+ * can be used from within interrupt-disable/enable, we can't actually
+ * disable interrupts; disabling preemption is enough.
+ */
+#define x86_read_percpu(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp; \
+ preempt_disable(); \
+ __tmp = __get_cpu_var(var); \
+ preempt_enable(); \
+ __tmp; \
+ })
+
+#define x86_write_percpu(var, val) \
+ do { \
+ preempt_disable(); \
+ __get_cpu_var(var) = (val); \
+ preempt_enable(); \
+ } while(0)
+
#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
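[Editor's note] The comment added above says these accessors are supposed to
be a single instruction operating on the per-cpu data base segment.  As a
rough illustration of that eventual form, here is a simplified sketch modelled
on the existing 32-bit segment-based accessors; the macro name is made up and
this is not the code in this header:

	/* Illustrative only: a single segment-prefixed load, relying on
	 * the per-cpu base being held in %gs on x86-64. */
	#define seg_read_percpu(var)					\
		({							\
			typeof(per_cpu_var(var)) ret__;			\
			asm("mov %%gs:%1, %0"				\
			    : "=r" (ret__)				\
			    : "m" (per_cpu_var(var)));			\
			ret__;						\
		})

Because the whole access is one instruction, it needs no explicit
preempt_disable()/preempt_enable() pair to stay on the same CPU's data.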