author    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>    2008-08-18 12:33:20 +0200
committer Ingo Molnar <mingo@elte.hu>    2008-08-18 16:05:45 +0200
commit    8bfcb3960fde049b863266dab8c3617bb5a541aa (patch)
tree      b658f2337d46d7f818eecc1be1e2ce76f57a32d0 /arch/x86
parent    1a10390708d675ebf1a2f5e169a5165626afbd88 (diff)
x86: make movsl_mask definition non-CPU specific
movsl_mask is currently defined in arch/x86/kernel/cpu/intel.c, which
contains code specific to Intel CPUs. However, movsl_mask is used in the
non-CPU-specific code in arch/x86/lib/usercopy_32.c, which breaks the
compilation when support for Intel CPUs is compiled out.

This patch solves the problem by moving the definition of movsl_mask
close to its users in arch/x86/lib/usercopy_32.c.

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: michael@free-electrons.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
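For context, here is a minimal standalone sketch of why the old placement
failed to build. This is not the kernel source: the struct layout and the
body of __movsl_is_ok beyond what the hunks below show are assumptions,
reconstructed around the one field (movsl_mask.mask) the code actually uses.

    /*
     * Sketch of the breakage this patch fixes. Simplified stand-ins for
     * the real kernel headers; struct layout is an assumption.
     */

    /* Shared declaration, visible to all x86 code. */
    struct movsl_mask {
            int mask;
    };
    extern struct movsl_mask movsl_mask;

    /*
     * arch/x86/lib/usercopy_32.c is built for every 32-bit x86 kernel and
     * reads movsl_mask to decide whether a bulk movsl copy is worthwhile
     * (body reconstructed; only the signature appears in the diff below):
     */
    static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
                                    unsigned long n)
    {
    #ifdef CONFIG_X86_INTEL_USERCOPY
            /* Source and destination disagree on alignment: skip movsl. */
            if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
                    return 0;
    #endif
            return 1;
    }

Before the patch, the one and only definition of the variable sat in the
Intel-specific file arch/x86/kernel/cpu/intel.c. With Intel CPU support
compiled out, intel.c is not built, so every reference in usercopy_32.c
resolves to nothing and the final link fails with an undefined reference
to movsl_mask. Defining the variable next to its users removes the
dependency on intel.c entirely.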
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 7 -------
-rw-r--r--  arch/x86/lib/usercopy_32.c  | 7 +++++++
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f..5c8959b8a42 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -23,13 +23,6 @@
 #include <mach_apic.h>
 #endif
 
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e60944971..9e68075544f 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -14,6 +14,13 @@
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
 
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask __read_mostly;
+#endif
+
 static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY