author	H. Peter Anvin <hpa@zytor.com>	2009-05-08 16:20:34 -0700
committer	H. Peter Anvin <hpa@zytor.com>	2009-05-08 17:18:10 -0700
commit	5b11f1cee5797b38d16b94d8745b12b6727a8373 (patch)
tree	01d9ce7df1629c1ec17815c33ad3ce8b5bed3094 /arch/x86/boot
parent	b40d68d5b5b799caaf99d2e073e62962e6d917ce (diff)
x86, boot: straighten out ranges to copy/zero in compressed/head*.S
Both on 32 and 64 bits, we copy all the way up to the end of bss,
except that on 64 bits there is a hack to avoid copying on top of the
page tables.  There is no point in copying bss at all, especially
since we are just about to zero it all anyway.

To clean up and unify the handling, we now do:

  - copy from startup_32 to _bss.
  - zero from _bss to _ebss.
  - the _ebss symbol is aligned to an 8-byte boundary.
  - the page tables are moved to a separate section.

Use _bss as the copy endpoint since _edata may be misaligned.

[ Impact: cleanup, trivial performance improvement ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
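The new ranges are easy to state in C. A minimal sketch, assuming the
compressed image is linked at 0 so the linker symbols (declared as extern
char arrays, a common kernel idiom) double as offsets; relocate_and_clear,
loaded, and relocated are hypothetical names standing in for the %ebp and
%ebx bases used in the assembly below:

	#include <stddef.h>
	#include <string.h>

	extern char startup_32[], _bss[], _ebss[];

	static void relocate_and_clear(char *loaded, char *relocated)
	{
		size_t image_len = (size_t)(_bss - startup_32);

		/* copy everything below _bss; regions may overlap */
		memmove(relocated, loaded, image_len);
		/* zero [_bss, _ebss) at the relocated address */
		memset(relocated + image_len, 0, (size_t)(_ebss - _bss));
	}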
Diffstat (limited to 'arch/x86/boot')
 arch/x86/boot/compressed/head_32.S     |  8 ++++----
 arch/x86/boot/compressed/head_64.S     | 18 +++++++++++-------
 arch/x86/boot/compressed/vmlinux.lds.S | 19 ++++++++++++-------
 3 files changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 7bd7766ffab..59425e157df 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -93,9 +93,9 @@ ENTRY(startup_32)
* where decompression in place becomes safe.
*/
pushl %esi
- leal _ebss(%ebp), %esi
- leal _ebss(%ebx), %edi
- movl $(_ebss - startup_32), %ecx
+ leal _bss(%ebp), %esi
+ leal _bss(%ebx), %edi
+ movl $(_bss - startup_32), %ecx
std
rep movsb
cld
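The std/rep movsb pair copies downward because the destination overlaps
the source from above, so the top bytes must move first. A schematic C
equivalent (copy_backward is a hypothetical helper, not kernel code; real
movsb differs in the exact order it steps its pointers):

	static void copy_backward(unsigned char *dst_end,
				  const unsigned char *src_end,
				  unsigned long count)
	{
		while (count--)
			*--dst_end = *--src_end;	/* like movsb with DF set */
	}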
@@ -125,7 +125,7 @@ relocated:
* Clear BSS
*/
xorl %eax, %eax
- leal _edata(%ebx), %edi
+ leal _bss(%ebx), %edi
leal _ebss(%ebx), %ecx
subl %edi, %ecx
cld
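The clear now starts at _bss instead of the possibly misaligned _edata,
matching the copy's new endpoint so the two ranges tile the image exactly.
A sketch of the same operation in C (clear_bss is a hypothetical name; the
assembly additionally applies the %ebx relocation base, which a normally
linked C program would not need):

	#include <string.h>

	extern char _bss[], _ebss[];

	static void clear_bss(void)
	{
		memset(_bss, 0, (size_t)(_ebss - _bss));
	}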
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 26c3def43ac..5bc9052615b 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -253,9 +253,9 @@ ENTRY(startup_64)
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
- leaq _end_before_pgt(%rip), %r8
- leaq _end_before_pgt(%rbx), %r9
- movq $_end_before_pgt /* - $startup_32 */, %rcx
+ leaq _bss(%rip), %r8
+ leaq _bss(%rbx), %r9
+ movq $_bss /* - $startup_32 */, %rcx
1: subq $8, %r8
subq $8, %r9
movq 0(%r8), %rax
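The 64-bit copy also walks backward, but eight bytes at a time, which
relies on _bss - startup_32 being a multiple of 8 for the count to reach
exactly zero. A schematic equivalent (copy_backward_u64 is a hypothetical
name, under that same alignment assumption):

	static void copy_backward_u64(unsigned char *dst_end,
				      const unsigned char *src_end,
				      unsigned long len)
	{
		while (len) {			/* len: multiple of 8 */
			dst_end -= 8;
			src_end -= 8;
			*(unsigned long *)dst_end =
				*(const unsigned long *)src_end;
			len -= 8;
		}
	}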
@@ -276,8 +276,8 @@ relocated:
* Clear BSS
*/
xorq %rax, %rax
- leaq _edata(%rbx), %rdi
- leaq _end_before_pgt(%rbx), %rcx
+ leaq _bss(%rbx), %rdi
+ leaq _ebss(%rbx), %rcx
subq %rdi, %rcx
cld
rep stosb
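With both bounds now the same symbols as on 32 bits, and _ebss 8-byte
aligned by the linker script, the clear could equally run a word at a
time. A hypothetical sketch (clear_bss_words is not in the kernel, and it
assumes _bss itself is also 8-byte aligned):

	extern unsigned long _bss[], _ebss[];

	static void clear_bss_words(void)
	{
		unsigned long *p = _bss;

		while (p < _ebss)
			*p++ = 0;	/* one 64-bit store per iteration */
	}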
@@ -329,3 +329,11 @@ boot_heap:
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:
+
+/*
+ * Space for page tables (not in .bss so not zeroed)
+ */
+ .section ".pgtable","a",@nobits
+ .balign 4096
+pgtable:
+ .fill 6*4096, 1, 0
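Because .pgtable is declared "a",@nobits, it is allocated at run time but
occupies no bytes in the file, and since it sits outside [_bss, _ebss) it
is neither copied nor zeroed by the loops above. The _pgtable/_epgtable
symbols added to the linker script below bracket it; a hypothetical C view
of the same space (pgtable_bytes is illustrative only):

	extern char _pgtable[], _epgtable[];

	static unsigned long pgtable_bytes(void)
	{
		/* spans the six pages reserved by .fill 6*4096 */
		return (unsigned long)(_epgtable - _pgtable);
	}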
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index dbe515e13fe..cc353e1b3ff 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -2,6 +2,8 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#undef i386
+#include <asm/page_types.h>
+
#ifdef CONFIG_X86_64
OUTPUT_ARCH(i386:x86-64)
ENTRY(startup_64)
@@ -48,13 +50,16 @@ SECTIONS
*(.bss)
*(.bss.*)
*(COMMON)
-#ifdef CONFIG_X86_64
- . = ALIGN(8);
- _end_before_pgt = . ;
- . = ALIGN(4096);
- pgtable = . ;
- . = . + 4096 * 6;
-#endif
+ . = ALIGN(8); /* For convenience during zeroing */
_ebss = .;
}
+#ifdef CONFIG_X86_64
+ . = ALIGN(PAGE_SIZE);
+ .pgtable : {
+ _pgtable = . ;
+ *(.pgtable)
+ _epgtable = . ;
+ }
+#endif
+ _end = .;
}
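Taken together, the image now lays out as startup_32 .. _bss (copied),
_bss .. _ebss (zeroed), and on 64 bits _pgtable .. _epgtable (untouched
@nobits), ending at _end. A hypothetical sanity check over the resulting
symbols (check_layout is illustrative only, not kernel code):

	#include <assert.h>

	extern char startup_32[], _bss[], _ebss[];
	extern char _pgtable[], _epgtable[], _end[];

	static void check_layout(void)
	{
		assert(((unsigned long)_ebss & 7) == 0);	/* ALIGN(8) */
		assert(((unsigned long)_pgtable & 4095) == 0);	/* ALIGN(PAGE_SIZE) */
		assert(_epgtable - _pgtable == 6 * 4096);	/* six pages */
		assert(_end >= _epgtable);
	}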