author    Thomas Gleixner <tglx@linutronix.de>  2008-01-30 13:30:25 +0100
committer Ingo Molnar <mingo@elte.hu>           2008-01-30 13:30:25 +0100
commit    f8eeae682166e3b965dcdaf499932f4b63cc5b5d (patch)
tree      d4d1afb2a4604bc2ade01dcae086ec27776b148c
parent    ed4aed98da8d042716d327a0b538dd8002c0a767 (diff)
x86: clean up arch/x86/mm/mmap_32/64.c
Whitespace and coding style cleanup.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- arch/x86/mm/mmap_32.c  4
-rw-r--r-- arch/x86/mm/mmap_64.c 11
2 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/mmap_32.c b/arch/x86/mm/mmap_32.c
index 552e0847375..d7dd0962a6d 100644
--- a/arch/x86/mm/mmap_32.c
+++ b/arch/x86/mm/mmap_32.c
@@ -64,8 +64,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* bit is set, or if the expected stack growth is unlimited:
*/
if (sysctl_legacy_va_layout ||
- (current->personality & ADDR_COMPAT_LAYOUT) ||
- current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+ (current->personality & ADDR_COMPAT_LAYOUT) ||
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
index 80bba0dc000..ffb71a31bb6 100644
--- a/arch/x86/mm/mmap_64.c
+++ b/arch/x86/mm/mmap_64.c
@@ -16,11 +16,14 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
#endif
mm->mmap_base = TASK_UNMAPPED_BASE;
if (current->flags & PF_RANDOMIZE) {
- /* Add 28bit randomness which is about 40bits of address space
- because mmap base has to be page aligned.
- or ~1/128 of the total user VM
- (total user address space is 47bits) */
+ /*
+ * Add 28bit randomness which is about 40bits of
+ * address space because mmap base has to be page
+ * aligned. or ~1/128 of the total user VM (total
+ * user address space is 47bits)
+ */
unsigned rnd = get_random_int() & 0xfffffff;
+
mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
}
mm->get_unmapped_area = arch_get_unmapped_area;
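
The reformatted comment in the mmap_64.c hunk compresses the key arithmetic: 28 bits of randomness, shifted left by PAGE_SHIFT (12 on x86), span 2^40 bytes of address space, and 2^40 / 2^47 = 1/128 of the 47-bit x86-64 user VM. A minimal userspace sketch of that calculation follows; get_random_int() is replaced by rand() and TASK_UNMAPPED_BASE is an illustrative constant, so neither reflects the kernel's actual implementation:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12                          /* 4 KiB pages on x86 */
#define USER_VA_BITS 47                        /* x86-64 user address space */
#define TASK_UNMAPPED_BASE 0x2aaaaaaab000UL    /* illustrative value only */

int main(void)                                 /* assumes an LP64 platform */
{
	srand((unsigned)time(NULL));

	/*
	 * Mirror of the hunk above: keep the low 28 bits of randomness.
	 * rand() stands in for get_random_int(); on platforms with a
	 * small RAND_MAX it yields fewer than 28 random bits.
	 */
	unsigned rnd = (unsigned)rand() & 0xfffffff;

	/* Shifting by PAGE_SHIFT keeps mmap_base page aligned. */
	unsigned long base = TASK_UNMAPPED_BASE +
			     ((unsigned long)rnd << PAGE_SHIFT);

	unsigned long range   = 1UL << (28 + PAGE_SHIFT); /* 2^40 bytes */
	unsigned long user_vm = 1UL << USER_VA_BITS;      /* 2^47 bytes */

	printf("randomized mmap_base: %#lx\n", base);
	printf("randomization range:  2^%d bytes\n", 28 + PAGE_SHIFT);
	printf("fraction of user VM:  1/%lu\n", user_vm / range);
	return 0;
}

Run once, this prints a base somewhere in a 1 TiB (2^40 byte) window above the illustrative base and confirms the 1/128 fraction quoted in the comment.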