author     Zachary Amsden <zach@vmware.com>         2005-09-03 15:56:40 -0700
committer  Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 00:06:12 -0700
commit     c9b02a24130e3ff14a553d966a79f46cf806b037 (patch)
tree       1c496ef13e8d2d991f5197ec1c1eb34282beddf0 /arch/i386/mm
parent     e7a2ff593c0e48b130434dee4d2fd3452a850e6f (diff)
[PATCH] i386: use set_pte macros in a couple places where they were missing
Also, setting PDPEs in PAE mode does not require atomic operations, since the
PDPEs are cached by the processor, and only reloaded on an explicit or
implicit reload of CR3.
Since the four PDPEs must always be present in an active root, and the kernel
PDPE is never updated, we are safe even from SMIs and interrupts / NMIs using
task gates (which reload CR3). Actually, much of this is moot, since the user
PDPEs are never updated either, and the only usage of task gates is by the
doublefault handler. It appears the only place PGDs get updated in PAE mode
is in init_low_mappings() / zap_low_mapping() for initial page table creation
and recovery from ACPI sleep state, and these sites are safe by inspection.
Getting rid of the cmpxchg8b saves code space and 720 cycles in pgd_alloc on
P4.
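To make the cmpxchg8b argument concrete, here is a minimal user-space sketch, not kernel code: pdpe_t, set_pdpe_atomic() and set_pdpe_plain() are made-up names. It contrasts an atomic 8-byte store of a 64-bit entry, roughly the guarantee a cmpxchg8b-based set_64bit() provides, with the plain store the message argues is sufficient for PDPEs, since the processor rereads them only on a CR3 load.

/*
 * Illustrative sketch, not kernel code.  On 32-bit x86 a 64-bit page-table
 * entry cannot be written with one ordinary instruction, so an observer of
 * a plain store may briefly see a mix of old and new halves.  cmpxchg8b
 * avoids that; the commit argues PDPEs do not need it because the CPU
 * caches them and only rereads them when CR3 is reloaded.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pdpe_t;   /* hypothetical 64-bit PDPE */

/* Atomic 8-byte store, the guarantee a cmpxchg8b-based helper provides. */
static void set_pdpe_atomic(pdpe_t *slot, uint64_t v)
{
	__atomic_store_n(&slot->val, v, __ATOMIC_SEQ_CST);
}

/* Plain store: on a 32-bit target this becomes two 32-bit writes.  Fine
 * for PDPEs, per the commit message, since nothing consumes them between
 * the two writes. */
static void set_pdpe_plain(pdpe_t *slot, uint64_t v)
{
	slot->val = v;
}

int main(void)
{
	pdpe_t root[4] = { {0}, {0}, {0}, {0} };   /* PAE: four PDPEs per root */

	set_pdpe_atomic(&root[0], 0x12345000ULL | 1);   /* bit 0 = present */
	set_pdpe_plain(&root[3],  0x67890000ULL | 1);

	printf("pdpe0=%#llx pdpe3=%#llx\n",
	       (unsigned long long)root[0].val,
	       (unsigned long long)root[3].val);
	return 0;
}

The two stores leave the same contents behind; the only difference is the cost of the lock-prefixed cmpxchg8b path, which is where the quoted saving in pgd_alloc on P4 comes from.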
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--  arch/i386/mm/init.c      | 2
-rw-r--r--  arch/i386/mm/pageattr.c  | 5
2 files changed, 4 insertions, 3 deletions
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index d8b23ab7653..9edfc058b89 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -349,7 +349,7 @@ static void __init pagetable_init (void)
 	 * All user-space mappings are explicitly cleared after
 	 * SMP startup.
 	 */
-	pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
+	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
 #endif
 }
 
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index bce06a79eaf..f600fc244f0 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 
 static DEFINE_SPINLOCK(cpa_lock);
 static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -52,8 +53,8 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
-				   addr == address ? prot : PAGE_KERNEL);
+		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
+					   addr == address ? prot : PAGE_KERNEL));
 	}
 	return base;
 }
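For context on why the patch routes these writes through set_pte() rather than a direct assignment, the fragment below is a rough user-space sketch of the idea behind the i386 PAE helper, not a copy of the kernel header of the time: pae_pte_t, its field layout and pae_set_pte() are illustrative names. The point is that a PAE entry is 64 bits wide but a 32-bit CPU stores it as two 32-bit halves, so when populating a not-yet-present entry the half carrying the present bit should land last.

/*
 * Illustrative sketch only.  The kernel's PAE set_pte() writes the high
 * half first and the low half (with the present bit) last, with a write
 * barrier in between; here that idea is modeled with C11-style atomics.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT 0x1u

typedef struct {
	volatile uint32_t pte_low;   /* flags, including the present bit */
	volatile uint32_t pte_high;  /* upper physical-address bits */
} pae_pte_t;

static void pae_set_pte(pae_pte_t *ptep, uint32_t low, uint32_t high)
{
	/* High half first, while the entry is still not-present... */
	ptep->pte_high = high;
	__atomic_thread_fence(__ATOMIC_RELEASE);   /* kernel uses smp_wmb() */
	/* ...then the low half, which flips the present bit, so a concurrent
	 * page-table walk never sees a present entry whose high half has not
	 * landed yet. */
	ptep->pte_low = low;
}

int main(void)
{
	pae_pte_t pte = { 0, 0 };

	pae_set_pte(&pte, 0x1000u | PTE_PRESENT, 0x0u);
	printf("low=%#x high=%#x\n", pte.pte_low, pte.pte_high);
	return 0;
}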