author    Helge Deller <deller@parisc-linux.org>    2006-04-20 20:40:23 +0000
committer Kyle McMartin <kyle@hera.kernel.org>      2006-04-21 22:20:34 +0000
commit    2fd83038160531245099c3c5b3511fa4b80765eb (patch)
tree      6145a9e78723c76ceac722eb60267c0116983c12 /arch/parisc
parent    d668da80d613def981c573354e1853e38bd0698d (diff)
[PARISC] Further work for multiple page sizes
More work towards supporting multiple page sizes on 64-bit. Convert some
assumptions that 64-bit uses 3-level page tables into testing PT_NLEVELS.
Also some BUG() to BUG_ON() conversions and some cleanups to assembler.

Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
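Background on the PT_NLEVELS test: with 8-byte page-table entries (an
assumption of this sketch), a larger base page packs more entries into each
level, so a 64-bit kernel with 16KB or 64KB pages can cover its address
space with two levels instead of three. A small illustrative program
showing the arithmetic:

    #include <stdio.h>

    /* Illustrative arithmetic only, assuming 8-byte page-table entries. */
    static void levels(const char *name, int page_shift, int nlevels)
    {
            int entry_bits = page_shift - 3;   /* log2(PAGE_SIZE / 8) */
            int va_bits = page_shift + nlevels * entry_bits;
            printf("%s pages, %d levels -> %2d VA bits (%llu GB)\n",
                   name, nlevels, va_bits,
                   (unsigned long long)1 << (va_bits - 30));
    }

    int main(void)
    {
            levels(" 4KB", 12, 2);   /* 30 bits:   1 GB - too small      */
            levels(" 4KB", 12, 3);   /* 39 bits: 512 GB - needs 3 levels */
            levels("16KB", 14, 2);   /* 36 bits:  64 GB - 2 levels do    */
            levels("64KB", 16, 2);   /* 42 bits:   4 TB                  */
            return 0;
    }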
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kconfig               |  31
-rw-r--r--  arch/parisc/kernel/asm-offsets.c  |   3
-rw-r--r--  arch/parisc/kernel/entry.S        |  36
-rw-r--r--  arch/parisc/kernel/head.S         |  15
-rw-r--r--  arch/parisc/kernel/init_task.c    |  10
-rw-r--r--  arch/parisc/kernel/pacache.S      |  25
-rw-r--r--  arch/parisc/kernel/syscall.S      |  10
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S  |  54
-rw-r--r--  arch/parisc/mm/init.c             |  28

9 files changed, 133 insertions, 79 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 19f911c5dd5..910fb3afc0b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -138,6 +138,37 @@ config 64BIT
enable this option otherwise. The 64bit kernel is significantly bigger
and slower than the 32bit one.
+choice
+ prompt "Kernel page size"
+ default PARISC_PAGE_SIZE_4KB if !64BIT
+ default PARISC_PAGE_SIZE_4KB if 64BIT
+# default PARISC_PAGE_SIZE_16KB if 64BIT
+
+config PARISC_PAGE_SIZE_4KB
+ bool "4KB"
+ help
+ This lets you select the page size of the kernel. For best
+ performance, a page size of 16KB is recommended. For best
+ compatibility with 32bit applications, a page size of 4KB should be
+ selected (the vast majority of 32bit binaries work perfectly fine
+ with a larger page size).
+
+ 4KB For best 32bit compatibility
+ 16KB For best performance
+ 64KB For best performance, might give more overhead.
+
+ If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+ bool "16KB (EXPERIMENTAL)"
+ depends on PA8X00 && EXPERIMENTAL
+
+config PARISC_PAGE_SIZE_64KB
+ bool "64KB (EXPERIMENTAL)"
+ depends on PA8X00 && EXPERIMENTAL
+
+endchoice
+
config SMP
bool "Symmetric multi-processing support"
---help---
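The new choice is consumed by asm/page.h. A minimal sketch of the mapping,
modeled on how the header ends up after this series (names match the
Kconfig symbols above):

    #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
    # define PAGE_SHIFT     12      /*  4KB */
    #elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
    # define PAGE_SHIFT     14      /* 16KB */
    #elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
    # define PAGE_SHIFT     16      /* 64KB */
    #endif
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))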
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index e23c4e1e3a2..c11a5bc7c06 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -288,8 +288,11 @@ int main(void)
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+ DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
+ DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
+ DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
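The DIV64/DIV128 constants exist because the assembler cannot evaluate
PAGE_SIZE/128 from the C headers itself; asm-offsets.c uses the standard
kernel trick of emitting markers that the build turns into asm-offsets.h.
Sketched, with DEFINE being the usual macro:

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* The build greps the generated assembly into asm-offsets.h, e.g.
     *   #define ASM_PAGE_SIZE_DIV128 32      (for 4KB pages)
     * which pacache.S below loads via "ldi ASM_PAGE_SIZE_DIV128, %r1". */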
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 7c95d7663c2..d9e53cf0372 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,18 +502,20 @@
* all ILP32 processes and all the kernel for machines with
* under 4GB of memory) */
.macro L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
- extrd,u,*= \va,31,32,%r0
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
- extrd,u,*= \va,31,32,%r0
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
- extrd,u,*= \va,31,32,%r0
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
shld \pgd,PxD_VALUE_SHIFT,\index
- extrd,u,*= \va,31,32,%r0
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
copy \index,\pgd
- extrd,u,*<> \va,31,32,%r0
+ extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
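In C terms the macro now reads roughly as below, a toy compilable model
rather than kernel code (entry layout, names, and constants are
simplified assumptions): the top-level lookup compiles away entirely when
PT_NLEVELS is 2.

    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define ENTRY_BITS   9              /* 512 entries/level: 4KB, 8-byte PTEs */
    #define IDX_MASK     ((1u << ENTRY_BITS) - 1)
    #define PRESENT      1ULL           /* stands in for _PxD_PRESENT_BIT */
    #define PT_NLEVELS   3              /* assumed: 64-bit with 4KB pages */

    typedef uint64_t pt_entry;

    static pt_entry *l2_ptep(pt_entry *pmd, uint64_t va)
    {
            pt_entry e = pmd[(va >> (PAGE_SHIFT + ENTRY_BITS)) & IDX_MASK];
            pt_entry *pte = (pt_entry *)(uintptr_t)(e & ~PRESENT);
            return &pte[(va >> PAGE_SHIFT) & IDX_MASK];
    }

    static pt_entry *l3_ptep(pt_entry *top, uint64_t va)
    {
    #if PT_NLEVELS == 3  /* skipped for the 2-level (16KB/64KB) scheme */
            pt_entry e = top[(va >> (PAGE_SHIFT + 2 * ENTRY_BITS)) & IDX_MASK];
            if (!(e & PRESENT))
                    return 0;           /* the "\fault" branch in the asm */
            top = (pt_entry *)(uintptr_t)(e & ~PRESENT);
    #endif
            return l2_ptep(top, va);
    }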
@@ -563,10 +565,18 @@
extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
- /* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
+ /* Enforce uncacheable pages.
+ * This should ONLY be use for MMIO on PA 2.0 machines.
+ * Memory/DMA is cache coherent on all PA2.0 machines we support
+ * (that means T-class is NOT supported) and the memory controllers
+ * on most of those machines only handles cache transactions.
+ */
+ extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+ depi 1,12,1,\prot
- depd %r0,63,PAGE_SHIFT,\pte
- extrd,s \pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
.endm
/* Identical macro to make_insert_tlb above, except it
@@ -584,9 +594,8 @@
/* Get rid of prot bits and convert to page addr for iitlba */
- depi 0,31,PAGE_SHIFT,\pte
+ depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
extru \pte,24,25,\pte
-
.endm
/* This is for ILP32 PA2.0 only. The TLB insertion needs
@@ -1201,10 +1210,9 @@ intr_save:
*/
/* adjust isr/ior. */
-
- extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
- depd %r1,31,7,%r17 /* deposit them into ior */
- depdi 0,63,7,%r16 /* clear them from isr */
+ extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
+ depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
+ depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
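The isr/ior adjustment as C arithmetic (an illustrative fragment; isr and
ior mirror %r16/%r17 above): the low SPACEID_SHIFT bits of the
interruption space register really belong to the offset, so they migrate
into the top of the offset word. The width used to be a hardcoded 7.

    unsigned long lo = isr & ((1UL << SPACEID_SHIFT) - 1);
    ior |= lo << 32;                        /* depd  %r1,31,SPACEID_SHIFT,%r17 */
    isr &= ~((1UL << SPACEID_SHIFT) - 1);   /* depdi 0,63,SPACEID_SHIFT,%r16   */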
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 0b47afc2069..3e79e62f7b0 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -76,16 +76,16 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
- ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
/* 2-level page table, so pmd == pgd */
- ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+ ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Fill in pmd with enough pte directories */
@@ -99,7 +99,7 @@ $bss_loop:
stw %r3,0(%r4)
ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -107,13 +107,14 @@ $bss_loop:
/* Now initialize the PTEs themselves */
- ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
- ldo ASM_PAGE_SIZE(%r3),%r3
- bb,>= %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
+ ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
+ addib,> -1,%r11,$pgt_fill_loop
nop
/* Load the return address...er...crash 'n burn */
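The reworked $pgt_fill_loop counts pages instead of testing a hardcoded
bit of the physical address. In C it is roughly the sketch below
(illustrative; ptep points at pg0 from init_task.c, and
KERNEL_INITIAL_ORDER is the log2 of the initially mapped kernel region):

    unsigned long *ptep = (unsigned long *)pg0;
    unsigned long pte   = _PAGE_KERNEL;  /* phys addr 0 + kernel prot */
    unsigned long n     = 1UL << (KERNEL_INITIAL_ORDER - PAGE_SHIFT);

    while (n--) {                        /* addib,> -1,%r11,$pgt_fill_loop  */
            *ptep++ = pte;               /* STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1) */
            pte += 1UL << PFN_PTE_SHIFT; /* advance exactly one PFN */
    }

Both the trip count and the PFN step scale with PAGE_SHIFT, so the loop
stays correct for any of the three page sizes.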
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 7e898fd6441..8384bf9cecd 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -53,17 +53,17 @@ union thread_union init_thread_union
__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
-#ifdef __LP64__
+#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
-
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
#endif
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
/*
* Initial task structure.
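Dropping the "= { {0}, }" initializers is safe because objects with static
storage duration are zero-initialized anyway; combined with the explicit
section attributes, the linker script (changed below) controls where the
page-aligned tables land. The general idiom, sketched:

    /* All-zero at boot, same as "= {0}", but placement is decided by
     * vmlinux.lds.S, which now groups .data.vm0.* next to .bss. */
    static unsigned long table[512]
            __attribute__((__section__(".data.vm0.pte"), aligned(4096)));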
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 7a4f07e8d3c..f600556414d 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -65,7 +65,7 @@ flush_tlb_all_local:
*/
/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
- rsm PSW_SM_I, %r19 /* save I-bit state */
+ rsm PSW_SM_I, %r19 /* save I-bit state */
load32 PA(1f), %r1
nop
nop
@@ -84,8 +84,7 @@ flush_tlb_all_local:
rfi
nop
-1: ldil L%PA(cache_info), %r1
- ldo R%PA(cache_info)(%r1), %r1
+1: load32 PA(cache_info), %r1
/* Flush Instruction Tlb */
@@ -212,8 +211,7 @@ flush_instruction_cache_local:
.entry
mtsp %r0, %sr1
- ldil L%cache_info, %r1
- ldo R%cache_info(%r1), %r1
+ load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -254,8 +252,7 @@ flush_data_cache_local:
.entry
mtsp %r0, %sr1
- ldil L%cache_info, %r1
- ldo R%cache_info(%r1), %r1
+ load32 cache_info, %r1
/* Flush Data Cache */
@@ -303,7 +300,8 @@ copy_user_page_asm:
*/
ldd 0(%r25), %r19
- ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
+ ldi ASM_PAGE_SIZE_DIV128, %r1
+
ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
ldw 128(%r25), %r0 /* prefetch 2 */
@@ -368,7 +366,7 @@ copy_user_page_asm:
* use ldd/std on a 32 bit kernel.
*/
ldw 0(%r25), %r19
- ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
+ ldi ASM_PAGE_SIZE_DIV64, %r1
1:
ldw 4(%r25), %r20
@@ -461,6 +459,7 @@ copy_user_page_asm:
sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
ldil L%(TMPALIAS_MAP_START), %r28
+ /* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
@@ -551,6 +550,7 @@ __clear_user_page_asm:
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
+ /* FIXME: page size dependend */
#endif
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
@@ -566,10 +566,10 @@ __clear_user_page_asm:
pdtlb 0(%r28)
#ifdef CONFIG_64BIT
- ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
+ ldi ASM_PAGE_SIZE_DIV128, %r1
/* PREFETCH (Write) has not (yet) been proven to help here */
-/* #define PREFETCHW_OP ldd 256(%0), %r0 */
+ /* #define PREFETCHW_OP ldd 256(%0), %r0 */
1: std %r0, 0(%r28)
std %r0, 8(%r28)
@@ -591,8 +591,7 @@ __clear_user_page_asm:
ldo 128(%r28), %r28
#else /* ! CONFIG_64BIT */
-
- ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
+ ldi ASM_PAGE_SIZE_DIV64, %r1
1:
stw %r0, 0(%r28)
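The copy/clear loops process one 128-byte cacheline per iteration on
64-bit (64 bytes on 32-bit), so the trip count must follow PAGE_SIZE
rather than stay a hardcoded 32 or 64. The clear loop's shape in C
(a sketch; memset stands in for the sixteen unrolled std's, and the 4KB
value is an assumption):

    #include <string.h>

    #define PAGE_SIZE 4096UL                    /* assumed 4KB here */

    void clear_page_sketch(char *page)
    {
            unsigned long lines = PAGE_SIZE / 128;  /* ASM_PAGE_SIZE_DIV128 */

            while (lines--) {
                    memset(page, 0, 128);       /* one 128-byte cacheline */
                    page += 128;
            }
    }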
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index af88afef41b..479d9a017cd 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -55,7 +55,7 @@
* pointers.
*/
- .align 4096
+ .align ASM_PAGE_SIZE
linux_gateway_page:
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
@@ -632,7 +632,7 @@ cas_action:
end_compare_and_swap:
/* Make sure nothing else is placed on this page */
- .align 4096
+ .align ASM_PAGE_SIZE
.export end_linux_gateway_page
end_linux_gateway_page:
@@ -652,7 +652,7 @@ end_linux_gateway_page:
.section .rodata,"a"
- .align 4096
+ .align ASM_PAGE_SIZE
/* Light-weight-syscall table */
/* Start of lws table. */
.export lws_table
@@ -662,14 +662,14 @@ lws_table:
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
/* End of lws table */
- .align 4096
+ .align ASM_PAGE_SIZE
.export sys_call_table
.Lsys_call_table:
sys_call_table:
#include "syscall_table.S"
#ifdef CONFIG_64BIT
- .align 4096
+ .align ASM_PAGE_SIZE
.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 6d6436a6b62..94dcc03a28e 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ * Copyright (C) 2006 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -27,6 +28,7 @@
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
+#include <asm/asm-offsets.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
@@ -68,7 +70,7 @@ SECTIONS
RODATA
/* writeable */
- . = ALIGN(4096); /* Make sure this is page aligned so
+ . = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so
that we can properly leave these
as writable */
data_start = .;
@@ -81,23 +83,17 @@ SECTIONS
__start___unwind = .; /* unwind info */
.PARISC.unwind : { *(.PARISC.unwind) }
__stop___unwind = .;
-
+
+ /* rarely changed data like cpu maps */
+ . = ALIGN(16);
+ .data.read_mostly : { *(.data.read_mostly) }
+
+ . = ALIGN(L1_CACHE_BYTES);
.data : { /* Data */
*(.data)
- *(.data.vm0.pmd)
- *(.data.vm0.pgd)
- *(.data.vm0.pte)
CONSTRUCTORS
}
- . = ALIGN(4096);
- /* nosave data is really only used for software suspend...it's here
- * just in case we ever implement it */
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(4096);
- __nosave_end = .;
-
. = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
@@ -105,12 +101,29 @@ SECTIONS
. = ALIGN(16);
.data.lock_aligned : { *(.data.lock_aligned) }
- /* rarely changed data like cpu maps */
- . = ALIGN(16);
- .data.read_mostly : { *(.data.read_mostly) }
+ . = ALIGN(ASM_PAGE_SIZE);
+ /* nosave data is really only used for software suspend...it's here
+ * just in case we ever implement it */
+ __nosave_begin = .;
+ .data_nosave : { *(.data.nosave) }
+ . = ALIGN(ASM_PAGE_SIZE);
+ __nosave_end = .;
_edata = .; /* End of data section */
+ __bss_start = .; /* BSS */
+ /* page table entries need to be PAGE_SIZE aligned */
+ . = ALIGN(ASM_PAGE_SIZE);
+ .data.vmpages : {
+ *(.data.vm0.pmd)
+ *(.data.vm0.pgd)
+ *(.data.vm0.pte)
+ }
+ .bss : { *(.bss) *(COMMON) }
+ __bss_stop = .;
+
+
+ /* assembler code expects init_task to be 16k aligned */
. = ALIGN(16384); /* init_task */
.data.init_task : { *(.data.init_task) }
@@ -126,6 +139,7 @@ SECTIONS
.dlt : { *(.dlt) }
#endif
+ /* reserve space for interrupt stack by aligning __init* to 16k */
. = ALIGN(16384);
__init_begin = .;
.init.text : {
@@ -166,7 +180,7 @@ SECTIONS
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
- . = ALIGN(4096);
+ . = ALIGN(ASM_PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
@@ -174,14 +188,10 @@ SECTIONS
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
- . = ALIGN(4096);
+ . = ALIGN(ASM_PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : { *(.bss) *(COMMON) }
- __bss_stop = .;
-
_end = . ;
/* Sections to be discarded */
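With the vm0 page tables now inside [__bss_start, __bss_stop), the early
boot loop that clears the BSS zeroes them for free, which is what made the
C initializers in init_task.c removable. The usual idiom, sketched
(head.S does the same word-wise in $bss_loop; symbol names come from the
linker script above):

    extern char __bss_start[], __bss_stop[];

    static void clear_bss(void)
    {
            for (char *p = __bss_start; p < __bss_stop; p++)
                    *p = 0;
    }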
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3796be67cd5..63171256265 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -6,6 +6,7 @@
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2004 Randolph Chung (tausq@debian.org)
+ * Copyright 2006 Helge Deller (deller@gmx.de)
*
*/
@@ -371,8 +372,8 @@ static void __init setup_bootmem(void)
void free_initmem(void)
{
- unsigned long addr;
-
+ unsigned long addr, init_begin, init_end;
+
printk(KERN_INFO "Freeing unused kernel memory: ");
#ifdef CONFIG_DEBUG_KERNEL
@@ -395,8 +396,11 @@ void free_initmem(void)
local_irq_enable();
#endif
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ /* align __init_begin and __init_end to page size,
+ ignoring linker script where we might have tried to save RAM */
+ init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
+ init_end = PAGE_ALIGN((unsigned long)(&__init_end));
+ for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
@@ -407,7 +411,7 @@ void free_initmem(void)
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
- printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
+ printk("%luk freed\n", (init_end - init_begin) >> 10);
}
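PAGE_ALIGN rounds an address up to the next page boundary, so the loop now
frees whole pages even where the linker script packed __init_begin and
__init_end tighter. The macro is essentially:

    #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)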
@@ -639,11 +643,13 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
* Map the fault vector writable so we can
* write the HPMC checksum.
*/
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
if (address >= ro_start && address < ro_end
&& address != fv_addr
&& address != gw_addr)
pte = __mk_pte(address, PAGE_KERNEL_RO);
else
+#endif
pte = __mk_pte(address, pgprot);
if (address >= end_paddr)
@@ -874,8 +880,7 @@ unsigned long alloc_sid(void)
flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
spin_lock(&sid_lock);
}
- if (free_space_ids == 0)
- BUG();
+ BUG_ON(free_space_ids == 0);
}
free_space_ids--;
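BUG_ON(x) is simply the guarded form of BUG(), so these conversions do not
change behaviour; from the generic header it is essentially:

    #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)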
@@ -899,8 +904,7 @@ void free_sid(unsigned long spaceid)
spin_lock(&sid_lock);
- if (*dirty_space_offset & (1L << index))
- BUG(); /* attempt to free space id twice */
+ BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
*dirty_space_offset |= (1L << index);
dirty_space_ids++;
@@ -975,7 +979,7 @@ static void recycle_sids(void)
static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse = 0;
+static unsigned int recycle_inuse;
void flush_tlb_all(void)
{
@@ -984,9 +988,7 @@ void flush_tlb_all(void)
do_recycle = 0;
spin_lock(&sid_lock);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
- if (recycle_inuse) {
- BUG(); /* FIXME: Use a semaphore/wait queue here */
- }
+ BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
recycle_inuse++;
do_recycle++;