author     Adrian Bunk <bunk@r063144.stusta.swh.mhn.de>  2006-03-20 18:30:36 +0100
committer  Adrian Bunk <bunk@r063144.stusta.swh.mhn.de>  2006-03-20 18:30:36 +0100
commit     0f76ee451484d02c7405d92e7bceb39b415abb01 (patch)
tree       9722f84281f786ba48971dde057f5171a49969e4 /arch/mips/mm
parent     01d206a7c1167639f6ca6dac22140fbdca017558 (diff)
parent     7705a8792b0fc82fd7d4dd923724606bbfd9fb20 (diff)
Merge with git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c   | 140
-rw-r--r--  arch/mips/mm/c-tx39.c  |  71
-rw-r--r--  arch/mips/mm/cache.c   |   5
-rw-r--r--  arch/mips/mm/cex-sb1.S |   2
-rw-r--r--  arch/mips/mm/init.c    |  16
-rw-r--r--  arch/mips/mm/tlbex.c   |  34
6 files changed, 82 insertions, 186 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 422b55fab07..9572ed44f0d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -235,7 +235,9 @@ static inline void r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (sc_lsize == 16)
+ if (scache_size == 0)
+ r4k_blast_scache_page = (void *)no_sc_noop;
+ else if (sc_lsize == 16)
r4k_blast_scache_page = blast_scache16_page;
else if (sc_lsize == 32)
r4k_blast_scache_page = blast_scache32_page;
@@ -251,7 +253,9 @@ static inline void r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (sc_lsize == 16)
+ if (scache_size == 0)
+ r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+ else if (sc_lsize == 16)
r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
else if (sc_lsize == 32)
r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
@@ -267,7 +271,9 @@ static inline void r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (sc_lsize == 16)
+ if (scache_size == 0)
+ r4k_blast_scache = (void *)no_sc_noop;
+ else if (sc_lsize == 16)
r4k_blast_scache = blast_scache16;
else if (sc_lsize == 32)
r4k_blast_scache = blast_scache32;
@@ -369,6 +375,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
struct flush_cache_page_args {
struct vm_area_struct *vma;
unsigned long addr;
+ unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
@@ -376,6 +383,7 @@ static inline void local_r4k_flush_cache_page(void *args)
struct flush_cache_page_args *fcp_args = args;
struct vm_area_struct *vma = fcp_args->vma;
unsigned long addr = fcp_args->addr;
+ unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
@@ -425,11 +433,12 @@ static inline void local_r4k_flush_cache_page(void *args)
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
- addr = INDEX_BASE + (addr & (dcache_size - 1));
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page_indexed(addr);
- if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page_indexed(addr);
+ r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
+ paddr : addr);
+ if (exec && !cpu_icache_snoops_remote_store) {
+ r4k_blast_scache_page_indexed(paddr);
+ }
}
if (exec) {
if (cpu_has_vtag_icache) {
@@ -449,6 +458,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.vma = vma;
args.addr = addr;
+ args.pfn = pfn;
on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
@@ -464,72 +474,39 @@ static void r4k_flush_data_cache_page(unsigned long addr)
}
struct flush_icache_range_args {
- unsigned long __user start;
- unsigned long __user end;
+ unsigned long start;
+ unsigned long end;
};
static inline void local_r4k_flush_icache_range(void *args)
{
struct flush_icache_range_args *fir_args = args;
- unsigned long dc_lsize = cpu_dcache_line_size();
- unsigned long ic_lsize = cpu_icache_line_size();
- unsigned long sc_lsize = cpu_scache_line_size();
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
- unsigned long addr, aend;
if (!cpu_has_ic_fills_f_dc) {
if (end - start > dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
- addr = start & ~(dc_lsize - 1);
- aend = (end - 1) & ~(dc_lsize - 1);
-
- while (1) {
- /* Hit_Writeback_Inv_D */
- protected_writeback_dcache_line(addr);
- if (addr == aend)
- break;
- addr += dc_lsize;
- }
+ protected_blast_dcache_range(start, end);
}
- if (!cpu_icache_snoops_remote_store) {
- if (end - start > scache_size) {
+ if (!cpu_icache_snoops_remote_store && scache_size) {
+ if (end - start > scache_size)
r4k_blast_scache();
- } else {
- addr = start & ~(sc_lsize - 1);
- aend = (end - 1) & ~(sc_lsize - 1);
-
- while (1) {
- /* Hit_Writeback_Inv_SD */
- protected_writeback_scache_line(addr);
- if (addr == aend)
- break;
- addr += sc_lsize;
- }
- }
+ else
+ protected_blast_scache_range(start, end);
}
}
if (end - start > icache_size)
r4k_blast_icache();
- else {
- addr = start & ~(ic_lsize - 1);
- aend = (end - 1) & ~(ic_lsize - 1);
- while (1) {
- /* Hit_Invalidate_I */
- protected_flush_icache_line(addr);
- if (addr == aend)
- break;
- addr += ic_lsize;
- }
- }
+ else
+ protected_blast_icache_range(start, end);
}
-static void r4k_flush_icache_range(unsigned long __user start,
- unsigned long __user end)
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
@@ -620,27 +597,14 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
- unsigned long end, a;
-
/* Catch bad driver code */
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (size >= scache_size) {
+ if (size >= scache_size)
r4k_blast_scache();
- return;
- }
-
- a = addr & ~(sc_lsize - 1);
- end = (addr + size - 1) & ~(sc_lsize - 1);
- while (1) {
- flush_scache_line(a); /* Hit_Writeback_Inv_SD */
- if (a == end)
- break;
- a += sc_lsize;
- }
+ else
+ blast_scache_range(addr, addr + size);
return;
}
@@ -652,17 +616,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = cpu_dcache_line_size();
-
R4600_HIT_CACHEOP_WAR_IMPL;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size - 1) & ~(dc_lsize - 1);
- while (1) {
- flush_dcache_line(a); /* Hit_Writeback_Inv_D */
- if (a == end)
- break;
- a += dc_lsize;
- }
+ blast_dcache_range(addr, addr + size);
}
bc_wback_inv(addr, size);
@@ -670,44 +625,22 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
- unsigned long end, a;
-
/* Catch bad driver code */
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (size >= scache_size) {
+ if (size >= scache_size)
r4k_blast_scache();
- return;
- }
-
- a = addr & ~(sc_lsize - 1);
- end = (addr + size - 1) & ~(sc_lsize - 1);
- while (1) {
- flush_scache_line(a); /* Hit_Writeback_Inv_SD */
- if (a == end)
- break;
- a += sc_lsize;
- }
+ else
+ blast_scache_range(addr, addr + size);
return;
}
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = cpu_dcache_line_size();
-
R4600_HIT_CACHEOP_WAR_IMPL;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size - 1) & ~(dc_lsize - 1);
- while (1) {
- flush_dcache_line(a); /* Hit_Writeback_Inv_D */
- if (a == end)
- break;
- a += dc_lsize;
- }
+ blast_dcache_range(addr, addr + size);
}
bc_inv(addr, size);
@@ -728,7 +661,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
R4600_HIT_CACHEOP_WAR_IMPL;
protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store)
+ if (!cpu_icache_snoops_remote_store && scache_size)
protected_writeback_scache_line(addr & ~(sc_lsize - 1));
protected_flush_icache_line(addr & ~(ic_lsize - 1));
if (MIPS4K_ICACHE_REFILL_WAR) {
@@ -1027,6 +960,7 @@ static void __init probe_pcache(void)
switch (c->cputype) {
case CPU_20KC:
case CPU_25KF:
+ c->dcache.flags |= MIPS_CACHE_PINDEX;
case CPU_R10000:
case CPU_R12000:
case CPU_SB1:
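
The c-r4k.c changes above mostly replace hand-rolled per-line flush loops with blast_*_range()/protected_blast_*_range() helpers, and install no_sc_noop for the secondary-cache hooks when scache_size is zero. The deleted hunks show exactly what such a range helper has to do; the sketch below reconstructs that pattern under an assumed name (the real helpers are defined elsewhere in the tree, not in this diff):

/*
 * Minimal sketch of a dcache range blast, reconstructed from the loops
 * this patch removes.  The name is illustrative, not the kernel's.
 */
static inline void sketch_blast_dcache_range(unsigned long start,
					     unsigned long end)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long addr = start & ~(lsize - 1);	/* first cache line */
	unsigned long aend = (end - 1) & ~(lsize - 1);	/* last cache line  */

	while (1) {
		flush_dcache_line(addr);	/* Hit_Writeback_Inv_D */
		if (addr == aend)
			break;
		addr += lsize;
	}
}

The icache and scache variants differ only in the line size queried and the per-line cache op issued. Independently of that cleanup, the new scache_size checks stub the secondary-cache hooks out with no_sc_noop on CPUs that have no L2 at all.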
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 0a97a9434eb..fe232e3988e 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -44,8 +44,6 @@ __asm__ __volatile__( \
/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
- unsigned long start = KSEG0;
- unsigned long end = (start + icache_size);
unsigned long flags, config;
/* disable icache (set ICE#) */
@@ -53,33 +51,18 @@ static void tx39h_flush_icache_all(void)
config = read_c0_conf();
write_c0_conf(config & ~TX39_CONF_ICE);
TX39_STOP_STREAMING();
-
- /* invalidate icache */
- while (start < end) {
- cache16_unroll32(start, Index_Invalidate_I);
- start += 0x200;
- }
-
+ blast_icache16();
write_c0_conf(config);
local_irq_restore(flags);
}
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
- unsigned long end, a;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
/* Catch bad driver code */
BUG_ON(size == 0);
iob();
- a = addr & ~(dc_lsize - 1);
- end = (addr + size - 1) & ~(dc_lsize - 1);
- while (1) {
- invalidate_dcache_line(a); /* Hit_Invalidate_D */
- if (a == end) break;
- a += dc_lsize;
- }
+ blast_inv_dcache_range(addr, addr + size);
}
@@ -227,7 +210,6 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
- page = (KSEG0 + (page & (dcache_size - 1)));
if (cpu_has_dc_aliases || exec)
tx39_blast_dcache_page_indexed(page);
if (exec)
@@ -241,42 +223,21 @@ static void tx39_flush_data_cache_page(unsigned long addr)
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long addr, aend;
-
if (end - start > dcache_size)
tx39_blast_dcache();
- else {
- addr = start & ~(dc_lsize - 1);
- aend = (end - 1) & ~(dc_lsize - 1);
-
- while (1) {
- /* Hit_Writeback_Inv_D */
- protected_writeback_dcache_line(addr);
- if (addr == aend)
- break;
- addr += dc_lsize;
- }
- }
+ else
+ protected_blast_dcache_range(start, end);
if (end - start > icache_size)
tx39_blast_icache();
else {
unsigned long flags, config;
- addr = start & ~(dc_lsize - 1);
- aend = (end - 1) & ~(dc_lsize - 1);
/* disable icache (set ICE#) */
local_irq_save(flags);
config = read_c0_conf();
write_c0_conf(config & ~TX39_CONF_ICE);
TX39_STOP_STREAMING();
- while (1) {
- /* Hit_Invalidate_I */
- protected_flush_icache_line(addr);
- if (addr == aend)
- break;
- addr += dc_lsize;
- }
+ protected_blast_icache_range(start, end);
write_c0_conf(config);
local_irq_restore(flags);
}
@@ -311,7 +272,7 @@ static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
- unsigned long end, a;
+ unsigned long end;
if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
end = addr + size;
@@ -322,20 +283,13 @@ static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
} else if (size > dcache_size) {
tx39_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size - 1) & ~(dc_lsize - 1);
- while (1) {
- flush_dcache_line(a); /* Hit_Writeback_Inv_D */
- if (a == end) break;
- a += dc_lsize;
- }
+ blast_dcache_range(addr, addr + size);
}
}
static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
- unsigned long end, a;
+ unsigned long end;
if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
end = addr + size;
@@ -346,14 +300,7 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
} else if (size > dcache_size) {
tx39_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- a = addr & ~(dc_lsize - 1);
- end = (addr + size - 1) & ~(dc_lsize - 1);
- while (1) {
- invalidate_dcache_line(a); /* Hit_Invalidate_D */
- if (a == end) break;
- a += dc_lsize;
- }
+ blast_inv_dcache_range(addr, addr + size);
}
}
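
tx39_dma_cache_inv() now uses blast_inv_dcache_range(), the invalidate-only counterpart of the writeback form used on the wback_inv path: for data arriving by DMA, stale lines only need to be discarded, and writing dirty lines back could clobber what the device just wrote. Reconstructed from the removed loop, the only difference is the per-line operation (again an illustrative name, not the kernel's):

static inline void sketch_blast_inv_dcache_range(unsigned long start,
						 unsigned long end)
{
	unsigned long lsize = current_cpu_data.dcache.linesz;
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);

	while (1) {
		invalidate_dcache_line(addr);	/* Hit_Invalidate_D, no writeback */
		if (addr == aend)
			break;
		addr += lsize;
	}
}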
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 314701a66b1..591c22b080e 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -25,8 +25,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
unsigned long pfn);
-void (*flush_icache_range)(unsigned long __user start,
- unsigned long __user end);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
/* MIPS specific cache operations */
@@ -53,7 +52,7 @@ EXPORT_SYMBOL(_dma_cache_inv);
* We could optimize the case where the cache argument is not BCACHE but
* that seems very atypical use ...
*/
-asmlinkage int sys_cacheflush(unsigned long __user addr,
+asmlinkage int sys_cacheflush(unsigned long addr,
unsigned long bytes, unsigned int cache)
{
if (bytes == 0)
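
The cache.c hunks drop the __user qualifier from flush_icache_range() and sys_cacheflush(). __user is a sparse address-space annotation for pointer targets; on a plain unsigned long there is no pointee to describe, and flush_icache_range() is also called on kernel addresses (module text, for instance), so the qualifier was presumably just noise. A small illustration, not taken from the patch:

/* __user tells sparse what a pointer points at, so it can flag direct
 * dereferences of user memory: */
unsigned long copy_from_user(void *to, const void __user *from,
			     unsigned long n);

/* On a bare integer the qualifier has no pointee to describe: */
unsigned long __user start;	/* the kind of annotation this patch removes */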
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 0e71580774f..e54a62f2807 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -64,7 +64,7 @@ LEAF(except_vec2_sb1)
sd k0,0x170($0)
sd k1,0x178($0)
-#if CONFIG_SB1_CEX_ALWAYS_FATAL
+#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
j handle_vec2_sb1
nop
#else
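
The one-character cex-sb1.S fix swaps #if for #ifdef on a Kconfig bool. Both forms happen to build (inside #if, an unknown identifier evaluates to 0), but #if reads as though the option carried a numeric value and trips -Wundef where that warning is enabled; #ifdef tests exactly what Kconfig provides, a macro that is either defined to 1 or absent. Illustration only:

/* Kconfig bools reach the preprocessor as either
 *	#define CONFIG_SB1_CEX_ALWAYS_FATAL 1
 * or no definition at all, so definedness is the right test: */
#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
	/* treat every SB1 cache error as fatal */
#endif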
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4ee91c9a556..0ff9a348b84 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
+#include <linux/proc_fs.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
@@ -200,6 +201,11 @@ static inline int page_is_ram(unsigned long pagenr)
return 0;
}
+static struct kcore_list kcore_mem, kcore_vmalloc;
+#ifdef CONFIG_64BIT
+static struct kcore_list kcore_kseg0;
+#endif
+
void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
@@ -249,6 +255,16 @@ void __init mem_init(void)
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+#ifdef CONFIG_64BIT
+ if ((unsigned long) &_text > (unsigned long) CKSEG0)
+ /* The -4 is a hack so that user tools don't have to handle
+ the overflow. */
+ kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+#endif
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0f9485806ba..ac4f4bfaae5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -280,69 +280,69 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
}
#define I_u1u2u3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2u1u3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u3u1u2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, unsigned int c) \
{ \
build_insn(buf, insn##op, b, c, a); \
}
#define I_u1u2s3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, a, b, c); \
}
#define I_u2s3u1(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
signed int b, unsigned int c) \
{ \
build_insn(buf, insn##op, c, a, b); \
}
#define I_u2u1s3(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b, signed int c) \
{ \
build_insn(buf, insn##op, b, a, c); \
}
#define I_u1u2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
unsigned int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1s2(op) \
- static inline void i##op(u32 **buf, unsigned int a, \
+ static inline void __init i##op(u32 **buf, unsigned int a, \
signed int b) \
{ \
build_insn(buf, insn##op, a, b); \
}
#define I_u1(op) \
- static inline void i##op(u32 **buf, unsigned int a) \
+ static inline void __init i##op(u32 **buf, unsigned int a) \
{ \
build_insn(buf, insn##op, a); \
}
#define I_0(op) \
- static inline void i##op(u32 **buf) \
+ static inline void __init i##op(u32 **buf) \
{ \
build_insn(buf, insn##op); \
}
@@ -623,42 +623,42 @@ static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
}
/* convenience functions for labeled branches */
-static void __attribute__((unused)) il_bltz(u32 **p, struct reloc **r,
- unsigned int reg, enum label_id l)
+static void __init __attribute__((unused))
+ il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bltz(p, reg, 0);
}
-static void __attribute__((unused)) il_b(u32 **p, struct reloc **r,
+static void __init __attribute__((unused)) il_b(u32 **p, struct reloc **r,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_b(p, 0);
}
-static void il_beqz(u32 **p, struct reloc **r, unsigned int reg,
+static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_beqz(p, reg, 0);
}
-static void __attribute__((unused))
+static void __init __attribute__((unused))
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
r_mips_pc16(r, *p, l);
i_beqzl(p, reg, 0);
}
-static void il_bnez(u32 **p, struct reloc **r, unsigned int reg,
+static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bnez(p, reg, 0);
}
-static void il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
+static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
enum label_id l)
{
r_mips_pc16(r, *p, l);
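
The tlbex.c hunks add __init to the macro-generated instruction emitters and the labeled-branch helpers, so any out-of-line copies the compiler emits land in .init.text and are discarded once the TLB handlers have been synthesized at boot. For orientation, this is how one of the wrapper macros above is meant to be used; the macro body is copied from the hunk, while the opcode name and the emitted example are illustrative assumptions:

#define I_u3u1u2(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
					unsigned int b, unsigned int c) \
	{							\
		build_insn(buf, insn##op, b, c, a);		\
	}

I_u3u1u2(_addu)		/* => static inline void __init i_addu(...) */

/* A handler builder then appends instructions to its buffer through it,
 * e.g. i_addu(&p, reg_a, reg_b, reg_c) to emit one "addu" into *p. */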