Diffstat (limited to 'mm')
 mm/filemap_xip.c | 23
 mm/madvise.c     | 13
 mm/memory.c      | 25
 mm/mempolicy.c   |  2
 mm/page_alloc.c  |  4
 5 files changed, 32 insertions(+), 35 deletions(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 4553b2c5aab..8c199f53773 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
if (unlikely(IS_ERR(page))) {
if (PTR_ERR(page) == -ENODATA) {
/* sparse */
- page = virt_to_page(empty_zero_page);
+ page = ZERO_PAGE(0);
} else {
desc->error = PTR_ERR(page);
goto out;
}
- } else
- BUG_ON(!PageUptodate(page));
+ }
/* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
@@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
flush_dcache_page(page);
/*
- * Ok, we have the page, and it's up-to-date, so
- * now we can copy it to user space...
+ * Ok, we have the page, so now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
@@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
* xip_write
*
* This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
*/
static void
__xip_unmap (struct address_space * mapping,
@@ -187,7 +185,7 @@ __xip_unmap (struct address_space * mapping,
* We need the page_table_lock to protect us from page faults,
* munmap, fork, etc...
*/
- pte = page_check_address(virt_to_page(empty_zero_page), mm,
+ pte = page_check_address(ZERO_PAGE(address), mm,
address);
if (!IS_ERR(pte)) {
/* Nuke the page table entry. */
@@ -230,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area,
page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
if (!IS_ERR(page)) {
- BUG_ON(!PageUptodate(page));
return page;
}
if (PTR_ERR(page) != -ENODATA)
@@ -245,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area,
pgoff*(PAGE_SIZE/512), 1);
if (IS_ERR(page))
return NULL;
- BUG_ON(!PageUptodate(page));
/* unmap page at pgoff from all other vmas */
__xip_unmap(mapping, pgoff);
} else {
- /* not shared and writable, use empty_zero_page */
- page = virt_to_page(empty_zero_page);
+ /* not shared and writable, use ZERO_PAGE() */
+ page = ZERO_PAGE(address);
}
return page;
@@ -319,8 +315,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
break;
}
- BUG_ON(!PageUptodate(page));
-
copied = filemap_copy_from_user(page, offset, buf, bytes);
flush_dcache_page(page);
if (likely(copied > 0)) {
@@ -435,8 +429,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
return 0;
else
return PTR_ERR(page);
- } else
- BUG_ON(!PageUptodate(page));
+ }
kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + offset, 0, length);
kunmap_atomic(kaddr, KM_USER0);
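
For context on the filemap_xip.c hunks: ZERO_PAGE() is the architecture's accessor for the shared all-zero page, used here instead of open-coding virt_to_page(empty_zero_page). On most architectures it ignores its argument, but cache-aliasing architectures (MIPS, for example) use the virtual address to pick a correctly coloured zero page, which is presumably why __xip_unmap() and the nopage path pass `address`. A rough sketch of the two flavours as they looked in 2.6-era headers (the exact MIPS expression is from memory and should be treated as an assumption):

    /* Generic flavour (e.g. i386): a single zero page, argument unused. */
    #define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

    /* Colour-aware flavour (e.g. MIPS): choose the zero page whose cache
     * colour matches the user virtual address, to avoid aliasing. */
    #define ZERO_PAGE(vaddr) \
            (virt_to_page(empty_zero_page + \
                    (((unsigned long)(vaddr)) & zero_page_mask)))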
diff --git a/mm/madvise.c b/mm/madvise.c
index 73180a22877..c8c01a12fea 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,9 +83,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
{
struct file *file = vma->vm_file;
- if (!file)
- return -EBADF;
-
if (file->f_mapping->a_ops->get_xip_page) {
/* no bad return value, but ignore advice */
return 0;
@@ -140,11 +137,16 @@ static long madvise_dontneed(struct vm_area_struct * vma,
return 0;
}
-static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
- unsigned long start, unsigned long end, int behavior)
+static long
+madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+ unsigned long start, unsigned long end, int behavior)
{
+ struct file *filp = vma->vm_file;
long error = -EBADF;
+ if (!filp)
+ goto out;
+
switch (behavior) {
case MADV_NORMAL:
case MADV_SEQUENTIAL:
@@ -165,6 +167,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev
break;
}
+out:
return error;
}
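
The madvise.c change hoists the NULL vm_file check out of madvise_willneed() and into madvise_vma(), so a mapping with no backing file now fails early with -EBADF before the behaviour switch. A hedged userspace illustration of what that looks like from the caller's side (the EBADF outcome is tied to kernels of this vintage; later kernels accept MADV_WILLNEED on anonymous memory):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* Anonymous mapping: no backing file, so on a kernel of this era
             * madvise() is expected to fail with EBADF. */
            if (madvise(p, len, MADV_WILLNEED) < 0)
                    printf("MADV_WILLNEED: %s\n", strerror(errno));

            munmap(p, len);
            return 0;
    }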
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa625..6fe77acbc1c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
* Do a quick page-table lookup for a single page.
* mm->page_table_lock must be held.
*/
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+ int read, int write, int accessed)
{
pgd_t *pgd;
pud_t *pud;
@@ -818,9 +818,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (write && !pte_dirty(pte) && !PageDirty(page))
- set_page_dirty(page);
- mark_page_accessed(page);
+ if (accessed) {
+ if (write && !pte_dirty(pte) && !PageDirty(page))
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ }
return page;
}
}
@@ -829,16 +831,19 @@ out:
return NULL;
}
-struct page *
+inline struct page *
follow_page(struct mm_struct *mm, unsigned long address, int write)
{
- return __follow_page(mm, address, /*read*/0, write);
+ return __follow_page(mm, address, 0, write, 1);
}
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
{
- return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+ return __follow_page(mm, address, 1, 0, 0) != NULL;
}
EXPORT_SYMBOL(check_user_page_readable);
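
In memory.c, __follow_page() gains an `accessed` argument so that check_user_page_readable() can look up a page without calling mark_page_accessed() or set_page_dirty(); as the new comment notes, oprofile calls it from interrupt context, where those bookkeeping helpers could end up taking non-irq-safe locks. A hypothetical sketch of such an interrupt-context caller, loosely modelled on oprofile's user-stack walk (the frame_head type and function name are illustrative, not from this patch):

    struct frame_head {
            struct frame_head *ebp;	/* saved frame pointer */
            unsigned long ret;		/* return address */
    };

    /* Called from a profiling interrupt: make sure the whole frame record is
     * mapped readable before dereferencing it.  With accessed == 0 the page
     * walk does no LRU or dirty-tracking work, so no non-irq-safe locks are
     * taken. */
    static int frame_is_readable(struct mm_struct *mm, struct frame_head *head)
    {
            unsigned long addr = (unsigned long)head;

            return check_user_page_readable(mm, addr) &&
                   check_user_page_readable(mm, addr + sizeof(*head) - 1);
    }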
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cb41c31e7c8..1694845526b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1138,11 +1138,11 @@ void mpol_free_shared_policy(struct shared_policy *p)
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
+ rb_erase(&n->nd, &p->root);
mpol_free(n->policy);
kmem_cache_free(sn_cache, n);
}
spin_unlock(&p->lock);
- p->root = RB_ROOT;
}
/* assumes fs == KERNEL_DS */
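
The mempolicy.c hunk frees each sp_node by fetching its successor first and then unlinking it with rb_erase(), rather than freeing every node and resetting the root afterwards. A generic sketch of that rbtree teardown idiom (the node type and free function here are illustrative, not from the patch):

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct my_node {
            struct rb_node nd;
            /* payload ... */
    };

    static void free_all(struct rb_root *root)
    {
            struct rb_node *next = rb_first(root);

            while (next) {
                    struct my_node *n = rb_entry(next, struct my_node, nd);

                    next = rb_next(&n->nd);	/* grab the successor first... */
                    rb_erase(&n->nd, root);	/* ...then unlink this node... */
                    kfree(n);		/* ...and only then free it */
            }
    }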
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d6ba6a4b59..42bccfb8464 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1861,7 +1861,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long i, j;
- const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
int cpu, nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
@@ -1934,9 +1933,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->zone_mem_map = pfn_to_page(zone_start_pfn);
zone->zone_start_pfn = zone_start_pfn;
- if ((zone_start_pfn) & (zone_required_alignment-1))
- printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
-
memmap_init(size, nid, j, zone_start_pfn);
zonetable_add(zone, nid, j, zone_start_pfn, size);