Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory_hotplug.c |  1
-rw-r--r-- | mm/mempolicy.c      |  8
-rw-r--r-- | mm/page_alloc.c     | 17
-rw-r--r-- | mm/rmap.c           |  3
-rw-r--r-- | mm/slab.c           | 65
-rw-r--r-- | mm/vmscan.c         | 21
6 files changed, 89 insertions(+), 26 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a918f77f02f..1fe76d963ac 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -130,6 +130,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 		onlined_pages++;
 	}
 	zone->present_pages += onlined_pages;
+	zone->zone_pgdat->node_present_pages += onlined_pages;
 
 	setup_per_zone_pages_min();
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 954981b1430..2a820600942 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -748,7 +748,7 @@ long do_mbind(unsigned long start, unsigned long len,
 		      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 	    || mode > MPOL_MAX)
 		return -EINVAL;
-	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
+	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
 		return -EPERM;
 
 	if (start & ~PAGE_MASK)
@@ -942,20 +942,20 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 */
 	if ((current->euid != task->suid) && (current->euid != task->uid) &&
 	    (current->uid != task->suid) && (current->uid != task->uid) &&
-	    !capable(CAP_SYS_ADMIN)) {
+	    !capable(CAP_SYS_NICE)) {
 		err = -EPERM;
 		goto out;
 	}
 
 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
-	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
+	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
 		err = -EPERM;
 		goto out;
 	}
 
 	err = do_migrate_pages(mm, &old, &new,
-		capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
+		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 out:
 	mmput(mm);
 	return err;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 791690d7d3f..234bd4895d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -590,21 +590,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 }
 
 #ifdef CONFIG_NUMA
-/* Called from the slab reaper to drain remote pagesets */
-void drain_remote_pages(void)
+/*
+ * Called from the slab reaper to drain pagesets on a particular node that
+ * belong to the currently executing processor.
+ */
+void drain_node_pages(int nodeid)
 {
-	struct zone *zone;
-	int i;
+	int i, z;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	for_each_zone(zone) {
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
 		struct per_cpu_pageset *pset;
 
-		/* Do not drain local pagesets */
-		if (zone->zone_pgdat->node_id == numa_node_id())
-			continue;
-
 		pset = zone_pcp(zone, smp_processor_id());
 		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
 			struct per_cpu_pages *pcp;
diff --git a/mm/rmap.c b/mm/rmap.c
index d8ce5ff6145..67f0e20b101 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -537,9 +537,6 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	BUG_ON(PageAnon(page));
-	BUG_ON(!pfn_valid(page_to_pfn(page)));
-
 	if (atomic_inc_and_test(&page->_mapcount))
 		__inc_page_state(nr_mapped);
 }
diff --git a/mm/slab.c b/mm/slab.c
index 61800b88e24..d0bd7f07ab0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -789,6 +789,47 @@ static void __slab_error(const char *function, struct kmem_cache *cachep, char *
 	dump_stack();
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * Special reaping functions for NUMA systems called from cache_reap().
+ * These take care of doing round robin flushing of alien caches (containing
+ * objects freed on different nodes from which they were allocated) and the
+ * flushing of remote pcps by calling drain_node_pages.
+ */
+static DEFINE_PER_CPU(unsigned long, reap_node);
+
+static void init_reap_node(int cpu)
+{
+	int node;
+
+	node = next_node(cpu_to_node(cpu), node_online_map);
+	if (node == MAX_NUMNODES)
+		node = 0;
+
+	__get_cpu_var(reap_node) = node;
+}
+
+static void next_reap_node(void)
+{
+	int node = __get_cpu_var(reap_node);
+
+	/*
+	 * Also drain per cpu pages on remote zones
+	 */
+	if (node != numa_node_id())
+		drain_node_pages(node);
+
+	node = next_node(node, node_online_map);
+	if (unlikely(node >= MAX_NUMNODES))
+		node = first_node(node_online_map);
+	__get_cpu_var(reap_node) = node;
+}
+
+#else
+#define init_reap_node(cpu) do { } while (0)
+#define next_reap_node(void) do { } while (0)
+#endif
+
 /*
  * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
  * via the workqueue/eventd.
@@ -806,6 +847,7 @@ static void __devinit start_cpu_timer(int cpu)
 	 * at that time.
 	 */
 	if (keventd_up() && reap_work->func == NULL) {
+		init_reap_node(cpu);
 		INIT_WORK(reap_work, cache_reap, NULL);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
@@ -884,6 +926,23 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
+/*
+ * Called from cache_reap() to regularly drain alien caches round robin.
+ */
+static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+{
+	int node = __get_cpu_var(reap_node);
+
+	if (l3->alien) {
+		struct array_cache *ac = l3->alien[node];
+		if (ac && ac->avail) {
+			spin_lock_irq(&ac->lock);
+			__drain_alien_cache(cachep, ac, node);
+			spin_unlock_irq(&ac->lock);
+		}
+	}
+}
+
 static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
 {
 	int i = 0;
@@ -902,6 +961,7 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **al
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -3497,8 +3557,7 @@ static void cache_reap(void *unused)
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
-		if (l3->alien)
-			drain_alien_cache(searchp, l3->alien);
+		reap_alien(searchp, l3);
 		spin_lock_irq(&l3->list_lock);
 
 		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3548,7 +3607,7 @@ static void cache_reap(void *unused)
 	}
 	check_irq_on();
 	mutex_unlock(&cache_chain_mutex);
-	drain_remote_pages();
+	next_reap_node();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b0af7593d01..4fe7e3aa02e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage,
 	 * the page.
 	 */
 	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-		return 1;
+		return -EAGAIN;
 
 	/*
 	 * Establish swap ptes for anonymous pages or destroy pte
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage,
 	 * If the page was not migrated then the PageSwapCache bit
 	 * is still set and the operation may continue.
	 */
-	try_to_unmap(page, 1);
+	if (try_to_unmap(page, 1) == SWAP_FAIL)
+		/* A vma has VM_LOCKED set -> Permanent failure */
+		return -EPERM;
 
 	/*
 	 * Give up if we were unable to remove all mappings.
 	 */
 	if (page_mapcount(page))
-		return 1;
+		return -EAGAIN;
 
 	write_lock_irq(&mapping->tree_lock);
 
@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage,
 	if (!page_mapping(page) || page_count(page) != nr_refs ||
 			*radix_pointer != page) {
 		write_unlock_irq(&mapping->tree_lock);
-		return 1;
+		return -EAGAIN;
 	}
 
 	/*
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy);
  */
 int migrate_page(struct page *newpage, struct page *page)
 {
+	int rc;
+
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	if (migrate_page_remove_references(newpage, page, 2))
-		return -EAGAIN;
+	rc = migrate_page_remove_references(newpage, page, 2);
+
+	if (rc)
+		return rc;
 
 	migrate_page_copy(newpage, page);
 
@@ -1883,7 +1889,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 	if (!(gfp_mask & __GFP_WAIT) ||
 		zone->all_unreclaimable ||
-		atomic_read(&zone->reclaim_in_progress) > 0)
+		atomic_read(&zone->reclaim_in_progress) > 0 ||
+		(p->flags & PF_MEMALLOC))
 		return 0;
 
 	node_id = zone->zone_pgdat->node_id;
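
For illustration, the round-robin node rotor that init_reap_node()/next_reap_node() introduce in the slab.c hunks above can be sketched as a standalone userspace program. This is a minimal sketch, not kernel code: node_online_map and the kernel's next_node()/first_node() nodemask helpers are mocked with a plain bool array, and the per-cpu reap_node variable becomes a local.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NUMNODES 8

/* Mocked node_online_map: nodes 0, 2, 3 and 6 are online. */
static bool node_online[MAX_NUMNODES] = {
	[0] = true, [2] = true, [3] = true, [6] = true,
};

/* Simplified stand-in for the kernel's next_node(): first online node
 * strictly after 'node', or MAX_NUMNODES if there is none. */
static int next_node(int node)
{
	for (int n = node + 1; n < MAX_NUMNODES; n++)
		if (node_online[n])
			return n;
	return MAX_NUMNODES;
}

/* Simplified stand-in for first_node(). */
static int first_node(void)
{
	return next_node(-1);
}

int main(void)
{
	/* Stand-in for the per-cpu reap_node, seeded like init_reap_node(). */
	int node = first_node();

	/* Ten reap ticks: each tick drains one node, then advances with
	 * wrap-around, mirroring the traversal next_reap_node() performs. */
	for (int tick = 0; tick < 10; tick++) {
		printf("tick %d -> reap node %d\n", tick, node);
		node = next_node(node);
		if (node >= MAX_NUMNODES)
			node = first_node();
	}
	return 0;
}

Each reap tick visits a single node and then advances, so across successive cache_reap() runs every online node's alien caches and remote per-cpu pages get drained, without any single tick having to scan all nodes.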