author     Nick Piggin <nickpiggin@yahoo.com.au>      2006-01-08 01:00:42 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-01-08 20:12:40 -0800
commit     48db57f8ff10eb09ab887ccb6150b0da0c7be24e
tree       5c9fba3937bb802aa2944af189aae57b7bf7aa8b /mm
parent     23316bc86fd31c5d644a71c398ec41d9fecacec4
[PATCH] mm: free_pages opt
Try to streamline free_pages_bulk by ensuring callers don't pass in a
'count' that exceeds the list size.
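In sketch form, the inner-loop change this enables (condensed from the diff below; the elided body pulls one page off the tail of the list and frees it):

	/* Before: re-check the list on every pass and report pages freed. */
	while (!list_empty(list) && count--) {
		/* ... list_del() the tail page, __free_pages_bulk() it ... */
		ret++;
	}

	/* After: the caller guarantees count <= list length, so the
	 * emptiness check becomes an assertion and no count is returned. */
	while (count--) {
		BUG_ON(list_empty(list));
		/* ... list_del() the tail page, __free_one_page() it ... */
	}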
Some cleanups:
Rename __free_pages_bulk to __free_one_page.
Put the page list manipulation from __free_pages_ok into free_one_page (the new helper is shown after this list).
Make __free_pages_ok static.
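For reference, the new helper, lifted verbatim from the second hunk of the diff:

	static void free_one_page(struct zone *zone, struct page *page, int order)
	{
		LIST_HEAD(list);
		list_add(&page->lru, &list);
		free_pages_bulk(zone, 1, &list, order);
	}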
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  58
1 file changed, 30 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b92a945ae6..ad3d0202cde 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -308,7 +308,7 @@ static inline int page_is_buddy(struct page *page, int order)
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order)
 {
 	unsigned long page_idx;
@@ -383,40 +383,42 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static int
-free_pages_bulk(struct zone *zone, int count,
-		struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+					struct list_head *list, int order)
 {
-	struct page *page = NULL;
-	int ret = 0;
-
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
-	while (!list_empty(list) && count--) {
+	while (count--) {
+		struct page *page;
+
+		BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_pages_bulk list manipulates */
+		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_pages_bulk(page, zone, order);
-		ret++;
+		__free_one_page(page, zone, order);
 	}
 	spin_unlock(&zone->lock);
-	return ret;
 }
 
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-	unsigned long flags;
 	LIST_HEAD(list);
+	list_add(&page->lru, &list);
+	free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
 	int i;
 	int reserved = 0;
 
 	arch_free_page(page, order);
 
 #ifndef CONFIG_MMU
-	if (order > 0)
-		for (i = 1 ; i < (1 << order) ; ++i)
-			__put_page(page + i);
+	for (i = 1 ; i < (1 << order) ; ++i)
+		__put_page(page + i);
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
@@ -424,11 +426,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
 	if (reserved)
 		return;
 
-	list_add(&page->lru, &list);
-	kernel_map_pages(page, 1<<order, 0);
+	kernel_map_pages(page, 1 << order, 0);
 	local_irq_save(flags);
 	__mod_page_state(pgfree, 1 << order);
-	free_pages_bulk(page_zone(page), 1, &list, order);
+	free_one_page(page_zone(page), page, order);
 	local_irq_restore(flags);
 }
 
@@ -602,9 +603,8 @@ void drain_remote_pages(void)
 			struct per_cpu_pages *pcp;
 
 			pcp = &pset->pcp[i];
-			if (pcp->count)
-				pcp->count -= free_pages_bulk(zone, pcp->count,
-						&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 		}
 	}
 	local_irq_restore(flags);
@@ -627,8 +627,8 @@ static void __drain_pages(unsigned int cpu)
 
 			pcp = &pset->pcp[i];
 			local_irq_save(flags);
-			pcp->count -= free_pages_bulk(zone, pcp->count,
-					&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 			local_irq_restore(flags);
 		}
 	}
@@ -719,8 +719,10 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	__inc_page_state(pgfree);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
-	if (pcp->count >= pcp->high)
-		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+	if (pcp->count >= pcp->high) {
+		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		pcp->count -= pcp->batch;
+	}
 	local_irq_restore(flags);
 	put_cpu();
 }
@@ -759,7 +761,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 
 again:
 	cpu = get_cpu();
-	if (order == 0) {
+	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 
 		pcp = &zone_pcp(zone, cpu)->pcp[cold];
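The caller-side half of the new contract, isolated from the drain hunks above: the per-CPU count is now maintained by the caller, inside the same interrupts-off region as the free, so the 'count' handed to free_pages_bulk() can never exceed the length of the list it walks.

	free_pages_bulk(zone, pcp->count, &pcp->list, 0);
	pcp->count = 0;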