author	Christoph Lameter <clameter@sgi.com>	2008-02-04 22:29:19 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 09:44:18 -0800
commit	3dfa5721f12c3d5a441448086bee156887daa961 (patch)
tree	8ace8c3f842f8b626b762bb9d2a9b24d8e3bd130
parent	5dc331852848a38ca00a2817e5b98a1d0561b116 (diff)
Page allocator: get rid of the list of cold pages
We have repeatedly discussed whether the cold pages still have a point. There is one way to join the two lists: use a single list, putting the cold pages at the end and the hot pages at the beginning. That way a single list can serve both types of allocations.

The discussion of the RFC for this and Mel's measurements indicate that there may not be much point left in keeping separate lists for hot and cold pages (see http://marc.info/?t=119492914200001&r=1&w=2).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Martin Bligh <mbligh@mbligh.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mmzone.h	2
-rw-r--r--	mm/page_alloc.c	57
-rw-r--r--	mm/vmstat.c	30
3 files changed, 40 insertions(+), 49 deletions(-)
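For illustration, here is a minimal userspace sketch of the single-list scheme the patch below implements: hot frees go to the head of the per-CPU list, cold frees to the tail, and allocations pop from the matching end. The list_head helpers are re-implemented so the example compiles standalone; they stand in for (and are not) the kernel's <linux/list.h>, and the pcp_free()/pcp_alloc() names are hypothetical, not kernel functions.

/*
 * Sketch of the merged hot/cold pcp list (assumed, simplified types;
 * the real struct page, locking, and bulk refill are omitted).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

/* Insert right after the head: the "hot" end of the list. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Insert right before the head: the "cold" end of the list. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct page { int id; struct list_head lru; };

struct per_cpu_pages {
	int count;
	struct list_head list;	/* hot pages at the head, cold at the tail */
};

/* Mirrors the free_hot_cold_page() change: one list, two insertion points. */
static void pcp_free(struct per_cpu_pages *pcp, struct page *page, int cold)
{
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	pcp->count++;
}

/* Mirrors the allocation side: hot requests take from the head, cold from the tail. */
static struct page *pcp_alloc(struct per_cpu_pages *pcp, int cold)
{
	struct list_head *entry;

	if (!pcp->count)
		return NULL;
	entry = cold ? pcp->list.prev : pcp->list.next;
	list_del(entry);
	pcp->count--;
	return (struct page *)((char *)entry - offsetof(struct page, lru));
}

int main(void)
{
	struct per_cpu_pages pcp = { 0 };
	struct page pages[4] = { { 1 }, { 2 }, { 3 }, { 4 } };

	INIT_LIST_HEAD(&pcp.list);
	pcp_free(&pcp, &pages[0], 0);	/* hot  -> head */
	pcp_free(&pcp, &pages[1], 1);	/* cold -> tail */
	pcp_free(&pcp, &pages[2], 0);	/* hot  -> head */
	pcp_free(&pcp, &pages[3], 1);	/* cold -> tail */

	printf("hot alloc:  page %d\n", pcp_alloc(&pcp, 0)->id);	/* page 3 */
	printf("cold alloc: page %d\n", pcp_alloc(&pcp, 1)->id);	/* page 4 */
	return 0;
}

Because one list now serves both cases, a single count/high/batch triple suffices, which is why the setup_pageset() hunk below drops the second initialization block for the cold list.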
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4c4522a51a3..8d8d1977736 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -113,7 +113,7 @@ struct per_cpu_pages {
};
struct per_cpu_pageset {
- struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
+ struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
s8 expire;
#endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c7de8e959f..144c0967e70 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -901,24 +901,21 @@ static void drain_pages(unsigned int cpu)
{
unsigned long flags;
struct zone *zone;
- int i;
for_each_zone(zone) {
struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
if (!populated_zone(zone))
continue;
pset = zone_pcp(zone, cpu);
- for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
- struct per_cpu_pages *pcp;
-
- pcp = &pset->pcp[i];
- local_irq_save(flags);
- free_pages_bulk(zone, pcp->count, &pcp->list, 0);
- pcp->count = 0;
- local_irq_restore(flags);
- }
+
+ pcp = &pset->pcp;
+ local_irq_save(flags);
+ free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+ pcp->count = 0;
+ local_irq_restore(flags);
}
}
@@ -993,10 +990,13 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
- pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+ pcp = &zone_pcp(zone, get_cpu())->pcp;
local_irq_save(flags);
__count_vm_event(PGFREE);
- list_add(&page->lru, &pcp->list);
+ if (cold)
+ list_add_tail(&page->lru, &pcp->list);
+ else
+ list_add(&page->lru, &pcp->list);
set_page_private(page, get_pageblock_migratetype(page));
pcp->count++;
if (pcp->count >= pcp->high) {
@@ -1054,7 +1054,7 @@ again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- pcp = &zone_pcp(zone, cpu)->pcp[cold];
+ pcp = &zone_pcp(zone, cpu)->pcp;
local_irq_save(flags);
if (!pcp->count) {
pcp->count = rmqueue_bulk(zone, 0,
@@ -1064,9 +1064,15 @@ again:
}
/* Find a page of the appropriate migrate type */
- list_for_each_entry(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
+ if (cold) {
+ list_for_each_entry_reverse(page, &pcp->list, lru)
+ if (page_private(page) == migratetype)
+ break;
+ } else {
+ list_for_each_entry(page, &pcp->list, lru)
+ if (page_private(page) == migratetype)
+ break;
+ }
/* Allocate more to the pcp list if necessary */
if (unlikely(&page->lru == &pcp->list)) {
@@ -1793,12 +1799,9 @@ void show_free_areas(void)
pageset = zone_pcp(zone, cpu);
- printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
- "Cold: hi:%5d, btch:%4d usd:%4d\n",
- cpu, pageset->pcp[0].high,
- pageset->pcp[0].batch, pageset->pcp[0].count,
- pageset->pcp[1].high, pageset->pcp[1].batch,
- pageset->pcp[1].count);
+ printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+ cpu, pageset->pcp.high,
+ pageset->pcp.batch, pageset->pcp.count);
}
}
@@ -2596,17 +2599,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
memset(p, 0, sizeof(*p));
- pcp = &p->pcp[0]; /* hot */
+ pcp = &p->pcp;
pcp->count = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
INIT_LIST_HEAD(&pcp->list);
-
- pcp = &p->pcp[1]; /* cold*/
- pcp->count = 0;
- pcp->high = 2 * batch;
- pcp->batch = max(1UL, batch/2);
- INIT_LIST_HEAD(&pcp->list);
}
/*
@@ -2619,7 +2616,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
{
struct per_cpu_pages *pcp;
- pcp = &p->pcp[0]; /* hot list */
+ pcp = &p->pcp;
pcp->high = high;
pcp->batch = max(1UL, high/4);
if ((high/4) > (PAGE_SHIFT * 8))
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9ffc573ceb6..888668e0b7d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -337,7 +337,7 @@ void refresh_cpu_vm_stats(int cpu)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+ if (!p->expire || !p->pcp.count)
continue;
/*
@@ -352,11 +352,8 @@ void refresh_cpu_vm_stats(int cpu)
if (p->expire)
continue;
- if (p->pcp[0].count)
- drain_zone_pages(zone, p->pcp + 0);
-
- if (p->pcp[1].count)
- drain_zone_pages(zone, p->pcp + 1);
+ if (p->pcp.count)
+ drain_zone_pages(zone, &p->pcp);
#endif
}
@@ -693,20 +690,17 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- int j;
pageset = zone_pcp(zone, i);
- for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
- seq_printf(m,
- "\n cpu: %i pcp: %i"
- "\n count: %i"
- "\n high: %i"
- "\n batch: %i",
- i, j,
- pageset->pcp[j].count,
- pageset->pcp[j].high,
- pageset->pcp[j].batch);
- }
+ seq_printf(m,
+ "\n cpu: %i"
+ "\n count: %i"
+ "\n high: %i"
+ "\n batch: %i",
+ i,
+ pageset->pcp.count,
+ pageset->pcp.high,
+ pageset->pcp.batch);
#ifdef CONFIG_SMP
seq_printf(m, "\n vm stats threshold: %d",
pageset->stat_threshold);