author     Christoph Lameter <clameter@sgi.com>    2006-06-30 01:55:39 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-30 11:25:35 -0700
commit     b1e7a8fd854d2f895730e82137400012b509650e
tree       9fba87ff6b0146ebd4ee5bc7d5f0c8b037dbb3ad  /mm/page-writeback.c
parent     df849a1529c106f7460e51479ca78fe07b07dc8c
[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
This makes nr_dirty a per zone counter. Looping over all processors is
avoided during writeback state determination.
The counter aggregation for nr_dirty had to be undone in the NFS layer since
we summed up the page counts from multiple zones. Someone more familiar with
NFS should probably review what I have done.
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
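
The point of the conversion is easiest to see in miniature: read_page_state(nr_dirty) had to walk every CPU's copy of the counter to produce a global figure, whereas a zoned counter can be read by summing a handful of per-zone values (the kernel additionally buffers these per CPU, which the sketch below ignores). A rough user-space model in C11, with every name invented for illustration rather than taken from the kernel:

#include <stdatomic.h>
#include <stdio.h>

/* Toy model: each zone keeps its own dirty-page count; a global read
 * sums one value per zone instead of one value per CPU. */
#define NR_ZONES 3

struct zone_demo {
	atomic_long nr_file_dirty;	/* stand-in for NR_FILE_DIRTY */
};

static struct zone_demo zones[NR_ZONES];

/* Rough analogue of __inc_zone_page_state(page, NR_FILE_DIRTY):
 * charge the zone the page belongs to. */
static void zone_inc_dirty(struct zone_demo *z)
{
	atomic_fetch_add_explicit(&z->nr_file_dirty, 1,
				  memory_order_relaxed);
}

/* Rough analogue of global_page_state(NR_FILE_DIRTY): O(zones), not O(cpus). */
static long global_dirty(void)
{
	long sum = 0;

	for (int i = 0; i < NR_ZONES; i++)
		sum += atomic_load_explicit(&zones[i].nr_file_dirty,
					    memory_order_relaxed);
	return sum;
}

int main(void)
{
	zone_inc_dirty(&zones[0]);
	zone_inc_dirty(&zones[2]);
	printf("global nr_dirty = %ld\n", global_dirty());	/* prints 2 */
	return 0;
}

Because writers charge the zone a page actually lives in, the statistic also becomes meaningful per zone, not just cheaper to read globally, which is the broader goal of the zoned VM counters series.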
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c | 11
1 file changed, 6 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0faacfe1890..da854783009 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -109,7 +109,7 @@ struct writeback_state
 
 static void get_writeback_state(struct writeback_state *wbs)
 {
-	wbs->nr_dirty = read_page_state(nr_dirty);
+	wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
 	wbs->nr_unstable = read_page_state(nr_unstable);
 	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
 			global_page_state(NR_ANON_PAGES);
@@ -641,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
 			if (mapping_cap_account_dirty(mapping))
-				inc_page_state(nr_dirty);
+				__inc_zone_page_state(page,
+							NR_FILE_DIRTY);
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
@@ -728,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				__dec_zone_page_state(page, NR_FILE_DIRTY);
+			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -761,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
 		}
 		return 0;
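
One detail worth noting in the test_clear_page_dirty() hunk above: the accounting is moved ahead of write_unlock_irqrestore(), so the non-interrupt-safe __dec_zone_page_state() variant runs while the mapping's tree_lock still has interrupts disabled (this appears to be the [akpm@osdl.org: bugfix] folded into the patch). A minimal sketch of that "account before unlocking" shape, with a pthread mutex standing in for the irq-disabled tree_lock and every name invented for the demo:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: the dirty flag and its counter are updated inside the same
 * critical section, mirroring how the patch keeps the decrement before
 * the tree_lock is released. */
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_dirty = true;
static long nr_file_dirty = 1;

static int demo_test_clear_page_dirty(void)
{
	pthread_mutex_lock(&tree_lock);
	if (page_dirty) {
		page_dirty = false;
		nr_file_dirty--;			/* account first... */
		pthread_mutex_unlock(&tree_lock);	/* ...then unlock */
		return 1;
	}
	pthread_mutex_unlock(&tree_lock);
	return 0;
}

int main(void)
{
	printf("cleared=%d nr_file_dirty=%ld\n",
	       demo_test_clear_page_dirty(), nr_file_dirty);
	return 0;
}

In clear_page_dirty_for_io(), by contrast, no such lock is held around the counter update, which is why that path uses the interrupt-safe dec_zone_page_state() instead.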