author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-06-16 15:32:17 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 19:47:35 -0700
commit	092cead6175bb1b3d3078a34ba71c939d526c70b (patch)
tree	84dfeda6c7ca85b6d68710c824e1ce59db16cc3b
parent	b6e68bc1baed9b6972a250aba66b8c5276cf6fb1 (diff)
page allocator: move free_page_mlock() to page_alloc.c
Currently, free_page_mlock() is only called from page_alloc.c.  Thus, we can
move it to page_alloc.c.

Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
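The pattern the patch follows is a general one: a helper with exactly one
calling translation unit can be defined in that .c file instead of a shared
header, with an empty stub when the config option is compiled out so the call
site stays free of #ifdefs. Below is a minimal, self-contained sketch of that
pattern; CONFIG_FOO, do_foo_accounting(), foo_events and free_one_object() are
made-up names for illustration and are not part of the kernel code in this
patch.

/* sketch.c -- illustrative only, not kernel code */
#include <stdio.h>

#define CONFIG_FOO 1	/* plays the role of CONFIG_HAVE_MLOCKED_PAGE_BIT */

static unsigned long foo_events;	/* stands in for the NR_MLOCK / vm_event counters */

#ifdef CONFIG_FOO
/* Defined in the only file that calls it, not in a shared header. */
static inline void do_foo_accounting(void)
{
	foo_events++;
}
#else
/* Empty stub keeps the single call site free of #ifdefs. */
static inline void do_foo_accounting(void) { }
#endif

static void free_one_object(void)
{
	do_foo_accounting();	/* the lone call site, in the same file */
}

int main(void)
{
	free_one_object();
	printf("foo events: %lu\n", foo_events);
	return 0;
}

With the macro defined the counter is bumped; with it undefined the call
becomes a no-op, which is the same effect the #else stub added to
mm/page_alloc.c provides for !CONFIG_HAVE_MLOCKED_PAGE_BIT builds.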
-rw-r--r--	mm/internal.h	13
-rw-r--r--	mm/page_alloc.c	16
2 files changed, 16 insertions, 13 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 58ec1bc262c..4b1672a8cf7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -150,18 +150,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__ClearPageMlocked(page);
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -170,7 +158,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c9f406e3c4..5dac5d8cb14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -493,6 +493,22 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * free_page_mlock() -- clean up attempts to free and mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
 static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |