Diffstat (limited to 'mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch')
-rw-r--r--  mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch | 69
1 file changed, 69 insertions, 0 deletions
diff --git a/mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch b/mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch
new file mode 100644
index 000000000..f72e111ad
--- /dev/null
+++ b/mm-don-t-do-zero_resv_unavail-if-memmap-is-not-allocated.patch
@@ -0,0 +1,69 @@
+From d1b47a7c9efcf3c3384b70f6e3c8f1423b44d8c7 Mon Sep 17 00:00:00 2001
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+Date: Mon, 16 Jul 2018 11:16:30 -0400
+Subject: mm: don't do zero_resv_unavail if memmap is not allocated
+
+From: Pavel Tatashin <pasha.tatashin@oracle.com>
+
+commit d1b47a7c9efcf3c3384b70f6e3c8f1423b44d8c7 upstream.
+
+Moving zero_resv_unavail() before memmap_init_zone() caused a regression on
+x86-32.
+
+The cause is that we access struct pages before they are allocated when
+CONFIG_FLAT_NODE_MEM_MAP is used.
+
+free_area_init_nodes()
+  zero_resv_unavail()
+    mm_zero_struct_page(pfn_to_page(pfn)); <- struct page is not allocated
+  free_area_init_node()
+    if CONFIG_FLAT_NODE_MEM_MAP
+      alloc_node_mem_map()
+        memblock_virt_alloc_node_nopanic() <- struct page allocated here
+
+On the other hand, memblock_virt_alloc_node_nopanic() zeroes all the memory
+that it returns, so we do not need to do zero_resv_unavail() here.
+
+Fixes: e181ae0c5db9 ("mm: zero unavailable pages before memmap init")
+Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
+Tested-by: Matt Hart <matt@mattface.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mm.h | 2 +-
+ mm/page_alloc.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2081,7 +2081,7 @@ extern int __meminit __early_pfn_to_nid(
+ struct mminit_pfnnid_cache *state);
+ #endif
+
+-#ifdef CONFIG_HAVE_MEMBLOCK
++#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+ void zero_resv_unavail(void);
+ #else
+ static inline void zero_resv_unavail(void) {}
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6377,7 +6377,7 @@ void __paginginit free_area_init_node(in
+ free_area_init_core(pgdat);
+ }
+
+-#ifdef CONFIG_HAVE_MEMBLOCK
++#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+ /*
+ * Only struct pages that are backed by physical memory are zeroed and
+ * initialized by going through __init_single_page(). But, there are some
+@@ -6415,7 +6415,7 @@ void __paginginit zero_resv_unavail(void
+ if (pgcnt)
+ pr_info("Reserved but unavailable: %lld pages", pgcnt);
+ }
+-#endif /* CONFIG_HAVE_MEMBLOCK */
++#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
+
+ #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+
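The include/linux/mm.h hunk above relies on a standard kernel idiom: the header declares the real zero_resv_unavail() only for configurations that need it, and supplies an empty static inline stub for everyone else, so call sites stay unconditional. Below is a minimal standalone sketch of that idiom; the file name, function name, and HAVE_FEATURE / FLAT_LAYOUT macros are hypothetical stand-ins for the real Kconfig symbols, not kernel source.

    /* stub_idiom.c - illustration only; HAVE_FEATURE and FLAT_LAYOUT are
     * stand-ins for the CONFIG_HAVE_MEMBLOCK / CONFIG_FLAT_NODE_MEM_MAP
     * tests applied by the patch. */
    #include <stdio.h>

    #if defined(HAVE_FEATURE) && !defined(FLAT_LAYOUT)
    /* Real implementation, built only where the work is actually needed. */
    static void zero_unavailable(void)
    {
            printf("zeroing unavailable ranges\n");
    }
    #else
    /* Empty stub: the call below compiles away in other configurations. */
    static inline void zero_unavailable(void) {}
    #endif

    int main(void)
    {
            zero_unavailable();     /* caller needs no #ifdef of its own */
            return 0;
    }

Building with "cc stub_idiom.c" gets the no-op stub, while "cc -DHAVE_FEATURE stub_idiom.c" gets the real function; either way the caller compiles unchanged, which is exactly how free_area_init_nodes() can keep calling zero_resv_unavail() unconditionally after this patch.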