author		Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-09-21 17:02:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 07:17:33 -0700
commit		35451beecbd7c86ce3249d543594517a5fe9a0cd (patch)
tree		e72759e6e6fcd142ab85c607a9b3dd3e1c016eed /mm
parent		a913e182ab9484308e870af37a14d372742d53b0 (diff)
download	kernel-crypto-35451beecbd7c86ce3249d543594517a5fe9a0cd.tar.gz
		kernel-crypto-35451beecbd7c86ce3249d543594517a5fe9a0cd.tar.xz
		kernel-crypto-35451beecbd7c86ce3249d543594517a5fe9a0cd.zip
ksm: unmerge is an origin of OOMs
Just as the swapoff system call allocates many pages of RAM to various processes, perhaps triggering OOM, so "echo 2 >/sys/kernel/mm/ksm/run" (unmerge) is liable to allocate many pages of RAM to various processes, perhaps triggering OOM; and each is normally run from a modest admin process (swapoff or shell), easily repeated until it succeeds.

So treat unmerge_and_remove_all_rmap_items() in the same way that we treat try_to_unuse(): generalize PF_SWAPOFF to PF_OOM_ORIGIN, and bracket both with that, to ask the OOM killer to kill them first, to prevent them from spawning more and more OOM kills.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
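The pattern is identical at both call sites: the task about to do the allocation-heavy work sets PF_OOM_ORIGIN on itself for the duration, and badness() returns the maximum score for any task carrying that flag, so if the system does go OOM the killer targets the origin task first instead of picking off its victims one by one. A minimal kernel-style sketch of that calling convention (illustrative only, not part of the patch; my_unmerge_or_unuse() is a hypothetical stand-in for try_to_unuse() or unmerge_and_remove_all_rmap_items()):

#include <linux/sched.h>

static int my_unmerge_or_unuse(void);	/* hypothetical allocation-heavy helper */

static int run_as_oom_origin(void)
{
	int err;

	/* From here on, badness() scores this task at ULONG_MAX. */
	current->flags |= PF_OOM_ORIGIN;
	err = my_unmerge_or_unuse();		/* may fault many pages back in, may OOM */
	current->flags &= ~PF_OOM_ORIGIN;	/* restore normal OOM scoring */

	return err;
}

Note that the flag is cleared unconditionally before the return value is inspected, just as run_store() and sys_swapoff() do after their respective calls in the hunks below.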
Diffstat (limited to 'mm')
-rw-r--r--	mm/ksm.c	| 2 ++
-rw-r--r--	mm/oom_kill.c	| 2 +-
-rw-r--r--	mm/swapfile.c	| 4 ++--
3 files changed, 5 insertions, 3 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index e11e7a5ac84..37cc3732509 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1557,7 +1557,9 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (ksm_run != flags) {
 		ksm_run = flags;
 		if (flags & KSM_RUN_UNMERGE) {
+			current->flags |= PF_OOM_ORIGIN;
 			err = unmerge_and_remove_all_rmap_items();
+			current->flags &= ~PF_OOM_ORIGIN;
 			if (err) {
 				ksm_run = KSM_RUN_STOP;
 				count = err;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7b2460e922..da4c342f264 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -79,7 +79,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	/*
 	 * swapoff can easily use up all memory, so kill those first.
 	 */
-	if (p->flags & PF_SWAPOFF)
+	if (p->flags & PF_OOM_ORIGIN)
 		return ULONG_MAX;

 	/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 74f1102e874..f1bf19daadc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1575,9 +1575,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	p->flags &= ~SWP_WRITEOK;
 	spin_unlock(&swap_lock);

-	current->flags |= PF_SWAPOFF;
+	current->flags |= PF_OOM_ORIGIN;
 	err = try_to_unuse(type);
-	current->flags &= ~PF_SWAPOFF;
+	current->flags &= ~PF_OOM_ORIGIN;

 	if (err) {
 		/* re-insert swap space back into swap_list */