author		Linus Torvalds <torvalds@g5.osdl.org>	2005-08-03 10:07:09 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-03 10:07:09 -0700
commit		a68d2ebc1581a3aec57bd032651e013fa609f530
tree		b41977c7157d7e26f37e9cb502cd1afbbddcbc17 /mm/memory.c
parent		f33ea7f404e592e4563b12101b7a4d17da6558d7
Fix up recent get_user_pages() handling
The VM_FAULT_WRITE thing is an extra bit, not a valid return value, and
has to be treated as such by get_user_pages().

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
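For readers outside mm/, here is a minimal stand-alone sketch of the pattern the fix adopts: when a status bit such as VM_FAULT_WRITE is OR'ed into an otherwise enumerated return code, the caller has to test the bit on its own and mask it off before switching on the remaining value. The FAULT_* constants and their numeric values below are hypothetical stand-ins for illustration only, not the kernel's actual definitions from include/linux/mm.h.

	#include <stdio.h>

	/* Hypothetical stand-ins for the VM_FAULT_* codes; the real
	 * definitions live in include/linux/mm.h and these values are
	 * assumptions made for the sake of the example. */
	#define FAULT_MINOR	0x02
	#define FAULT_MAJOR	0x03
	#define FAULT_WRITE	0x10	/* extra bit OR'ed into one of the codes above */

	int main(void)
	{
		int write_access = 1;
		/* A fault handler might report "minor fault, and COW was broken". */
		int ret = FAULT_MINOR | FAULT_WRITE;

		/* Test the flag bit by itself: the write has been satisfied,
		 * so later lookups can be treated as plain reads. */
		if (ret & FAULT_WRITE)
			write_access = 0;

		/* Mask the bit off before comparing against the enumerated
		 * codes; switching on the raw value would never match
		 * FAULT_MINOR here. */
		switch (ret & ~FAULT_WRITE) {
		case FAULT_MINOR:
			printf("minor fault, write_access = %d\n", write_access);
			break;
		case FAULT_MAJOR:
			printf("major fault, write_access = %d\n", write_access);
			break;
		}
		return 0;
	}

This is the same shape as the change in the diff below: the old code switched directly on the return value of __handle_mm_fault() and relied on a case VM_FAULT_WRITE label plus fall-through, which stops working once VM_FAULT_WRITE is combined with another fault code.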
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 81d7117aa58..e046b7e4b53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -949,6 +949,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			cond_resched_lock(&mm->page_table_lock);
 			while (!(page = follow_page(mm, start, write_access))) {
+				int ret;
+
 				/*
 				 * Shortcut for anonymous pages. We don't want
 				 * to force the creation of pages tables for
@@ -961,16 +963,18 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 					break;
 				}
 				spin_unlock(&mm->page_table_lock);
-				switch (__handle_mm_fault(mm, vma, start,
-						write_access)) {
-				case VM_FAULT_WRITE:
-					/*
-					 * do_wp_page has broken COW when
-					 * necessary, even if maybe_mkwrite
-					 * decided not to set pte_write
-					 */
+				ret = __handle_mm_fault(mm, vma, start, write_access);
+
+				/*
+				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
+				 * broken COW when necessary, even if maybe_mkwrite
+				 * decided not to set pte_write. We can thus safely do
+				 * subsequent page lookups as if they were reads.
+				 */
+				if (ret & VM_FAULT_WRITE)
 					write_access = 0;
-					/* FALLTHRU */
+
+				switch (ret & ~VM_FAULT_WRITE) {
 				case VM_FAULT_MINOR:
 					tsk->min_flt++;
 					break;