path: root/hibernate-watermark.patch
author     Dave Jones <davej@redhat.com>    2012-04-05 11:51:13 -0400
committer  Dave Jones <davej@redhat.com>    2012-04-05 11:51:13 -0400
commit     0930ea3e98235544eda820c0cbe983ec5c8a03cc (patch)
tree       68594ad4af55b21d7ad8b0cf0d9655a40813fc34 /hibernate-watermark.patch
parent     ec9ffe0db8d6bd843bd5a472652b03664b85111f (diff)
Better watermark the number of pages used by hibernation I/O (Bojan Smojver) (rhbz 785384)
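
In short: the old code waited on the outstanding bio chain after a fixed number of written pages (half of the pages that were free when writing began). The patch replaces that counter with a free-page watermark of three quarters of the currently available pages, waits only when nr_free_pages() actually drops to or below it, and then recomputes the watermark so that at most a quarter of whatever is free gets consumed before the next wait. The user-space sketch below only models that control flow; fake_nr_free_pages(), the starting page count, and the pages reclaimed per wait are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Illustrative stand-in for the kernel's nr_free_pages(). */
static unsigned long free_pages = 100000;

static unsigned long fake_nr_free_pages(void)
{
	return free_pages;
}

/* The watermark the patch introduces: keep three quarters of the
 * pages that are free right now free while writing. */
static unsigned long reqd_free_pages(void)
{
	return (fake_nr_free_pages() / 4) * 3;
}

int main(void)
{
	unsigned long watermark = reqd_free_pages();
	unsigned long page;

	for (page = 0; page < 200000; page++) {
		free_pages--;	/* one page consumed per queued write */

		if (fake_nr_free_pages() <= watermark) {
			/* The kernel calls hib_wait_on_bio_chain() here so the
			 * completed I/O releases its pages; modelled by giving
			 * back a fixed number of pages. */
			free_pages += 20000;
			/* Recompute, so at most a quarter of what is free now
			 * is used up before the next wait. */
			watermark = reqd_free_pages();
			printf("waited at page %lu, new watermark %lu\n",
			       page, watermark);
		}
	}
	return 0;
}

The same three-quarters idea shows up in the read path of the patch, where the read buffering is capped at a quarter of the available pages instead of half.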
Diffstat (limited to 'hibernate-watermark.patch')
-rw-r--r--   hibernate-watermark.patch   99
1 files changed, 99 insertions, 0 deletions
diff --git a/hibernate-watermark.patch b/hibernate-watermark.patch
new file mode 100644
index 000000000..2cb75e6ef
--- /dev/null
+++ b/hibernate-watermark.patch
@@ -0,0 +1,99 @@
+
+--- a/kernel/power/swap.c
++++ a/kernel/power/swap.c
+@@ -6,7 +6,7 @@
+ *
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
+ *
+ * This file is released under the GPLv2.
+ *
+@@ -51,6 +51,15 @@
+
+ #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
+
++/*
++ * Number of pages required to be kept free while writing the image. Always
++ * three quarters of all available pages before the writing starts.
++ */
++static inline unsigned long reqd_free_pages(void)
++{
++ return (nr_free_pages() / 4) * 3;
++}
++
+ struct swap_map_page {
+ sector_t entries[MAP_PAGE_ENTRIES];
+ sector_t next_swap;
+@@ -72,7 +81,7 @@ struct swap_map_handle {
+ sector_t cur_swap;
+ sector_t first_sector;
+ unsigned int k;
+- unsigned long nr_free_pages, written;
++ unsigned long reqd_free_pages;
+ u32 crc32;
+ };
+
+@@ -316,8 +325,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
+ goto err_rel;
+ }
+ handle->k = 0;
+- handle->nr_free_pages = nr_free_pages() >> 1;
+- handle->written = 0;
++ handle->reqd_free_pages = reqd_free_pages();
+ handle->first_sector = handle->cur_swap;
+ return 0;
+ err_rel:
+@@ -352,11 +360,15 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ handle->cur_swap = offset;
+ handle->k = 0;
+ }
+- if (bio_chain && ++handle->written > handle->nr_free_pages) {
++ if (bio_chain && nr_free_pages() <= handle->reqd_free_pages) {
+ error = hib_wait_on_bio_chain(bio_chain);
+ if (error)
+ goto out;
+- handle->written = 0;
++ /*
++ * Recalculate the number of required free pages, to make sure
++ * we never take more than a quarter.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
+ }
+ out:
+ return error;
+@@ -404,7 +416,7 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+ #define LZO_THREADS 3
+
+ /* Maximum number of pages for read buffering. */
+-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
++#define LZO_READ_PAGES 8192
+
+
+ /**
+@@ -615,10 +627,10 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of free pages after all allocations have been done.
+- * We don't want to run out of pages when writing.
++ * Adjust the number of required free pages after all allocations have
++ * been done. We don't want to run out of pages when writing.
+ */
+- handle->nr_free_pages = nr_free_pages() >> 1;
++ handle->reqd_free_pages = reqd_free_pages();
+
+ /*
+ * Start the CRC32 thread.
+@@ -1129,8 +1141,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
+
+ /*
+ * Adjust number of pages for read buffering, in case we are short.
++ * Never take more than a quarter of all available pages.
+ */
+- read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
++ read_pages = (nr_free_pages() - snapshot_get_image_size()) / 4;
+ read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+
+ for (i = 0; i < read_pages; i++) {