On 2012-04-05 12:53, Jonathan Nieder wrote: > Per Olofsson wrote: >> > I can confirm that the proposed patch[1] fixes the issue for me. >> > >> > [1] http://thread.gmane.org/gmane.linux.kernel/1273425 > Thanks! The patch mentioned above is v7. I'm attaching v8, which, based on the upstream report, I assume you have also already tested.
AFAICT they are identical, but maybe I'm missing some small detail. The patch I tested is the one attached to Red Hat's Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=785384 I'm attaching it to this mail as well for reference. -- Pelle
kernel/power/swap.c | 35 ++++++++++++++++++++++++----------- 1 files changed, 24 insertions(+), 11 deletions(-) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8742fd0..26d304b 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -6,7 +6,7 @@ * * Copyright (C) 1998,2001-2005 Pavel Machek <pa...@ucw.cz> * Copyright (C) 2006 Rafael J. Wysocki <r...@sisk.pl> - * Copyright (C) 2010 Bojan Smojver <bo...@rexursive.com> + * Copyright (C) 2010-2012 Bojan Smojver <bo...@rexursive.com> * * This file is released under the GPLv2. * @@ -51,6 +51,15 @@ #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) +/* + * Number of pages required to be kept free while writing the image. Always + * three quarters of all available pages before the writing starts. + */ +static inline unsigned long reqd_free_pages(void) +{ + return (nr_free_pages() / 4) * 3; +} + struct swap_map_page { sector_t entries[MAP_PAGE_ENTRIES]; sector_t next_swap; @@ -72,7 +81,7 @@ struct swap_map_handle { sector_t cur_swap; sector_t first_sector; unsigned int k; - unsigned long nr_free_pages, written; + unsigned long reqd_free_pages; u32 crc32; }; @@ -316,8 +325,7 @@ static int get_swap_writer(struct swap_map_handle *handle) goto err_rel; } handle->k = 0; - handle->nr_free_pages = nr_free_pages() >> 1; - handle->written = 0; + handle->reqd_free_pages = reqd_free_pages(); handle->first_sector = handle->cur_swap; return 0; err_rel: @@ -352,11 +360,15 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, handle->cur_swap = offset; handle->k = 0; } - if (bio_chain && ++handle->written > handle->nr_free_pages) { + if (bio_chain && nr_free_pages() <= handle->reqd_free_pages) { error = hib_wait_on_bio_chain(bio_chain); if (error) goto out; - handle->written = 0; + /* + * Recalculate the number of required free pages, to make sure + * we never take more than a quarter. 
+ */ + handle->reqd_free_pages = reqd_free_pages(); } out: return error; @@ -404,7 +416,7 @@ static int swap_writer_finish(struct swap_map_handle *handle, #define LZO_THREADS 3 /* Maximum number of pages for read buffering. */ -#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) +#define LZO_READ_PAGES 8192 /** @@ -615,10 +627,10 @@ static int save_image_lzo(struct swap_map_handle *handle, } /* - * Adjust number of free pages after all allocations have been done. - * We don't want to run out of pages when writing. + * Adjust the number of required free pages after all allocations have + * been done. We don't want to run out of pages when writing. */ - handle->nr_free_pages = nr_free_pages() >> 1; + handle->reqd_free_pages = reqd_free_pages(); /* * Start the CRC32 thread. @@ -1129,8 +1141,9 @@ static int load_image_lzo(struct swap_map_handle *handle, /* * Adjust number of pages for read buffering, in case we are short. + * Never take more than a quarter of all available pages. */ - read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1; + read_pages = (nr_free_pages() - snapshot_get_image_size()) / 4; read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES); for (i = 0; i < read_pages; i++) {