swap: revert special hibernation allocation
Please revert 2.6.36-rc commit d2997b1042
"hibernation: freeze swap at hibernation". It complicated matters by
adding a second swap allocation path, just for hibernation; without in any
way fixing the issue that it was intended to address - page reclaim after
fixing the hibernation image might free swap from a page already imaged as
swapcache, letting its swap be reallocated to store a different page of
the image: resulting in data corruption if the imaged page were freed as
clean then swapped back in. Pages freed to si->swap_map were still in
danger of being reallocated by the alternative allocation path.
I guess it inadvertently fixed the slow SSD swap allocation for hibernation
reported by Nigel Cunningham, by missing out the discards that occur on the
usual swap allocation path; but that was accidental, and needs a separate fix.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Ondrej Zary <linux@rainbow-software.org>
Cc: Andrea Gelmini <andrea.gelmini@gmail.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 910321ea81
parent ac8456d6f9
committed by Linus Torvalds
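
For illustration only (not part of the patch): a minimal, self-contained userspace
sketch of the collision described in the message above. The names used here
(swap_map, hibernation_alloc, hibernation_next, NSLOTS) are simplified stand-ins
for si->swap_map and the hibernation_offset[] scan of get_swap_for_hibernation(),
which the diff below removes.

/*
 * Userspace sketch (not kernel code) of the reverted allocator's flaw:
 * it hands out any slot whose count is 0 without ever marking it in use,
 * so a slot freed by page reclaim after the image was snapshotted can be
 * given back to hibernation and overwritten with image data.
 */
#include <stdio.h>

#define NSLOTS 8

static unsigned char swap_map[NSLOTS];        /* 0 == free slot            */
static unsigned long hibernation_next = 1;    /* like hibernation_offset[] */

/* Simplified stand-in for the removed get_swap_for_hibernation() scan. */
static unsigned long hibernation_alloc(void)
{
        unsigned long off;

        for (off = hibernation_next; off < NSLOTS; off++)
                if (!swap_map[off])
                        break;
        if (off >= NSLOTS)
                return 0;
        hibernation_next = off + 1;
        return off;                   /* note: swap_map[off] is left at 0 */
}

int main(void)
{
        /* Slots 1 and 2 are in use by unrelated data. */
        swap_map[1] = swap_map[2] = 1;

        /* Slot 3 backs a clean swapcache page that is part of the image. */
        swap_map[3] = 1;

        /* After the snapshot, page reclaim frees that clean page's swap. */
        swap_map[3] = 0;

        /* Hibernation now asks for a slot for another image page and gets
         * slot 3 back: the imaged page still expects to swap in from
         * there, so writing image data over it corrupts that page. */
        printf("image data written to slot %lu\n", hibernation_alloc());
        return 0;
}

Because the scan never raises the slot's count, nothing stops a slot freed by
reclaim from being handed out again for image data; the restored
get_swap_page_of_type() instead allocates through scan_swap_map(), which does
mark the slot in use.
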
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -315,6 +315,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +332,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -453,8 +451,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +483,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now susupend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
@@ -746,74 +764,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *