[PATCH] swsusp: add architecture special saveable pages support
1. Add architecture-specific page save/restore support.  The next two
   patches will use this to save/restore 'ACPI NVS' pages.

2. Allow reserved pages to be marked 'nosave', so that BIOS-reserved
   pages need not be saved and restored.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@suspend2.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ce4ab0012b
parent 82dcaafc92
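The first hunk below exports swsusp_add_arch_pages(), which lets an architecture register physical ranges (such as 'ACPI NVS' regions) that must be saved and restored across hibernation. As a reading aid only, here is a minimal sketch of how a follow-up architecture patch might feed its e820 'ACPI NVS' ranges into that hook. It assumes the i386/x86_64 e820 structures of this era (struct e820entry, E820_NVS, e820.nr_map); the function name save_acpi_nvs_pages() and its initcall placement are hypothetical and not part of this commit.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/e820.h>
#include <asm/suspend.h>	/* swsusp_add_arch_pages(), added by this patch */

/*
 * Illustrative only -- not part of this patch.  Walk the firmware
 * e820 map and register every ACPI NVS range with swsusp so it is
 * saved before suspend and restored on resume.
 */
static int __init save_acpi_nvs_pages(void)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (ei->type != E820_NVS)
			continue;
		/* swsusp_add_arch_pages() takes a physical [start, end) range */
		if (swsusp_add_arch_pages(ei->addr, ei->addr + ei->size))
			return -ENOMEM;
	}
	return 0;
}
core_initcall(save_acpi_nvs_pages);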
@@ -71,6 +71,7 @@ struct saved_context;
 void __save_processor_state(struct saved_context *ctxt);
 void __restore_processor_state(struct saved_context *ctxt);
 unsigned long get_safe_page(gfp_t gfp_mask);
+int swsusp_add_arch_pages(unsigned long start, unsigned long end);
 
 /*
  * XXX: We try to keep some more pages free so that I/O operations succeed
@@ -105,6 +105,10 @@ extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
 extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
 extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
 
+extern unsigned int count_special_pages(void);
+extern int save_special_mem(void);
+extern int restore_special_mem(void);
+
 extern int swsusp_check(void);
 extern int swsusp_shrink_memory(void);
 extern void swsusp_free(void);
@@ -39,6 +39,88 @@ static unsigned int nr_copy_pages;
 static unsigned int nr_meta_pages;
 static unsigned long *buffer;
 
+struct arch_saveable_page {
+	unsigned long start;
+	unsigned long end;
+	char *data;
+	struct arch_saveable_page *next;
+};
+static struct arch_saveable_page *arch_pages;
+
+int swsusp_add_arch_pages(unsigned long start, unsigned long end)
+{
+	struct arch_saveable_page *tmp;
+
+	while (start < end) {
+		tmp = kzalloc(sizeof(struct arch_saveable_page), GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+		tmp->start = start;
+		tmp->end = ((start >> PAGE_SHIFT) + 1) << PAGE_SHIFT;
+		if (tmp->end > end)
+			tmp->end = end;
+		tmp->next = arch_pages;
+		start = tmp->end;
+		arch_pages = tmp;
+	}
+	return 0;
+}
+
+static unsigned int count_arch_pages(void)
+{
+	unsigned int count = 0;
+	struct arch_saveable_page *tmp = arch_pages;
+	while (tmp) {
+		count++;
+		tmp = tmp->next;
+	}
+	return count;
+}
+
+static int save_arch_mem(void)
+{
+	char *kaddr;
+	struct arch_saveable_page *tmp = arch_pages;
+	int offset;
+
+	pr_debug("swsusp: Saving arch specific memory");
+	while (tmp) {
+		tmp->data = (char *)__get_free_page(GFP_ATOMIC);
+		if (!tmp->data)
+			return -ENOMEM;
+		offset = tmp->start - (tmp->start & PAGE_MASK);
+		/* arch pages might haven't a 'struct page' */
+		kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+		memcpy(tmp->data + offset, kaddr + offset,
+			tmp->end - tmp->start);
+		kunmap_atomic(kaddr, KM_USER0);
+
+		tmp = tmp->next;
+	}
+	return 0;
+}
+
+static int restore_arch_mem(void)
+{
+	char *kaddr;
+	struct arch_saveable_page *tmp = arch_pages;
+	int offset;
+
+	while (tmp) {
+		if (!tmp->data)
+			continue;
+		offset = tmp->start - (tmp->start & PAGE_MASK);
+		kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+		memcpy(kaddr + offset, tmp->data + offset,
+			tmp->end - tmp->start);
+		kunmap_atomic(kaddr, KM_USER0);
+		free_page((long)tmp->data);
+		tmp->data = NULL;
+		tmp = tmp->next;
+	}
+	return 0;
+}
+
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void)
 {
@@ -150,8 +232,35 @@ int restore_highmem(void)
 	}
 	return 0;
 }
+#else
+static unsigned int count_highmem_pages(void) {return 0;}
+static int save_highmem(void) {return 0;}
+static int restore_highmem(void) {return 0;}
 #endif
 
+unsigned int count_special_pages(void)
+{
+	return count_arch_pages() + count_highmem_pages();
+}
+
+int save_special_mem(void)
+{
+	int ret;
+	ret = save_arch_mem();
+	if (!ret)
+		ret = save_highmem();
+	return ret;
+}
+
+int restore_special_mem(void)
+{
+	int ret;
+	ret = restore_arch_mem();
+	if (!ret)
+		ret = restore_highmem();
+	return ret;
+}
+
 static int pfn_is_nosave(unsigned long pfn)
 {
 	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
@@ -177,7 +286,6 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
 		return 0;
 
 	page = pfn_to_page(pfn);
-	BUG_ON(PageReserved(page) && PageNosave(page));
 	if (PageNosave(page))
 		return 0;
 	if (PageReserved(page) && pfn_is_nosave(pfn))
@@ -62,16 +62,6 @@ unsigned long image_size = 500 * 1024 * 1024;
 
 int in_suspend __nosavedata = 0;
 
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int save_highmem(void);
-int restore_highmem(void);
-#else
-static int save_highmem(void) { return 0; }
-static int restore_highmem(void) { return 0; }
-static unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
 /**
  *	The following functions are used for tracing the allocated
  *	swap pages, so that they can be freed in case of an error.
|
|||
|
||||
printk("Shrinking memory... ");
|
||||
do {
|
||||
size = 2 * count_highmem_pages();
|
||||
size = 2 * count_special_pages();
|
||||
size += size / 50 + count_data_pages();
|
||||
size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
|
||||
PAGES_FOR_IO;
|
||||
|
@@ -234,7 +224,7 @@ int swsusp_suspend(void)
 		goto Enable_irqs;
 	}
 
-	if ((error = save_highmem())) {
+	if ((error = save_special_mem())) {
 		printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
 		goto Restore_highmem;
 	}
@@ -245,7 +235,7 @@ int swsusp_suspend(void)
 	/* Restore control flow magically appears here */
 	restore_processor_state();
 Restore_highmem:
-	restore_highmem();
+	restore_special_mem();
 	device_power_up();
 Enable_irqs:
 	local_irq_enable();
@@ -271,7 +261,7 @@ int swsusp_resume(void)
 	 */
 	swsusp_free();
 	restore_processor_state();
-	restore_highmem();
+	restore_special_mem();
 	touch_softlockup_watchdog();
 	device_power_up();
 	local_irq_enable();