Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PM: PM QOS update fix
  Freezer / cgroup freezer: Update stale locking comments
  PM / platform_bus: Allow runtime PM by default
  i2c: Fix bus-level power management callbacks
  PM QOS update
  PM / Hibernate: Fix block_io.c printk warning
  PM / Hibernate: Group swap ops
  PM / Hibernate: Move the first_sector out of swsusp_write
  PM / Hibernate: Separate block_io
  PM / Hibernate: Snapshot cleanup
  FS / libfs: Implement simple_write_to_buffer
  PM / Hibernate: document open(/dev/snapshot) side effects
  PM / Runtime: Add sysfs debug files
  PM: Improve device power management document
  PM: Update device power management document
  PM: Allow runtime_suspend methods to call pm_schedule_suspend()
  PM: pm_wakeup - switch to using bool
kernel/cgroup_freezer.c
@@ -89,10 +89,10 @@ struct cgroup_subsys freezer_subsys;
/* Locks taken and their ordering
 * ------------------------------
 * css_set_lock
 * cgroup_mutex (AKA cgroup_lock)
 * task->alloc_lock (AKA task_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
@@ -100,33 +100,38 @@ struct cgroup_subsys freezer_subsys;
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * can_attach():
 * cgroup_mutex
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * cgroup_frozen():
 * cgroup_freezing_or_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * task->alloc_lock (to get task's cgroup)
 * freezer->lock
 * sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 * freezer->lock
 * write_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock
 * read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 * freezer->lock
 * write_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock
 * read_lock css_set_lock (cgroup iterator start)
 * sighand->siglock
 * sighand->siglock (fake signal delivery inside freeze_task())
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 * freezer->lock
 * write_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock
 * read_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock (to prevent races with freeze_task())
 * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
 * sighand->siglock
 */
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
kernel/pm_qos_params.c
@@ -2,7 +2,7 @@
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies. It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requirements
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
@@ -14,19 +14,21 @@
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilo byte / sec)
 *
 * There are lists of pm_qos_objects each one wrapping requirements, notifiers
 * There are lists of pm_qos_objects each one wrapping requests, notifiers
 *
 * User mode requirements on a QOS parameter register themselves to the
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing there request to
 * the node. As long as the process holds a file handle open to the node the
 * client continues to be accounted for. Upon file release the usermode
 * requirement is removed and a new qos target is computed. This way when the
 * requirement that the application has is cleaned up when closes the file
 * request is removed and a new qos target is computed. This way when the
 * request that the application has is cleaned up when closes the file
 * pointer or exits the pm_qos_object will get an opportunity to clean up.
 *
 * Mark Gross <mgross@linux.intel.com>
 */

/*#define DEBUG*/

#include <linux/pm_qos_params.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -42,25 +44,25 @@
#include <linux/uaccess.h>

/*
 * locking rule: all changes to requirements or notifiers lists
 * locking rule: all changes to requests or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave. One lock to rule them all
 */
struct requirement_list {
struct pm_qos_request_list {
    struct list_head list;
    union {
        s32 value;
        s32 usec;
        s32 kbps;
    };
    char *name;
    int pm_qos_class;
};

static s32 max_compare(s32 v1, s32 v2);
static s32 min_compare(s32 v1, s32 v2);

struct pm_qos_object {
    struct requirement_list requirements;
    struct pm_qos_request_list requests;
    struct blocking_notifier_head *notifiers;
    struct miscdevice pm_qos_power_miscdev;
    char *name;
@@ -72,7 +74,7 @@ struct pm_qos_object {
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
    .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
    .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
    .notifiers = &cpu_dma_lat_notifier,
    .name = "cpu_dma_latency",
    .default_value = 2000 * USEC_PER_SEC,
@@ -82,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
    .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
    .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
    .notifiers = &network_lat_notifier,
    .name = "network_latency",
    .default_value = 2000 * USEC_PER_SEC,
@@ -93,8 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {

static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
    .requirements =
        {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
    .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
    .notifiers = &network_throughput_notifier,
    .name = "network_throughput",
    .default_value = 0,
@@ -135,31 +136,34 @@ static s32 min_compare(s32 v1, s32 v2)
}


static void update_target(int target)
static void update_target(int pm_qos_class)
{
    s32 extreme_value;
    struct requirement_list *node;
    struct pm_qos_request_list *node;
    unsigned long flags;
    int call_notifier = 0;

    spin_lock_irqsave(&pm_qos_lock, flags);
    extreme_value = pm_qos_array[target]->default_value;
    extreme_value = pm_qos_array[pm_qos_class]->default_value;
    list_for_each_entry(node,
            &pm_qos_array[target]->requirements.list, list) {
        extreme_value = pm_qos_array[target]->comparitor(
            &pm_qos_array[pm_qos_class]->requests.list, list) {
        extreme_value = pm_qos_array[pm_qos_class]->comparitor(
                extreme_value, node->value);
    }
    if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) {
    if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
            extreme_value) {
        call_notifier = 1;
        atomic_set(&pm_qos_array[target]->target_value, extreme_value);
        pr_debug(KERN_ERR "new target for qos %d is %d\n", target,
            atomic_read(&pm_qos_array[target]->target_value));
        atomic_set(&pm_qos_array[pm_qos_class]->target_value,
                extreme_value);
        pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
            atomic_read(&pm_qos_array[pm_qos_class]->target_value));
    }
    spin_unlock_irqrestore(&pm_qos_lock, flags);

    if (call_notifier)
        blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
            (unsigned long) extreme_value, NULL);
        blocking_notifier_call_chain(
                pm_qos_array[pm_qos_class]->notifiers,
                (unsigned long) extreme_value, NULL);
}

static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -185,125 +189,112 @@ static int find_pm_qos_object_by_minor(int minor)
}

/**
 * pm_qos_requirement - returns current system wide qos expectation
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value in an atomic manner.
 */
int pm_qos_requirement(int pm_qos_class)
int pm_qos_request(int pm_qos_class)
{
    return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
}
EXPORT_SYMBOL_GPL(pm_qos_requirement);
EXPORT_SYMBOL_GPL(pm_qos_request);

/**
 * pm_qos_add_requirement - inserts new qos request into the list
 * pm_qos_add_request - inserts new qos request into the list
 * @pm_qos_class: identifies which list of qos request to us
 * @name: identifies the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics. It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters.
 * for the pm_qos_class of parameters, and returns the pm_qos_request list
 * element as a handle for use in updating and removal. Call needs to save
 * this handle for later use.
 */
int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value)
{
    struct requirement_list *dep;
    struct pm_qos_request_list *dep;
    unsigned long flags;

    dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
    dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL);
    if (dep) {
        if (value == PM_QOS_DEFAULT_VALUE)
            dep->value = pm_qos_array[pm_qos_class]->default_value;
        else
            dep->value = value;
        dep->name = kstrdup(name, GFP_KERNEL);
        if (!dep->name)
            goto cleanup;
        dep->pm_qos_class = pm_qos_class;

        spin_lock_irqsave(&pm_qos_lock, flags);
        list_add(&dep->list,
            &pm_qos_array[pm_qos_class]->requirements.list);
            &pm_qos_array[pm_qos_class]->requests.list);
        spin_unlock_irqrestore(&pm_qos_lock, flags);
        update_target(pm_qos_class);

        return 0;
    }

cleanup:
    kfree(dep);
    return -ENOMEM;
    return dep;
}
EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
EXPORT_SYMBOL_GPL(pm_qos_add_request);

/**
 * pm_qos_update_requirement - modifies an existing qos request
 * @pm_qos_class: identifies which list of qos request to us
 * @name: identifies the request
 * pm_qos_update_request - modifies an existing qos request
 * @pm_qos_req : handle to list element holding a pm_qos request to use
 * @value: defines the qos request
 *
 * Updates an existing qos requirement for the pm_qos_class of parameters along
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * If the named request isn't in the list then no change is made.
 * Attempts are made to make this code callable on hot code paths.
 */
int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
        s32 new_value)
{
    unsigned long flags;
    struct requirement_list *node;
    int pending_update = 0;
    s32 temp;

    spin_lock_irqsave(&pm_qos_lock, flags);
    list_for_each_entry(node,
        &pm_qos_array[pm_qos_class]->requirements.list, list) {
        if (strcmp(node->name, name) == 0) {
            if (new_value == PM_QOS_DEFAULT_VALUE)
                node->value =
                pm_qos_array[pm_qos_class]->default_value;
            else
                node->value = new_value;
    if (pm_qos_req) { /*guard against callers passing in null */
        spin_lock_irqsave(&pm_qos_lock, flags);
        if (new_value == PM_QOS_DEFAULT_VALUE)
            temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value;
        else
            temp = new_value;

        if (temp != pm_qos_req->value) {
            pending_update = 1;
            break;
            pm_qos_req->value = temp;
        }
        spin_unlock_irqrestore(&pm_qos_lock, flags);
        if (pending_update)
            update_target(pm_qos_req->pm_qos_class);
    }
    spin_unlock_irqrestore(&pm_qos_lock, flags);
    if (pending_update)
        update_target(pm_qos_class);

    return 0;
}
EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
EXPORT_SYMBOL_GPL(pm_qos_update_request);

/**
 * pm_qos_remove_requirement - modifies an existing qos request
 * @pm_qos_class: identifies which list of qos request to us
 * @name: identifies the request
 * pm_qos_remove_request - modifies an existing qos request
 * @pm_qos_req: handle to request list element
 *
 * Will remove named qos request from pm_qos_class list of parameters and
 * recompute the current target value for the pm_qos_class.
 * Will remove pm qos request from the list of requests and
 * recompute the current target value for the pm_qos_class. Call this
 * on slow code paths.
 */
void pm_qos_remove_requirement(int pm_qos_class, char *name)
void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
{
    unsigned long flags;
    struct requirement_list *node;
    int pending_update = 0;
    int qos_class;

    if (pm_qos_req == NULL)
        return;
        /* silent return to keep pcm code cleaner */

    qos_class = pm_qos_req->pm_qos_class;
    spin_lock_irqsave(&pm_qos_lock, flags);
    list_for_each_entry(node,
        &pm_qos_array[pm_qos_class]->requirements.list, list) {
        if (strcmp(node->name, name) == 0) {
            kfree(node->name);
            list_del(&node->list);
            kfree(node);
            pending_update = 1;
            break;
        }
    }
    list_del(&pm_qos_req->list);
    kfree(pm_qos_req);
    spin_unlock_irqrestore(&pm_qos_lock, flags);
    if (pending_update)
        update_target(pm_qos_class);
        update_target(qos_class);
}
EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
EXPORT_SYMBOL_GPL(pm_qos_remove_request);

/**
 * pm_qos_add_notifier - sets notification entry for changes to target value
@@ -313,7 +304,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
 * will register the notifier into a notification chain that gets called
 * upon changes to the pm_qos_class target value.
 */
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
    int retval;

@@ -343,21 +334,16 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);

#define PID_NAME_LEN 32

static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
    int ret;
    long pm_qos_class;
    char name[PID_NAME_LEN];

    pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
    if (pm_qos_class >= 0) {
        filp->private_data = (void *)pm_qos_class;
        snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
        ret = pm_qos_add_requirement(pm_qos_class, name,
                PM_QOS_DEFAULT_VALUE);
        if (ret >= 0)
        filp->private_data = (void *) pm_qos_add_request(pm_qos_class,
                PM_QOS_DEFAULT_VALUE);

        if (filp->private_data)
            return 0;
    }
    return -EPERM;
@@ -365,32 +351,40 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)

static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
    int pm_qos_class;
    char name[PID_NAME_LEN];
    struct pm_qos_request_list *req;

    pm_qos_class = (long)filp->private_data;
    snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
    pm_qos_remove_requirement(pm_qos_class, name);
    req = (struct pm_qos_request_list *)filp->private_data;
    pm_qos_remove_request(req);

    return 0;
}


static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
        size_t count, loff_t *f_pos)
{
    s32 value;
    int pm_qos_class;
    char name[PID_NAME_LEN];
    int x;
    char ascii_value[11];
    struct pm_qos_request_list *pm_qos_req;

    pm_qos_class = (long)filp->private_data;
    if (count != sizeof(s32))
    if (count == sizeof(s32)) {
        if (copy_from_user(&value, buf, sizeof(s32)))
            return -EFAULT;
    } else if (count == 11) { /* len('0x12345678/0') */
        if (copy_from_user(ascii_value, buf, 11))
            return -EFAULT;
        x = sscanf(ascii_value, "%x", &value);
        if (x != 1)
            return -EINVAL;
        pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
    } else
        return -EINVAL;
    if (copy_from_user(&value, buf, sizeof(s32)))
        return -EFAULT;
    snprintf(name, PID_NAME_LEN, "process_%d", current->pid);
    pm_qos_update_requirement(pm_qos_class, name, value);

    return sizeof(s32);
    pm_qos_req = (struct pm_qos_request_list *)filp->private_data;
    pm_qos_update_request(pm_qos_req, value);

    return count;
}
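
To make the API shift above concrete, here is a minimal sketch of a kernel-side consumer of the new request-handle interface. This is hypothetical driver code, not part of the patch; PM_QOS_CPU_DMA_LATENCY is one of the classes defined in pm_qos_params.h, and error handling is trimmed.

    #include <linux/pm_qos_params.h>

    static struct pm_qos_request_list *example_req;  /* hypothetical driver state */

    static int example_start(void)
    {
        /* Old API: pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "example", 50);
         * the new call returns a handle instead of keying the request by name. */
        example_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 50);
        return example_req ? 0 : -ENOMEM;
    }

    static void example_tune(s32 usec)
    {
        pm_qos_update_request(example_req, usec);  /* intended for hot paths */
    }

    static void example_stop(void)
    {
        pm_qos_remove_request(example_req);  /* also frees the handle */
        example_req = NULL;
    }

The handle replaces the old kstrdup()'d name as the lookup key, which is what lets pm_qos_update_request() run without walking the request list.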
kernel/power/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
    block_io.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o

obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
kernel/power/block_io.c (new file, 103 lines)
@@ -0,0 +1,103 @@
/*
 * This file provides functions for block I/O operations on swap/file.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "power.h"

/**
 * submit - submit BIO request.
 * @rw: READ or WRITE.
 * @off physical offset of page.
 * @page: page we're reading or writing.
 * @bio_chain: list of pending biod (for async reading)
 *
 * Straight from the textbook - allocate and initialize the bio.
 * If we're reading, make sure the page is marked as dirty.
 * Then submit it and, if @bio_chain == NULL, wait.
 */
static int submit(int rw, struct block_device *bdev, sector_t sector,
        struct page *page, struct bio **bio_chain)
{
    const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
    struct bio *bio;

    bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
    bio->bi_sector = sector;
    bio->bi_bdev = bdev;
    bio->bi_end_io = end_swap_bio_read;

    if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
        printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
            (unsigned long long)sector);
        bio_put(bio);
        return -EFAULT;
    }

    lock_page(page);
    bio_get(bio);

    if (bio_chain == NULL) {
        submit_bio(bio_rw, bio);
        wait_on_page_locked(page);
        if (rw == READ)
            bio_set_pages_dirty(bio);
        bio_put(bio);
    } else {
        if (rw == READ)
            get_page(page); /* These pages are freed later */
        bio->bi_private = *bio_chain;
        *bio_chain = bio;
        submit_bio(bio_rw, bio);
    }
    return 0;
}

int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
    return submit(READ, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
            virt_to_page(addr), bio_chain);
}

int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
    return submit(WRITE, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
            virt_to_page(addr), bio_chain);
}

int hib_wait_on_bio_chain(struct bio **bio_chain)
{
    struct bio *bio;
    struct bio *next_bio;
    int ret = 0;

    if (bio_chain == NULL)
        return 0;

    bio = *bio_chain;
    if (bio == NULL)
        return 0;
    while (bio) {
        struct page *page;

        next_bio = bio->bi_private;
        page = bio->bi_io_vec[0].bv_page;
        wait_on_page_locked(page);
        if (!PageUptodate(page) || PageError(page))
            ret = -EIO;
        put_page(page);
        bio_put(bio);
        bio = next_bio;
    }
    *bio_chain = NULL;
    return ret;
}
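
A sketch of the usage pattern these helpers are built for (hypothetical caller, not from the patch): queue several asynchronous page reads on one chain, then settle the whole chain at once.

    static int example_read_pages(pgoff_t *offsets, void **bufs, unsigned int n)
    {
        struct bio *bio_chain = NULL;
        unsigned int i;
        int error = 0;

        /* Each call submits one page-sized bio and links it on the chain. */
        for (i = 0; i < n && !error; i++)
            error = hib_bio_read_page(offsets[i], bufs[i], &bio_chain);

        /* Waits for every bio on the chain and reports any I/O failure. */
        if (hib_wait_on_bio_chain(&bio_chain) && !error)
            error = -EIO;
        return error;
    }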
kernel/power/power.h
@@ -97,24 +97,12 @@ extern int hibernate_preallocate_memory(void);
 */

struct snapshot_handle {
    loff_t offset;          /* number of the last byte ready for reading
                             * or writing in the sequence
                             */
    unsigned int cur;       /* number of the block of PAGE_SIZE bytes the
                             * next operation will refer to (ie. current)
                             */
    unsigned int cur_offset;        /* offset with respect to the current
                                     * block (for the next operation)
                                     */
    unsigned int prev;      /* number of the block of PAGE_SIZE bytes that
                             * was the current one previously
                             */
    void *buffer;           /* address of the block to read from
                             * or write to
                             */
    unsigned int buf_offset;        /* location to read from or write to,
                                     * given as a displacement from 'buffer'
                                     */
    int sync_read;          /* Set to one to notify the caller of
                             * snapshot_write_next() that it may
                             * need to call wait_on_bio_chain()
@@ -125,12 +113,12 @@ struct snapshot_handle {
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle) ((handle).buffer + (handle).buf_offset)
#define data_of(handle) ((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

@@ -154,6 +142,15 @@ extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
        struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
        struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
kernel/power/snapshot.c
@@ -1604,14 +1604,9 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 * snapshot_handle structure. The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to read from the snapshot. It must not be zero.
 *
 * On success the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro. The number returned
 * may be smaller than @count, but this only happens if the read would
 * cross a page boundary otherwise.
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of data stream condition,
 * and a negative number is returned on error. In such cases the
@@ -1619,7 +1614,7 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 * any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
int snapshot_read_next(struct snapshot_handle *handle)
{
    if (handle->cur > nr_meta_pages + nr_copy_pages)
        return 0;
@@ -1630,7 +1625,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
        if (!buffer)
            return -ENOMEM;
    }
    if (!handle->offset) {
    if (!handle->cur) {
        int error;

        error = init_header((struct swsusp_info *)buffer);
@@ -1639,42 +1634,30 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
        handle->buffer = buffer;
        memory_bm_position_reset(&orig_bm);
        memory_bm_position_reset(&copy_bm);
    }
    if (handle->prev < handle->cur) {
        if (handle->cur <= nr_meta_pages) {
            memset(buffer, 0, PAGE_SIZE);
            pack_pfns(buffer, &orig_bm);
        } else {
            struct page *page;

            page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
            if (PageHighMem(page)) {
                /* Highmem pages are copied to the buffer,
                 * because we can't return with a kmapped
                 * highmem page (we may not be called again).
                 */
                void *kaddr;

                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(buffer, kaddr, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                handle->buffer = buffer;
            } else {
                handle->buffer = page_address(page);
            }
        }
        handle->prev = handle->cur;
    }
    handle->buf_offset = handle->cur_offset;
    if (handle->cur_offset + count >= PAGE_SIZE) {
        count = PAGE_SIZE - handle->cur_offset;
        handle->cur_offset = 0;
        handle->cur++;
    } else if (handle->cur <= nr_meta_pages) {
        memset(buffer, 0, PAGE_SIZE);
        pack_pfns(buffer, &orig_bm);
    } else {
        handle->cur_offset += count;
        struct page *page;

        page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
        if (PageHighMem(page)) {
            /* Highmem pages are copied to the buffer,
             * because we can't return with a kmapped
             * highmem page (we may not be called again).
             */
            void *kaddr;

            kaddr = kmap_atomic(page, KM_USER0);
            memcpy(buffer, kaddr, PAGE_SIZE);
            kunmap_atomic(kaddr, KM_USER0);
            handle->buffer = buffer;
        } else {
            handle->buffer = page_address(page);
        }
    }
    handle->offset += count;
    return count;
    handle->cur++;
    return PAGE_SIZE;
}

/**
@@ -2133,14 +2116,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 * snapshot_handle structure. The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to write to the image. It must not be zero.
 *
 * On success the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro. The number returned
 * may be smaller than @count, but this only happens if the write would
 * cross a page boundary otherwise.
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error. In such cases the
@@ -2148,16 +2126,18 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 * any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
int snapshot_write_next(struct snapshot_handle *handle)
{
    static struct chain_allocator ca;
    int error = 0;

    /* Check if we have already loaded the entire image */
    if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
    if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
        return 0;

    if (handle->offset == 0) {
        handle->sync_read = 1;

    if (!handle->cur) {
        if (!buffer)
            /* This makes the buffer be freed by swsusp_free() */
            buffer = get_image_page(GFP_ATOMIC, PG_ANY);
@@ -2166,56 +2146,43 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
            return -ENOMEM;

        handle->buffer = buffer;
    }
    handle->sync_read = 1;
    if (handle->prev < handle->cur) {
        if (handle->prev == 0) {
            error = load_header(buffer);
    } else if (handle->cur == 1) {
        error = load_header(buffer);
        if (error)
            return error;

        error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
        if (error)
            return error;

    } else if (handle->cur <= nr_meta_pages + 1) {
        error = unpack_orig_pfns(buffer, &copy_bm);
        if (error)
            return error;

        if (handle->cur == nr_meta_pages + 1) {
            error = prepare_image(&orig_bm, &copy_bm);
            if (error)
                return error;

            error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
            if (error)
                return error;

        } else if (handle->prev <= nr_meta_pages) {
            error = unpack_orig_pfns(buffer, &copy_bm);
            if (error)
                return error;

            if (handle->prev == nr_meta_pages) {
                error = prepare_image(&orig_bm, &copy_bm);
                if (error)
                    return error;

            chain_init(&ca, GFP_ATOMIC, PG_SAFE);
            memory_bm_position_reset(&orig_bm);
            restore_pblist = NULL;
            handle->buffer = get_buffer(&orig_bm, &ca);
            handle->sync_read = 0;
            if (IS_ERR(handle->buffer))
                return PTR_ERR(handle->buffer);
        }
    } else {
        copy_last_highmem_page();
        chain_init(&ca, GFP_ATOMIC, PG_SAFE);
        memory_bm_position_reset(&orig_bm);
        restore_pblist = NULL;
        handle->buffer = get_buffer(&orig_bm, &ca);
        handle->sync_read = 0;
        if (IS_ERR(handle->buffer))
            return PTR_ERR(handle->buffer);
        if (handle->buffer != buffer)
            handle->sync_read = 0;
    }
    handle->prev = handle->cur;
    }
    handle->buf_offset = handle->cur_offset;
    if (handle->cur_offset + count >= PAGE_SIZE) {
        count = PAGE_SIZE - handle->cur_offset;
        handle->cur_offset = 0;
        handle->cur++;
    } else {
        handle->cur_offset += count;
        copy_last_highmem_page();
        handle->buffer = get_buffer(&orig_bm, &ca);
        if (IS_ERR(handle->buffer))
            return PTR_ERR(handle->buffer);
        if (handle->buffer != buffer)
            handle->sync_read = 0;
    }
    handle->offset += count;
    return count;
    handle->cur++;
    return PAGE_SIZE;
}

/**
@@ -2230,7 +2197,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
{
    copy_last_highmem_page();
    /* Free only if we have loaded the image entirely */
    if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
    if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
        memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
        free_highmem_data();
    }
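
The net effect of this cleanup is a simpler calling convention: each successful call hands out exactly one page. A minimal sketch of a reader loop under the new contract (hypothetical consumer; consume_page() stands in for whatever the caller does with the data):

    static int example_drain(struct snapshot_handle *handle)
    {
        int ret;

        memset(handle, 0, sizeof(*handle));
        for (;;) {
            ret = snapshot_read_next(handle);  /* PAGE_SIZE, 0, or -errno */
            if (ret <= 0)
                return ret;  /* 0 means end of the data stream */
            consume_page(data_of(*handle));  /* one full page per call */
        }
    }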
kernel/power/swap.c
@@ -29,6 +29,40 @@

#define SWSUSP_SIG "S1SUSPEND"

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_SIZE swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
    sector_t entries[MAP_PAGE_ENTRIES];
    sector_t next_swap;
};

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way
 */

struct swap_map_handle {
    struct swap_map_page *cur;
    sector_t cur_swap;
    sector_t first_sector;
    unsigned int k;
};

struct swsusp_header {
    char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
    sector_t image;
@@ -145,110 +179,24 @@ int swsusp_swap_in_use(void)
 */

static unsigned short root_swap = 0xffff;
static struct block_device *resume_bdev;

/**
 * submit - submit BIO request.
 * @rw: READ or WRITE.
 * @off physical offset of page.
 * @page: page we're reading or writing.
 * @bio_chain: list of pending biod (for async reading)
 *
 * Straight from the textbook - allocate and initialize the bio.
 * If we're reading, make sure the page is marked as dirty.
 * Then submit it and, if @bio_chain == NULL, wait.
 */
static int submit(int rw, pgoff_t page_off, struct page *page,
        struct bio **bio_chain)
{
    const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
    struct bio *bio;

    bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
    bio->bi_sector = page_off * (PAGE_SIZE >> 9);
    bio->bi_bdev = resume_bdev;
    bio->bi_end_io = end_swap_bio_read;

    if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
        printk(KERN_ERR "PM: Adding page to bio failed at %ld\n",
            page_off);
        bio_put(bio);
        return -EFAULT;
    }

    lock_page(page);
    bio_get(bio);

    if (bio_chain == NULL) {
        submit_bio(bio_rw, bio);
        wait_on_page_locked(page);
        if (rw == READ)
            bio_set_pages_dirty(bio);
        bio_put(bio);
    } else {
        if (rw == READ)
            get_page(page); /* These pages are freed later */
        bio->bi_private = *bio_chain;
        *bio_chain = bio;
        submit_bio(bio_rw, bio);
    }
    return 0;
}

static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
    return submit(READ, page_off, virt_to_page(addr), bio_chain);
}

static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
    return submit(WRITE, page_off, virt_to_page(addr), bio_chain);
}

static int wait_on_bio_chain(struct bio **bio_chain)
{
    struct bio *bio;
    struct bio *next_bio;
    int ret = 0;

    if (bio_chain == NULL)
        return 0;

    bio = *bio_chain;
    if (bio == NULL)
        return 0;
    while (bio) {
        struct page *page;

        next_bio = bio->bi_private;
        page = bio->bi_io_vec[0].bv_page;
        wait_on_page_locked(page);
        if (!PageUptodate(page) || PageError(page))
            ret = -EIO;
        put_page(page);
        bio_put(bio);
        bio = next_bio;
    }
    *bio_chain = NULL;
    return ret;
}
struct block_device *hib_resume_bdev;

/*
 * Saving part
 */

static int mark_swapfiles(sector_t start, unsigned int flags)
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
    int error;

    bio_read_page(swsusp_resume_block, swsusp_header, NULL);
    hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
    if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
        !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
        memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
        memcpy(swsusp_header->sig,SWSUSP_SIG, 10);
        swsusp_header->image = start;
        swsusp_header->image = handle->first_sector;
        swsusp_header->flags = flags;
        error = bio_write_page(swsusp_resume_block,
        error = hib_bio_write_page(swsusp_resume_block,
                    swsusp_header, NULL);
    } else {
        printk(KERN_ERR "PM: Swap header not found!\n");
@@ -260,25 +208,26 @@ static int mark_swapfiles(sector_t start, unsigned int flags)
/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving image
 */

static int swsusp_swap_check(void) /* This is called before saving image */
static int swsusp_swap_check(void)
{
    int res;

    res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
            &resume_bdev);
            &hib_resume_bdev);
    if (res < 0)
        return res;

    root_swap = res;
    res = blkdev_get(resume_bdev, FMODE_WRITE);
    res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
    if (res)
        return res;

    res = set_blocksize(resume_bdev, PAGE_SIZE);
    res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
    if (res < 0)
        blkdev_put(resume_bdev, FMODE_WRITE);
        blkdev_put(hib_resume_bdev, FMODE_WRITE);

    return res;
}
@@ -309,42 +258,9 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
    } else {
        src = buf;
    }
    return bio_write_page(offset, src, bio_chain);
    return hib_bio_write_page(offset, src, bio_chain);
}

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_SIZE swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
    sector_t entries[MAP_PAGE_ENTRIES];
    sector_t next_swap;
};

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way
 */

struct swap_map_handle {
    struct swap_map_page *cur;
    sector_t cur_swap;
    unsigned int k;
};

static void release_swap_writer(struct swap_map_handle *handle)
{
    if (handle->cur)
@@ -354,16 +270,33 @@ static void release_swap_writer(struct swap_map_handle *handle)

static int get_swap_writer(struct swap_map_handle *handle)
{
    int ret;

    ret = swsusp_swap_check();
    if (ret) {
        if (ret != -ENOSPC)
            printk(KERN_ERR "PM: Cannot find swap device, try "
                "swapon -a.\n");
        return ret;
    }
    handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
    if (!handle->cur)
        return -ENOMEM;
    if (!handle->cur) {
        ret = -ENOMEM;
        goto err_close;
    }
    handle->cur_swap = alloc_swapdev_block(root_swap);
    if (!handle->cur_swap) {
        release_swap_writer(handle);
        return -ENOSPC;
        ret = -ENOSPC;
        goto err_rel;
    }
    handle->k = 0;
    handle->first_sector = handle->cur_swap;
    return 0;
err_rel:
    release_swap_writer(handle);
err_close:
    swsusp_close(FMODE_WRITE);
    return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
@@ -380,7 +313,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
        return error;
    handle->cur->entries[handle->k++] = offset;
    if (handle->k >= MAP_PAGE_ENTRIES) {
        error = wait_on_bio_chain(bio_chain);
        error = hib_wait_on_bio_chain(bio_chain);
        if (error)
            goto out;
        offset = alloc_swapdev_block(root_swap);
@@ -406,6 +339,24 @@ static int flush_swap_writer(struct swap_map_handle *handle)
        return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
        unsigned int flags, int error)
{
    if (!error) {
        flush_swap_writer(handle);
        printk(KERN_INFO "PM: S");
        error = mark_swapfiles(handle, flags);
        printk("|\n");
    }

    if (error)
        free_all_swap_pages(root_swap);
    release_swap_writer(handle);
    swsusp_close(FMODE_WRITE);

    return error;
}

/**
 * save_image - save the suspend image data
 */
@@ -431,7 +382,7 @@ static int save_image(struct swap_map_handle *handle,
    bio = NULL;
    do_gettimeofday(&start);
    while (1) {
        ret = snapshot_read_next(snapshot, PAGE_SIZE);
        ret = snapshot_read_next(snapshot);
        if (ret <= 0)
            break;
        ret = swap_write_page(handle, data_of(*snapshot), &bio);
@@ -441,7 +392,7 @@ static int save_image(struct swap_map_handle *handle,
            printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
        nr_pages++;
    }
    err2 = wait_on_bio_chain(&bio);
    err2 = hib_wait_on_bio_chain(&bio);
    do_gettimeofday(&stop);
    if (!ret)
        ret = err2;
@@ -483,50 +434,34 @@ int swsusp_write(unsigned int flags)
    struct swap_map_handle handle;
    struct snapshot_handle snapshot;
    struct swsusp_info *header;
    unsigned long pages;
    int error;

    error = swsusp_swap_check();
    pages = snapshot_get_image_size();
    error = get_swap_writer(&handle);
    if (error) {
        printk(KERN_ERR "PM: Cannot find swap device, try "
            "swapon -a.\n");
        printk(KERN_ERR "PM: Cannot get swap writer\n");
        return error;
    }
    if (!enough_swap(pages)) {
        printk(KERN_ERR "PM: Not enough free swap\n");
        error = -ENOSPC;
        goto out_finish;
    }
    memset(&snapshot, 0, sizeof(struct snapshot_handle));
    error = snapshot_read_next(&snapshot, PAGE_SIZE);
    error = snapshot_read_next(&snapshot);
    if (error < PAGE_SIZE) {
        if (error >= 0)
            error = -EFAULT;

        goto out;
        goto out_finish;
    }
    header = (struct swsusp_info *)data_of(snapshot);
    if (!enough_swap(header->pages)) {
        printk(KERN_ERR "PM: Not enough free swap\n");
        error = -ENOSPC;
        goto out;
    }
    error = get_swap_writer(&handle);
    if (!error) {
        sector_t start = handle.cur_swap;

        error = swap_write_page(&handle, header, NULL);
        if (!error)
            error = save_image(&handle, &snapshot,
                    header->pages - 1);

        if (!error) {
            flush_swap_writer(&handle);
            printk(KERN_INFO "PM: S");
            error = mark_swapfiles(start, flags);
            printk("|\n");
        }
    }
    if (error)
        free_all_swap_pages(root_swap);

    release_swap_writer(&handle);
out:
    swsusp_close(FMODE_WRITE);
    error = swap_write_page(&handle, header, NULL);
    if (!error)
        error = save_image(&handle, &snapshot, pages - 1);
out_finish:
    error = swap_writer_finish(&handle, flags, error);
    return error;
}

@@ -542,18 +477,21 @@ static void release_swap_reader(struct swap_map_handle *handle)
    handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
static int get_swap_reader(struct swap_map_handle *handle,
        unsigned int *flags_p)
{
    int error;

    if (!start)
    *flags_p = swsusp_header->flags;

    if (!swsusp_header->image) /* how can this happen? */
        return -EINVAL;

    handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
    if (!handle->cur)
        return -ENOMEM;

    error = bio_read_page(start, handle->cur, NULL);
    error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
    if (error) {
        release_swap_reader(handle);
        return error;
@@ -573,21 +511,28 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
    offset = handle->cur->entries[handle->k];
    if (!offset)
        return -EFAULT;
    error = bio_read_page(offset, buf, bio_chain);
    error = hib_bio_read_page(offset, buf, bio_chain);
    if (error)
        return error;
    if (++handle->k >= MAP_PAGE_ENTRIES) {
        error = wait_on_bio_chain(bio_chain);
        error = hib_wait_on_bio_chain(bio_chain);
        handle->k = 0;
        offset = handle->cur->next_swap;
        if (!offset)
            release_swap_reader(handle);
        else if (!error)
            error = bio_read_page(offset, handle->cur, NULL);
            error = hib_bio_read_page(offset, handle->cur, NULL);
    }
    return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
    release_swap_reader(handle);

    return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
@@ -615,21 +560,21 @@ static int load_image(struct swap_map_handle *handle,
    bio = NULL;
    do_gettimeofday(&start);
    for ( ; ; ) {
        error = snapshot_write_next(snapshot, PAGE_SIZE);
        error = snapshot_write_next(snapshot);
        if (error <= 0)
            break;
        error = swap_read_page(handle, data_of(*snapshot), &bio);
        if (error)
            break;
        if (snapshot->sync_read)
            error = wait_on_bio_chain(&bio);
            error = hib_wait_on_bio_chain(&bio);
        if (error)
            break;
        if (!(nr_pages % m))
            printk("\b\b\b\b%3d%%", nr_pages / m);
        nr_pages++;
    }
    err2 = wait_on_bio_chain(&bio);
    err2 = hib_wait_on_bio_chain(&bio);
    do_gettimeofday(&stop);
    if (!error)
        error = err2;
@@ -657,20 +602,20 @@ int swsusp_read(unsigned int *flags_p)
    struct snapshot_handle snapshot;
    struct swsusp_info *header;

    *flags_p = swsusp_header->flags;

    memset(&snapshot, 0, sizeof(struct snapshot_handle));
    error = snapshot_write_next(&snapshot, PAGE_SIZE);
    error = snapshot_write_next(&snapshot);
    if (error < PAGE_SIZE)
        return error < 0 ? error : -EFAULT;
    header = (struct swsusp_info *)data_of(snapshot);
    error = get_swap_reader(&handle, swsusp_header->image);
    error = get_swap_reader(&handle, flags_p);
    if (error)
        goto end;
    if (!error)
        error = swap_read_page(&handle, header, NULL);
    if (!error)
        error = load_image(&handle, &snapshot, header->pages - 1);
    release_swap_reader(&handle);

    swap_reader_finish(&handle);
end:
    if (!error)
        pr_debug("PM: Image successfully loaded\n");
    else
@@ -686,11 +631,11 @@ int swsusp_check(void)
{
    int error;

    resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
    if (!IS_ERR(resume_bdev)) {
        set_blocksize(resume_bdev, PAGE_SIZE);
    hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
    if (!IS_ERR(hib_resume_bdev)) {
        set_blocksize(hib_resume_bdev, PAGE_SIZE);
        memset(swsusp_header, 0, PAGE_SIZE);
        error = bio_read_page(swsusp_resume_block,
        error = hib_bio_read_page(swsusp_resume_block,
                    swsusp_header, NULL);
        if (error)
            goto put;
@@ -698,7 +643,7 @@ int swsusp_check(void)
        if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
            memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
            /* Reset swap signature now */
            error = bio_write_page(swsusp_resume_block,
            error = hib_bio_write_page(swsusp_resume_block,
                        swsusp_header, NULL);
        } else {
            error = -EINVAL;
@@ -706,11 +651,11 @@ int swsusp_check(void)

put:
        if (error)
            blkdev_put(resume_bdev, FMODE_READ);
            blkdev_put(hib_resume_bdev, FMODE_READ);
        else
            pr_debug("PM: Signature found, resuming\n");
    } else {
        error = PTR_ERR(resume_bdev);
        error = PTR_ERR(hib_resume_bdev);
    }

    if (error)
@@ -725,12 +670,12 @@ put:

void swsusp_close(fmode_t mode)
{
    if (IS_ERR(resume_bdev)) {
    if (IS_ERR(hib_resume_bdev)) {
        pr_debug("PM: Image device not initialised\n");
        return;
    }

    blkdev_put(resume_bdev, mode);
    blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
kernel/power/user.c
@@ -151,6 +151,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
{
    struct snapshot_data *data;
    ssize_t res;
    loff_t pg_offp = *offp & ~PAGE_MASK;

    mutex_lock(&pm_mutex);

@@ -159,14 +160,19 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
        res = -ENODATA;
        goto Unlock;
    }
    res = snapshot_read_next(&data->handle, count);
    if (res > 0) {
        if (copy_to_user(buf, data_of(data->handle), res))
            res = -EFAULT;
        else
            *offp = data->handle.offset;
    if (!pg_offp) { /* on page boundary? */
        res = snapshot_read_next(&data->handle);
        if (res <= 0)
            goto Unlock;
    } else {
        res = PAGE_SIZE - pg_offp;
    }

    res = simple_read_from_buffer(buf, count, &pg_offp,
            data_of(data->handle), res);
    if (res > 0)
        *offp += res;

 Unlock:
    mutex_unlock(&pm_mutex);

@@ -178,18 +184,25 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
{
    struct snapshot_data *data;
    ssize_t res;
    loff_t pg_offp = *offp & ~PAGE_MASK;

    mutex_lock(&pm_mutex);

    data = filp->private_data;
    res = snapshot_write_next(&data->handle, count);
    if (res > 0) {
        if (copy_from_user(data_of(data->handle), buf, res))
            res = -EFAULT;
        else
            *offp = data->handle.offset;

    if (!pg_offp) {
        res = snapshot_write_next(&data->handle);
        if (res <= 0)
            goto unlock;
    } else {
        res = PAGE_SIZE - pg_offp;
    }

    res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
            buf, count);
    if (res > 0)
        *offp += res;
unlock:
    mutex_unlock(&pm_mutex);

    return res;