Merge branch 'pm-runtime' into pm-for-linus

* pm-runtime:
  PM / Tracing: build rpm-traces.c only if CONFIG_PM_RUNTIME is set
  PM / Runtime: Replace dev_dbg() with trace_rpm_*()
  PM / Runtime: Introduce trace points for tracing rpm_* functions
  PM / Runtime: Don't run callbacks under lock for power.irq_safe set
  USB: Add wakeup info to debugging messages
  PM / Runtime: pm_runtime_idle() can be called in atomic context
  PM / Runtime: Add macro to test for runtime PM events
  PM / Runtime: Add might_sleep() to runtime PM functions
Rafael J. Wysocki
2011-10-07 23:16:55 +02:00
20 changed files with 226 additions and 66 deletions
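For context, a minimal driver-side sketch of the irq-safe runtime PM usage that this series optimizes and instruments. The foo_* names are hypothetical and made up for illustration; only the pm_runtime_*() calls are the real interfaces.

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

/* Hypothetical interrupt handler: once pm_runtime_irq_safe() has been
 * called for the device, synchronous runtime PM calls like these are
 * legal in atomic context. */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}

static int foo_probe(struct device *dev)
{
	/* Tell the core that the runtime PM callbacks may run with
	 * interrupts off; rpm_suspend()/rpm_resume() are then expected
	 * not to sleep for this device. */
	pm_runtime_irq_safe(dev);
	pm_runtime_enable(dev);
	return 0;
}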


@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
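The <trace/events/rpm.h> header brought in above declares the rpm_idle, rpm_suspend, rpm_resume and rpm_return_int trace points used throughout this file. Purely as an illustrative sketch of the mechanism, and not the actual contents of that header (the event name and fields below are assumptions), such a trace point is declared roughly like this:

/* Illustrative only -- not the real include/trace/events/rpm.h. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpm_example

#if !defined(_TRACE_RPM_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPM_EXAMPLE_H

#include <linux/device.h>
#include <linux/tracepoint.h>

TRACE_EVENT(rpm_example_idle,

	TP_PROTO(struct device *dev, int flags),

	TP_ARGS(dev, flags),

	TP_STRUCT__entry(
		__string(name, dev_name(dev))
		__field(int, flags)
	),

	TP_fast_assign(
		__assign_str(name, dev_name(dev));
		__entry->flags = flags;
	),

	TP_printk("%s flags=%#x", __get_str(name), __entry->flags)
);

#endif /* _TRACE_RPM_EXAMPLE_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>

A single C file (here the new rpm-traces.c, built only when CONFIG_PM_RUNTIME is set) defines CREATE_TRACE_POINTS before including the header, so the event bodies are emitted exactly once.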
@@ -154,6 +155,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int retval;
+
+	if (dev->power.irq_safe)
+		spin_unlock(&dev->power.lock);
+	else
+		spin_unlock_irq(&dev->power.lock);
+
+	retval = cb(dev);
+
+	if (dev->power.irq_safe)
+		spin_lock(&dev->power.lock);
+	else
+		spin_lock_irq(&dev->power.lock);
+
+	return retval;
+}
+
/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
+	trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
-	if (callback) {
-		if (dev->power.irq_safe)
-			spin_unlock(&dev->power.lock);
-		else
-			spin_unlock_irq(&dev->power.lock);
-
-		callback(dev);
-
-		if (dev->power.irq_safe)
-			spin_lock(&dev->power.lock);
-		else
-			spin_lock_irq(&dev->power.lock);
-	}
+	if (callback)
+		__rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
+	trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
if (!cb)
return -ENOSYS;
-	if (dev->power.irq_safe) {
-		retval = cb(dev);
-	} else {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = cb(dev);
-
-		spin_lock_irq(&dev->power.lock);
-	}
+	retval = __rpm_callback(cb, dev);
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
}
@@ -295,7 +304,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +356,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+	if (dev->power.irq_safe) {
+		spin_unlock(&dev->power.lock);
+
+		cpu_relax();
+
+		spin_lock(&dev->power.lock);
+		goto repeat;
+	}
+
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -430,7 +448,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
dev_dbg(dev, "%s returns %d\n", __func__, retval);
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -459,7 +477,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
@@ -496,6 +514,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
}
+	if (dev->power.irq_safe) {
+		spin_unlock(&dev->power.lock);
+
+		cpu_relax();
+
+		spin_lock(&dev->power.lock);
+		goto repeat;
+	}
+
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +642,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
dev_dbg(dev, "%s returns %d\n", __func__, retval);
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -732,13 +759,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
* return immediately if it is larger than zero. Then carry out an idle
* notification, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
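The might_sleep_if() annotations added in these helpers make misuse easy to spot: a synchronous call from atomic context without the RPM_ASYNC flag and without pm_runtime_irq_safe() now triggers the usual "sleeping function called from invalid context" splat when atomic-sleep debugging (CONFIG_DEBUG_ATOMIC_SLEEP) is enabled, instead of failing only intermittently. With that debug option enabled the macro reduces, roughly, to:

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)

so the warning fires only in the cases the kernel-doc above rules out, and the check compiles away when the debug option is off.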
@@ -761,13 +791,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* return immediately if it is larger than zero. Then carry out a suspend,
* either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -789,13 +822,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);