PM / Wakeup: Combine atomic counters to avoid reordering issues
The memory barrier in wakeup_source_deactivate() is supposed to prevent the callers of pm_wakeup_pending() and pm_get_wakeup_count() from seeing the new value of events_in_progress (0, in particular) and the old value of event_count at the same time. However, if wakeup_source_deactivate() is executed by CPU0 and, for instance, pm_wakeup_pending() is executed by CPU1, where both processors can reorder operations, the memory barrier in wakeup_source_deactivate() doesn't affect CPU1 which can reorder reads. In that case CPU1 may very well decide to fetch event_count before it's modified and events_in_progress after it's been updated, so pm_wakeup_pending() may fail to detect a wakeup event. This issue can be addressed by using a single atomic variable to store both events_in_progress and event_count, so that they can be updated together in a single atomic operation. Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
This commit is contained in:
@@ -24,12 +24,26 @@
|
|||||||
*/
|
*/
|
||||||
bool events_check_enabled;
|
bool events_check_enabled;
|
||||||
|
|
||||||
/* The counter of registered wakeup events. */
|
/*
|
||||||
static atomic_t event_count = ATOMIC_INIT(0);
|
* Combined counters of registered wakeup events and wakeup events in progress.
|
||||||
/* A preserved old value of event_count. */
|
* They need to be modified together atomically, so it's better to use one
|
||||||
|
* atomic variable to hold them both.
|
||||||
|
*/
|
||||||
|
static atomic_t combined_event_count = ATOMIC_INIT(0);
|
||||||
|
|
||||||
|
#define IN_PROGRESS_BITS (sizeof(int) * 4)
|
||||||
|
#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
|
||||||
|
|
||||||
|
static void split_counters(unsigned int *cnt, unsigned int *inpr)
|
||||||
|
{
|
||||||
|
unsigned int comb = atomic_read(&combined_event_count);
|
||||||
|
|
||||||
|
*cnt = (comb >> IN_PROGRESS_BITS);
|
||||||
|
*inpr = comb & MAX_IN_PROGRESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* A preserved old value of the events counter. */
|
||||||
static unsigned int saved_count;
|
static unsigned int saved_count;
|
||||||
/* The counter of wakeup events being processed. */
|
|
||||||
static atomic_t events_in_progress = ATOMIC_INIT(0);
|
|
||||||
|
|
||||||
static DEFINE_SPINLOCK(events_lock);
|
static DEFINE_SPINLOCK(events_lock);
|
||||||
|
|
||||||
@@ -307,7 +321,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
|
|||||||
ws->timer_expires = jiffies;
|
ws->timer_expires = jiffies;
|
||||||
ws->last_time = ktime_get();
|
ws->last_time = ktime_get();
|
||||||
|
|
||||||
atomic_inc(&events_in_progress);
|
/* Increment the counter of events in progress. */
|
||||||
|
atomic_inc(&combined_event_count);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -394,14 +409,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
|
|||||||
del_timer(&ws->timer);
|
del_timer(&ws->timer);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* event_count has to be incremented before events_in_progress is
|
* Increment the counter of registered wakeup events and decrement the
|
||||||
* modified, so that the callers of pm_check_wakeup_events() and
|
* counter of wakeup events in progress simultaneously.
|
||||||
* pm_save_wakeup_count() don't see the old value of event_count and
|
|
||||||
* events_in_progress equal to zero at the same time.
|
|
||||||
*/
|
*/
|
||||||
atomic_inc(&event_count);
|
atomic_add(MAX_IN_PROGRESS, &combined_event_count);
|
||||||
smp_mb__before_atomic_dec();
|
|
||||||
atomic_dec(&events_in_progress);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -556,8 +567,10 @@ bool pm_wakeup_pending(void)
|
|||||||
|
|
||||||
spin_lock_irqsave(&events_lock, flags);
|
spin_lock_irqsave(&events_lock, flags);
|
||||||
if (events_check_enabled) {
|
if (events_check_enabled) {
|
||||||
ret = ((unsigned int)atomic_read(&event_count) != saved_count)
|
unsigned int cnt, inpr;
|
||||||
|| atomic_read(&events_in_progress);
|
|
||||||
|
split_counters(&cnt, &inpr);
|
||||||
|
ret = (cnt != saved_count || inpr > 0);
|
||||||
events_check_enabled = !ret;
|
events_check_enabled = !ret;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&events_lock, flags);
|
spin_unlock_irqrestore(&events_lock, flags);
|
||||||
@@ -579,19 +592,22 @@ bool pm_wakeup_pending(void)
|
|||||||
*/
|
*/
|
||||||
bool pm_get_wakeup_count(unsigned int *count)
|
bool pm_get_wakeup_count(unsigned int *count)
|
||||||
{
|
{
|
||||||
bool ret;
|
unsigned int cnt, inpr;
|
||||||
|
|
||||||
if (capable(CAP_SYS_ADMIN))
|
if (capable(CAP_SYS_ADMIN))
|
||||||
events_check_enabled = false;
|
events_check_enabled = false;
|
||||||
|
|
||||||
while (atomic_read(&events_in_progress) && !signal_pending(current)) {
|
for (;;) {
|
||||||
|
split_counters(&cnt, &inpr);
|
||||||
|
if (inpr == 0 || signal_pending(current))
|
||||||
|
break;
|
||||||
pm_wakeup_update_hit_counts();
|
pm_wakeup_update_hit_counts();
|
||||||
schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
|
schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = !atomic_read(&events_in_progress);
|
split_counters(&cnt, &inpr);
|
||||||
*count = atomic_read(&event_count);
|
*count = cnt;
|
||||||
return ret;
|
return !inpr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -605,11 +621,12 @@ bool pm_get_wakeup_count(unsigned int *count)
|
|||||||
*/
|
*/
|
||||||
bool pm_save_wakeup_count(unsigned int count)
|
bool pm_save_wakeup_count(unsigned int count)
|
||||||
{
|
{
|
||||||
|
unsigned int cnt, inpr;
|
||||||
bool ret = false;
|
bool ret = false;
|
||||||
|
|
||||||
spin_lock_irq(&events_lock);
|
spin_lock_irq(&events_lock);
|
||||||
if (count == (unsigned int)atomic_read(&event_count)
|
split_counters(&cnt, &inpr);
|
||||||
&& !atomic_read(&events_in_progress)) {
|
if (cnt == count && inpr == 0) {
|
||||||
saved_count = count;
|
saved_count = count;
|
||||||
events_check_enabled = true;
|
events_check_enabled = true;
|
||||||
ret = true;
|
ret = true;
|
||||||
|
Reference in New Issue
Block a user