locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar
parent a1741e7fcb
commit bd31b85960
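Background for the diff below: on mainline, raw_spinlock_t and spinlock_t behave identically, but on PREEMPT_RT a spinlock_t is converted into a sleeping lock, while raw_spinlock_t keeps the classic busy-wait, interrupts-disabled semantics. A minimal sketch of the annotation pattern this commit applies - the lock and function names are made up for illustration, only the spinlock API is real:

#include <linux/spinlock.h>

/* Hypothetical lock guarding a low level hardware register sequence
 * that must run atomically even on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(hw_lock);

static void hw_reg_sequence(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and
	 * busy-waits; the RT patches never turn it into a sleeping
	 * lock, so it is safe in non-preemptible context.
	 */
	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... program hardware registers ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}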
arch/arm/mm/cache-l2x0.c

@@ -26,7 +26,7 @@
 #define CACHE_LINE_SIZE 32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 	dsb();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __init l2x0_unlock(__u32 cache_id)
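Note the recurring pattern in the range functions above: work is done in chunks of at most 4096 bytes, and between chunks the lock is dropped and immediately re-acquired so pending interrupts can be serviced, bounding the interrupts-off window. A stripped-down sketch of that lock-break pattern - illustrative names, not the actual kernel code:

#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(lock);	/* hypothetical */

static void op_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		/* ... operate on cache lines in [start, blk_end) ... */
		start = blk_end;

		if (blk_end < end) {
			/* Lock break: give pending interrupts a chance
			 * to run before the next chunk. */
			raw_spin_unlock_irqrestore(&lock, flags);
			raw_spin_lock_irqsave(&lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(&lock, flags);
}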