sh: GUSA atomic rollback support.
This implements kernel-level atomic rollback built on top of gUSA, as an alternative non-IRQ based atomicity method. This is generally a faster method for platforms that are lacking the LL/SC pairs that SH-4A and later use, and is only supportable on legacy cores.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
committed by Paul Mundt
parent 53ff09422e
commit 1efe4ce3ca
@@ -11,97 +11,18 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static inline void set_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
-}
+#ifdef CONFIG_GUSA_RB
+#include <asm/bitops-grb.h>
+#else
+#include <asm/bitops-irq.h>
+#endif
 
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
-static inline void clear_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
-}
-
-static inline void change_bit(int nr, volatile void * addr)
-{
-	int mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a ^= mask;
-	local_irq_restore(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile void * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
 #include <asm-generic/bitops/non-atomic.h>
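The functions removed above do not disappear: the IRQ-based versions move behind the #else branch (asm/bitops-irq.h), while asm/bitops-grb.h supplies gUSA rollback variants. Instead of masking interrupts, a rollback variant wraps the read-modify-write in a short assembly sequence marked by a negative value in r15; if the sequence is interrupted before it completes, the exception path rewinds the PC to the start of the marked region (derived from r0 and the negative offset in r15) so the whole operation re-executes. The sketch below shows the general shape such a sequence takes for set_bit(); it is illustrative only, and the actual asm/bitops-grb.h may differ in detail.

static inline void set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long tmp;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);

	__asm__ __volatile__ (
		"   .align 2            \n\t"
		"   mova   1f,  r0      \n\t" /* r0 = end of the sequence (rollback anchor) */
		"   mov    r15, r1      \n\t" /* save the real stack pointer in r1           */
		"   mov    #-6, r15     \n\t" /* negative r15 marks the 6-byte critical region */
		"   mov.l  @%1, %0      \n\t" /* load the word                                */
		"   or     %2,  %0      \n\t" /* set the requested bit                        */
		"   mov.l  %0,  @%1     \n\t" /* store it back                                */
		"1: mov    r1,  r15     \n\t" /* restore r15: critical region is over         */
		: "=&r" (tmp), "+r" (a)
		: "r" (mask)
		: "memory", "r0", "r1");
}

If an interrupt or exception hits while r15 holds the negative marker, the entry code restores r15 from r1 and restarts the load/modify/store from the top, which is what makes the sequence effectively atomic without touching the interrupt mask.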