Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
 "Not much here again.  The two most notable things here are the
  sched_clock() fix, which was causing problems with the scheduling of
  threaded IRQs after a suspend event, and the vfp fix, which afaik has
  only been seen on some older OMAP boards.  Nevertheless, both are
  fairly important fixes."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7569/1: mm: uninitialized warning corrections
  ARM: 7567/1: io: avoid GCC's offsettable addressing modes for halfword accesses
  ARM: 7566/1: vfp: fix save and restore when running on pre-VFPv3 and CONFIG_VFPv3 set
  ARM: 7565/1: sched: stop sched_clock() during suspend
@@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %1, %0"
-		     : "+Qo" (*(volatile u16 __force *)addr)
+		     : "+Q" (*(volatile u16 __force *)addr)
		     : "r" (val));
 }
 
@@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
 	u16 val;
 	asm volatile("ldrh %1, %0"
-		     : "+Qo" (*(volatile u16 __force *)addr),
+		     : "+Q" (*(volatile u16 __force *)addr),
 		       "=r" (val));
 	return val;
 }
@@ -10,7 +10,5 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
-		unsigned long rate);
 
 #endif
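
With setup_sched_clock_needs_suspend() removed, every platform registers its counter the same way and the core handles suspend/resume itself. Below is a minimal sketch of a caller, assuming a hypothetical memory-mapped 32-bit free-running counter; my_counter_base, MY_TIMER_RATE and read_my_counter() are invented names for illustration, not part of this commit.

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/sched_clock.h>

	#define MY_TIMER_RATE	32768		/* hypothetical counter rate, Hz */

	static void __iomem *my_counter_base;	/* hypothetical, mapped by platform code */

	static u32 notrace read_my_counter(void)
	{
		return readl_relaxed(my_counter_base);	/* 32-bit free-running counter */
	}

	static void __init my_timer_init(void)
	{
		/* 32 valid counter bits at MY_TIMER_RATE Hz; no suspend variant needed */
		setup_sched_clock(read_my_counter, 32, MY_TIMER_RATE);
	}
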
@@ -27,9 +27,9 @@
 #if __LINUX_ARM_ARCH__ <= 6
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
-	tst	\tmp, #HWCAP_VFPv3D16
-	ldceql	p11, cr0, [\base],#32*4	@ FLDMIAD \base!, {d16-d31}
-	addne	\base, \base, #32*4		@ step over unused register space
+	tst	\tmp, #HWCAP_VFPD32
+	ldcnel	p11, cr0, [\base],#32*4	@ FLDMIAD \base!, {d16-d31}
+	addeq	\base, \base, #32*4		@ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	@ A_SIMD field
@@ -51,9 +51,9 @@
 #if __LINUX_ARM_ARCH__ <= 6
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
-	tst	\tmp, #HWCAP_VFPv3D16
-	stceql	p11, cr0, [\base],#32*4	@ FSTMIAD \base!, {d16-d31}
-	addne	\base, \base, #32*4		@ step over unused register space
+	tst	\tmp, #HWCAP_VFPD32
+	stcnel	p11, cr0, [\base],#32*4	@ FSTMIAD \base!, {d16-d31}
+	addeq	\base, \base, #32*4		@ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	@ A_SIMD field
@@ -18,11 +18,12 @@
 #define HWCAP_THUMBEE	(1 << 11)
 #define HWCAP_NEON	(1 << 12)
 #define HWCAP_VFPv3	(1 << 13)
-#define HWCAP_VFPv3D16	(1 << 14)
+#define HWCAP_VFPv3D16	(1 << 14)	/* also set for VFPv4-D16 */
 #define HWCAP_TLS	(1 << 15)
 #define HWCAP_VFPv4	(1 << 16)
 #define HWCAP_IDIVA	(1 << 17)
 #define HWCAP_IDIVT	(1 << 18)
+#define HWCAP_VFPD32	(1 << 19)	/* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV	(HWCAP_IDIVA | HWCAP_IDIVT)
 
 
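
The new HWCAP_VFPD32 bit is exported to userspace through the ELF auxiliary vector, so it can be tested with getauxval(). The following is a hypothetical probe (not part of this commit); it defines the bit locally with the value from the hunk above in case the libc headers in use do not carry it yet.

	#include <stdio.h>
	#include <sys/auxv.h>		/* getauxval(), AT_HWCAP (glibc >= 2.16) */

	#ifndef HWCAP_VFPD32
	#define HWCAP_VFPD32	(1 << 19)	/* set if VFP has 32 regs (not 16) */
	#endif

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("VFP register bank: %s\n",
		       (hwcap & HWCAP_VFPD32) ? "32 double registers"
					      : "16 double registers");
		return 0;
	}
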
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
-		unsigned long rate)
-{
-	setup_sched_clock(read, bits, rate);
-	cd.needs_suspend = true;
-}
-
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
-	if (cd.needs_suspend)
-		cd.suspended = true;
+	cd.suspended = true;
 	return 0;
 }
 
 static void sched_clock_resume(void)
 {
-	if (cd.needs_suspend) {
-		cd.epoch_cyc = read_sched_clock();
-		cd.epoch_cyc_copy = cd.epoch_cyc;
-		cd.suspended = false;
-	}
+	cd.epoch_cyc = read_sched_clock();
+	cd.epoch_cyc_copy = cd.epoch_cyc;
+	cd.suspended = false;
 }
 
 static struct syscore_ops sched_clock_ops = {
@@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-	union offset_union offset;
+	union offset_union uninitialized_var(offset);
 	unsigned long instr = 0, instrptr;
 	int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
 	unsigned int type;
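
uninitialized_var() only silences GCC's false "may be used uninitialized" warning; in kernels of this era it is essentially a self-assignment, so the generated code does not change. A rough standalone illustration follows; struct example and demo() are invented stand-ins for the sketch, not code from this commit.

	/* roughly the classic include/linux/compiler-gcc.h definition */
	#define uninitialized_var(x) x = x

	struct example { int halfwords[2]; };	/* stand-in for union offset_union */

	static int demo(int pick_first)
	{
		struct example uninitialized_var(e);	/* expands to: struct example e = e; */

		if (pick_first)
			e.halfwords[0] = 1;		/* always written on one branch... */
		else
			e.halfwords[0] = 2;		/* ...or the other, so never read uninitialized */
		return e.halfwords[0];
	}
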
@@ -701,11 +701,14 @@ static int __init vfp_init(void)
 			elf_hwcap |= HWCAP_VFPv3;
 
 			/*
-			 * Check for VFPv3 D16. CPUs in this configuration
-			 * only have 16 x 64bit registers.
+			 * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+			 * this configuration only have 16 x 64bit
+			 * registers.
 			 */
 			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
-				elf_hwcap |= HWCAP_VFPv3D16;
+				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+			else
+				elf_hwcap |= HWCAP_VFPD32;
 		}
 #endif
 		/*