Merge branches 'at91', 'ep93xx', 'errata', 'footbridge', 'fncpy', 'gemini', 'irqdata', 'pm', 'sh', 'smp', 'spear', 'ux500' and 'via' into devel
@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o

@@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>

@@ -113,6 +116,14 @@ int main(void)
#endif
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);

@@ -16,7 +16,8 @@
*/

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>

@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
* One-time initialisation.
*/
static void reset_ctrl_regs(void *unused)
static void reset_ctrl_regs(void *info)
{
int i;
int i, cpu = smp_processor_id();
u32 dbg_power;
cpumask_t *cpumask = info;

/*
* v7 debug contains save and restore registers so that debug state

@@ -849,6 +851,17 @@ static void reset_ctrl_regs(void *unused)
* later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
* Ensure sticky power-down is clear (i.e. debug logic is
* powered up).
*/
asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
if ((dbg_power & 0x1) == 0) {
pr_warning("CPU %d debug is powered down!\n", cpu);
cpumask_or(cpumask, cpumask, cpumask_of(cpu));
return;
}

/*
* Unconditionally clear the lock by writing a value
* other than 0xC5ACCE55 to the access register.

@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
cpumask_t cpumask = { CPU_BITS_NONE };

debug_arch = get_debug_arch();

@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
on_each_cpu(reset_ctrl_regs, NULL, 1);
on_each_cpu(reset_ctrl_regs, &cpumask, 1);
if (!cpumask_empty(&cpumask)) {
core_num_brps = 0;
core_num_reserved_brps = 0;
core_num_wrps = 0;
return 0;
}

ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {

@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)

return space_cccc_1100_010x(insn, asi);

} else if ((insn & 0x0e000000) == 0x0c400000) {
} else if ((insn & 0x0e000000) == 0x0c000000) {

return space_cccc_110x(insn, asi);

@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
irq, cpu);
return err;
#else
return 0;
return -EINVAL;
#endif
}

static int
init_cpu_pmu(void)
{
int i, err = 0;
int i, irqs, err = 0;
struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];

if (!pdev) {
err = -ENODEV;
goto out;
}
if (!pdev)
return -ENODEV;

for (i = 0; i < pdev->num_resources; ++i) {
irqs = pdev->num_resources;

/*
* If we have a single PMU interrupt that we can't shift, assume that
* we're running on a uniprocessor machine and continue.
*/
if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
return 0;

for (i = 0; i < irqs; ++i) {
err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}

out:
return err;
}

@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;

if (idx & 0x1)
reg = encode_ctrl_reg(arch_ctrl);
else
if (num & 0x1)
reg = bp->attr.bp_addr;
else
reg = encode_ctrl_reg(arch_ctrl);
}

put:

@@ -226,8 +226,8 @@ int cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4"
: "=r" (mmfr0));
if ((mmfr0 & 0x0000000f) == 0x00000003 ||
(mmfr0 & 0x000000f0) == 0x00000030)
if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
(mmfr0 & 0x000000f0) >= 0x00000030)
cpu_arch = CPU_ARCH_ARMv7;
else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
(mmfr0 & 0x000000f0) == 0x00000020)

@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

cpsr |= PSR_ENDSTATE;

/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.

arch/arm/kernel/sleep.S (new file, 134 lines)
@@ -0,0 +1,134 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
.text

/*
* Save CPU state for a suspend
* r1 = v:p offset
* r3 = virtual return function
* Note: sp is decremented to allocate space for CPU state on stack
* r0-r3,r9,r10,lr corrupted
*/
ENTRY(cpu_suspend)
mov r9, lr
#ifdef MULTI_CPU
ldr r10, =processor
mov r2, sp @ current virtual SP
ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
add ip, ip, r1 @ convert resume fn to phys
stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
mov lr, pc
ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
mov r2, sp @ current virtual SP
ldr r0, =cpu_suspend_size
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
bl cpu_do_suspend
#endif

@ flush data cache
#ifdef MULTI_CACHE
ldr r10, =cpu_cache
mov lr, r9
ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
mov lr, r9
b __cpuc_flush_kern_all
#endif
ENDPROC(cpu_suspend)
.ltorg

/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
* r2 = phys page table base
* r3 = L1 section flags
*/
ENTRY(cpu_resume_mmu)
adr r4, cpu_resume_turn_mmu_on
mov r4, r4, lsr #20
orr r3, r3, r4, lsl #20
ldr r5, [r2, r4, lsl #2] @ save old mapping
str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code
sub r2, r2, r1
ldr r3, =cpu_resume_after_mmu
bic r1, r0, #CR_C @ ensure D-cache is disabled
b cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
.ltorg
.align 5
cpu_resume_turn_mmu_on:
mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r1, c0, c0, 0 @ read id reg
mov r1, r1
mov r1, r1
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
mov pc, lr
ENDPROC(cpu_resume_after_mmu)

/*
* Note: Yes, part of the following code is located into the .data section.
* This is to allow sleep_save_sp to be accessed with a relative load
* while we can't rely on any MMU translation. We could have put
* sleep_save_sp in the .text section as well, but some setups might
* insist on it to be truly read-only.
*/
.data
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
adr r0, sleep_save_sp
ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
ALT_UP(mov r1, #0)
and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
#ifdef MULTI_CPU
ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
#else
ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
b cpu_do_resume
#endif
ENDPROC(cpu_resume)

sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr

@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
*/
flush_cache_all();
}

/*
* Set the executing CPUs power mode as defined. This will be in
* preparation for it executing a WFI instruction.
*
* This function must be called with preemption disabled, and as it
* has the side effect of disabling coherency, caches must have been
* flushed. Interrupts must also have been disabled.
*/
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
int cpu = smp_processor_id();

if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;

val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);

return 0;
}

@@ -21,6 +21,12 @@
#define ARM_CPU_KEEP(x)
#endif

#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
#define ARM_EXIT_KEEP(x) x
#else
#define ARM_EXIT_KEEP(x)
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

@@ -43,6 +49,7 @@ SECTIONS
_sinittext = .;
HEAD_TEXT
INIT_TEXT
ARM_EXIT_KEEP(EXIT_TEXT)
_einittext = .;
ARM_CPU_DISCARD(PROC_INFO)
__arch_info_begin = .;

@@ -67,6 +74,7 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
#endif
}

@@ -162,6 +170,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
#endif

@@ -247,6 +256,8 @@ SECTIONS
}
#endif

NOTES

BSS_SECTION(0, 0, 0)
_end = .;