Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and the alpha build fix
changes. As alpha in the percpu tree uses the 'weak' attribute instead of
inline assembly, there's no need for the __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
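For background, a minimal standalone sketch (hypothetical variable names, not
the kernel's PER_CPU_DEF_ATTRIBUTES machinery) of why a 'weak' definition no
longer needs __used: a symbol referenced only from an inline assembly string
is invisible to the compiler's usage analysis and may be discarded without
__used, whereas a weak symbol has external linkage and must always be emitted.

	/* sketch.c - illustrative only, builds with GCC */

	/* Referenced only inside an asm() template, so the compiler cannot
	 * see the use; without __attribute__((used)) the static definition
	 * could be optimized away. */
	static unsigned long pcpu_asm_var __attribute__((used));

	static unsigned long read_pcpu_asm(void)
	{
		unsigned long v;
		/* the only reference lives inside the asm string */
		asm("" : "=r" (v) : "m" (pcpu_asm_var));
		return v;
	}

	/* A weak definition has external linkage and may be overridden at
	 * link time, so the compiler must emit it regardless of visible
	 * references; no __used attribute is needed. */
	unsigned long pcpu_weak_var __attribute__((weak));

	int main(void)
	{
		return (int)(read_pcpu_asm() + pcpu_weak_var);
	}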
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
 /* 360 */	CALL(sys_inotify_init1)
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
+		CALL(sys_rt_tgsigqueueinfo)
+		CALL(sys_perf_counter_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock:
 	return 0;
 }
 
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
-	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
-};
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* We are not allocating bad_irq_desc.affinity or .pending_mask */
-#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
-#endif
-
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	 * Some hardware gives randomly wrong interrupts.  Rather
 	 * than crashing, do something sensible.
 	 */
-	if (irq >= NR_IRQS)
-		handle_bad_irq(irq, &bad_irq_desc);
-	else
+	if (unlikely(irq >= NR_IRQS)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "Bad IRQ%u\n", irq);
+		ack_bad_irq(irq);
+	} else {
 		generic_handle_irq(irq);
+	}
 
 	/* AT91 specific workaround */
 	irq_finish(irq);
@@ -165,10 +157,6 @@ void __init init_IRQ(void)
 	for (irq = 0; irq < NR_IRQS; irq++)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
-#ifdef CONFIG_SMP
-	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
-#endif
 	init_arch_irq();
 }
 
@@ -176,7 +164,7 @@ void __init init_IRQ(void)
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +183,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;
 
-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread.  We try to conserve power, while trying to keep
- * overall latency low.  The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
 
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -352,6 +358,23 @@ asm(	".section .text\n"
 "	.size	kernel_thread_helper, . - kernel_thread_helper\n"
 "	.previous");
 
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm(	".section .text\n"
+"	.align\n"
+"	.type	kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+"	.fnstart\n"
+"	.cantunwind\n"
+"	bl	do_exit\n"
+"	nop\n"
+"	.fnend\n"
+"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
+"	.previous");
+#else
+#define kernel_thread_exit	do_exit
+#endif
+
 /*
  * Create a kernel thread.
  */
@@ -363,7 +386,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 
 	regs.ARM_r1 = (unsigned long)arg;
 	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)do_exit;
+	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
 	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
 
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 		ctrl->vrs[14] = *vsp++;
 		ctrl->vrs[SP] = (unsigned long)vsp;
 	} else if (insn == 0xb0) {
-		ctrl->vrs[PC] = ctrl->vrs[LR];
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
 		/* no further processing */
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
 	}
 
 	while (ctrl.entries > 0) {
-		int urc;
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
-			return -URC_FAILURE;
-		urc = unwind_exec_insn(&ctrl);
+		int urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
 	}
 
+	if (ctrl.vrs[PC] == 0)
+		ctrl.vrs[PC] = ctrl.vrs[LR];
+
 	/* check for infinite loop */
 	if (frame->pc == ctrl.vrs[PC])
 		return -URC_FAILURE;
 
 	frame->fp = ctrl.vrs[FP];
 	frame->sp = ctrl.vrs[SP];
 	frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
 void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long high, low;
 	register unsigned long current_sp asm ("sp");
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	low = frame.sp & ~(THREAD_SIZE - 1);
-	high = low + THREAD_SIZE;
-
 	while (1) {
 		int urc;
 		unsigned long where = frame.pc;
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/page.h>
 
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -63,7 +64,7 @@ SECTIONS
 		usr/built-in.o(.init.ramfs)
 		__initramfs_end = .;
 #endif
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__per_cpu_load = .;
 		__per_cpu_start = .;
 		*(.data.percpu.page_aligned)
@@ -73,7 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 #endif
 	}
@@ -85,6 +86,14 @@ SECTIONS
 		*(.discard)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+		*(.ARM.exidx.cpuexit.text)
+		*(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
 #ifndef CONFIG_MMU
 		*(.fixup)
 		*(__ex_table)
@@ -111,7 +120,7 @@ SECTIONS
 		*(.got)			/* Global offset table */
 	}
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	_etext = .;			/* End of text and rodata section */
 
@@ -151,17 +160,17 @@ SECTIONS
 		*(.data.init_task)
 
 #ifdef CONFIG_XIP_KERNEL
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_begin = .;
 		INIT_DATA
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 #endif
 
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__nosave_begin = .;
 		*(.data.nosave)
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__nosave_end = .;
 
 		/*