Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Ben Herrenschmidt:
 "Here are a few things for -rc2, this time it's all written by me so
  it can only be perfect ... right? :)

  So we have the fix to call irq_enter/exit on the irq stack we've been
  discussing, plus a cleanup on top to remove an unused (and broken)
  stack limit tracking feature (well, make it 32-bit only in fact where
  it is used and works properly).

  Then we have two things that I wrote over the last couple of days and
  made the executive decision to include just because I can (and I'm
  sure you won't object ... right?).  They fix a couple of annoying and
  long standing "issues":

   - We had separate zImages for when booting via Open Firmware vs.
     booting via a flat device-tree, while it's trivial to make one
     that deals with both.

   - We wasted a ton of cycles spinning secondary CPUs uselessly at
     boot instead of starting them when needed on pseries, thus
     contributing significantly to global warming"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/pseries: Do not start secondaries in Open Firmware
  powerpc/zImage: make the "OF" wrapper support ePAPR boot
  powerpc: Remove ksp_limit on ppc64
  powerpc/irq: Run softirqs off the top of the irq stack
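Editor's note: the two boot flows that the now-unified zImage has to distinguish hand over control with different register contents. An Open Firmware boot passes the OF client-interface entry point in r5 (with the initrd address and size in r3/r4), while an ePAPR boot passes the flat device tree in r3 and the ePAPR magic in r6, leaving r5 zero. A minimal sketch of the resulting entry-point dispatch, mirroring the arch/powerpc/boot/of.c hunk further down (of_platform_init and epapr_platform_init are the helpers introduced by this series):

/* Sketch of the OF-vs-ePAPR dispatch; the real code is in the of.c diff below. */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                   unsigned long r6, unsigned long r7)
{
        if (r5)         /* non-zero r5: Open Firmware client interface */
                of_platform_init(r3, r4, (void *)r5);
        else            /* r5 clear: ePAPR boot, flat device tree in r3 */
                epapr_platform_init(r3, r4, r5, r6, r7);
}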
arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
                                 treeboot-walnut.c cuboot-acadia.c \
                                 cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
                                         prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
arch/powerpc/boot/epapr-wrapper.c (new file)
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+                                unsigned long r5, unsigned long r6,
+                                unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                   unsigned long r6, unsigned long r7)
+{
+        epapr_platform_init(r3, r4, r5, r6, r7);
+}
arch/powerpc/boot/epapr.c
@@ -48,7 +48,7 @@ static void platform_fixups(void)
                         fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                    unsigned long r6, unsigned long r7)
 {
         epapr_magic = r6;
arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                         unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
         unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
         }
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
         platform_ops.image_hdr = of_image_hdr;
         platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
                 loader_info.initrd_size = a2;
         }
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                   unsigned long r6, unsigned long r7)
+{
+        /* Detect OF vs. ePAPR boot */
+        if (r5)
+                of_platform_init(r3, r4, (void *)r5);
+        else
+                epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-                           struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
 
 struct thread_struct {
         unsigned long   ksp;            /* Kernel stack pointer */
-        unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
         unsigned long   ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
         void            *pgdir;         /* root of page-table tree */
+        unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
         /*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
         .ksp = INIT_SP, \
-        .ksp_limit = INIT_SP_LIMIT, \
         .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
         .fs = KERNEL_DS, \
         .fpr = {{0}}, \
arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
         DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
         DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+        DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+        DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
         DEFINE(KSP, offsetof(struct thread_struct, ksp));
-        DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
         DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
         DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-        struct thread_info *curtp, *irqtp;
-        unsigned long saved_sp_limit;
-        struct irq_desc *desc;
-
-        desc = irq_to_desc(irq);
-        if (!desc)
-                return;
-
-        /* Switch to the irq stack to handle this */
-        curtp = current_thread_info();
-        irqtp = hardirq_ctx[smp_processor_id()];
-
-        if (curtp == irqtp) {
-                /* We're already on the irq stack, just handle it */
-                desc->handle_irq(irq, desc);
-                return;
-        }
-
-        saved_sp_limit = current->thread.ksp_limit;
-
-        irqtp->task = curtp->task;
-        irqtp->flags = 0;
-
-        /* Copy the softirq bits in preempt_count so that the
-         * softirq checks work in the hardirq context. */
-        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                               (curtp->preempt_count & SOFTIRQ_MASK);
-
-        current->thread.ksp_limit = (unsigned long)irqtp +
-                _ALIGN_UP(sizeof(struct thread_info), 16);
-
-        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-        current->thread.ksp_limit = saved_sp_limit;
-        irqtp->task = NULL;
-
-        /* Set any flag that may have been set on the
-         * alternate stack
-         */
-        if (irqtp->flags)
-                set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-        struct pt_regs *old_regs = set_irq_regs(regs);
+        struct irq_desc *desc;
         unsigned int irq;
 
         irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
          */
         irq = ppc_md.get_irq();
 
-        /* We can hard enable interrupts now */
+        /* We can hard enable interrupts now to allow perf interrupts */
         may_hard_irq_enable();
 
         /* And finally process it */
-        if (irq != NO_IRQ)
-                handle_one_irq(irq);
-        else
+        if (unlikely(irq == NO_IRQ))
                 __get_cpu_var(irq_stat).spurious_irqs++;
+        else {
+                desc = irq_to_desc(irq);
+                if (likely(desc))
+                        desc->handle_irq(irq, desc);
+        }
 
         trace_irq_exit(regs);
 
         irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+        struct pt_regs *old_regs = set_irq_regs(regs);
+        struct thread_info *curtp, *irqtp;
+
+        /* Switch to the irq stack to handle this */
+        curtp = current_thread_info();
+        irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+        /* Already there ? */
+        if (unlikely(curtp == irqtp)) {
+                __do_irq(regs);
+                set_irq_regs(old_regs);
+                return;
+        }
+
+        /* Prepare the thread_info in the irq stack */
+        irqtp->task = curtp->task;
+        irqtp->flags = 0;
+
+        /* Copy the preempt_count so that the [soft]irq checks work. */
+        irqtp->preempt_count = curtp->preempt_count;
+
+        /* Switch stack and call */
+        call_do_irq(regs, irqtp);
+
+        /* Restore stack limit */
+        irqtp->task = NULL;
+
+        /* Copy back updates to the thread_info */
+        if (irqtp->flags)
+                set_bits(irqtp->flags, &curtp->flags);
+
         set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
                 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                 tp = softirq_ctx[i];
                 tp->cpu = i;
-                tp->preempt_count = 0;
 
                 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                 tp = hardirq_ctx[i];
                 tp->cpu = i;
-                tp->preempt_count = HARDIRQ_OFFSET;
         }
 }
 
 static inline void do_softirq_onstack(void)
 {
         struct thread_info *curtp, *irqtp;
-        unsigned long saved_sp_limit = current->thread.ksp_limit;
 
         curtp = current_thread_info();
         irqtp = softirq_ctx[smp_processor_id()];
         irqtp->task = curtp->task;
         irqtp->flags = 0;
-        current->thread.ksp_limit = (unsigned long)irqtp +
-                _ALIGN_UP(sizeof(struct thread_info), 16);
         call_do_softirq(irqtp);
-        current->thread.ksp_limit = saved_sp_limit;
         irqtp->task = NULL;
 
         /* Set any flag that may have been set on the
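Editor's note: taken together, the irq.c changes above replace handle_one_irq() with a two-level entry: do_IRQ() only switches to the per-CPU hardirq stack, while __do_irq() runs the irq_enter()/irq_exit() pair on that stack, so softirqs triggered at irq_exit() also run off the top of the irq stack instead of the possibly nearly-full task stack. A condensed restatement of the resulting flow, paraphrased from the diff above (the flags copy-back, stack-overflow check and spurious-interrupt accounting are omitted here):

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp = current_thread_info();
        struct thread_info *irqtp = hardirq_ctx[raw_smp_processor_id()];

        if (unlikely(curtp == irqtp)) {                 /* already on the irq stack */
                __do_irq(regs);
        } else {
                irqtp->task = curtp->task;              /* set up the irq stack's thread_info */
                irqtp->preempt_count = curtp->preempt_count;
                call_do_irq(regs, irqtp);               /* asm helper: switch r1, call __do_irq */
                irqtp->task = NULL;
        }
        set_irq_regs(old_regs);
}

void __do_irq(struct pt_regs *regs)
{
        irq_enter();            /* hardirq accounting now happens on the irq stack ... */
        /* ... fetch the interrupt via ppc_md.get_irq() and run its handler ... */
        irq_exit();             /* ... so softirqs raised here run on it as well */
}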
arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
 
         .text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
         mflr    r0
         stw     r0,4(r1)
+        lwz     r10,THREAD+KSP_LIMIT(r2)
+        addi    r11,r3,THREAD_INFO_GAP
         stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
         mr      r1,r3
+        stw     r10,8(r1)
+        stw     r11,THREAD+KSP_LIMIT(r2)
         bl      __do_softirq
+        lwz     r10,8(r1)
         lwz     r1,0(r1)
         lwz     r0,4(r1)
+        stw     r10,THREAD+KSP_LIMIT(r2)
         mtlr    r0
         blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
         mflr    r0
         stw     r0,4(r1)
-        mtctr   r6
-        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-        mr      r1,r5
-        bctrl
+        lwz     r10,THREAD+KSP_LIMIT(r2)
+        addi    r11,r3,THREAD_INFO_GAP
+        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+        mr      r1,r4
+        stw     r10,8(r1)
+        stw     r11,THREAD+KSP_LIMIT(r2)
+        bl      __do_irq
+        lwz     r10,8(r1)
         lwz     r1,0(r1)
         lwz     r0,4(r1)
+        stw     r10,THREAD+KSP_LIMIT(r2)
         mtlr    r0
         blr
 
arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
         mtlr    r0
         blr
 
-_GLOBAL(call_handle_irq)
-        ld      r8,0(r6)
+_GLOBAL(call_do_irq)
         mflr    r0
         std     r0,16(r1)
-        mtctr   r8
-        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-        mr      r1,r5
-        bctrl
+        stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+        mr      r1,r4
+        bl      .__do_irq
         ld      r1,0(r1)
         ld      r0,16(r1)
         mtlr    r0
arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         kregs = (struct pt_regs *) sp;
         sp -= STACK_FRAME_OVERHEAD;
         p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
         p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                 _ALIGN_UP(sizeof(struct thread_info), 16);
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
         p->thread.ptrace_bps[0] = NULL;
 #endif
arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
         prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
                      &val, sizeof(val));
 
+        /* Check if it supports "query-cpu-stopped-state" */
+        if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+                         &val, sizeof(val)) != PROM_ERROR)
+                rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
         /* PowerVN takeover hack */
         prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
                 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
         unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+        /*
+         * On pseries, if RTAS supports "query-cpu-stopped-state",
+         * we skip this stage, the CPUs will be started by the
+         * kernel using RTAS.
+         */
+        if ((of_platform == PLATFORM_PSERIES ||
+             of_platform == PLATFORM_PSERIES_LPAR) &&
+            rtas_has_query_cpu_stopped) {
+                prom_printf("prom_hold_cpus: skipped\n");
+                return;
+        }
+
         prom_debug("prom_hold_cpus: start...\n");
         prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
         prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
          * On non-powermacs, put all CPUs in spin-loops.
          *
          * PowerMacs use a different mechanism to spin CPUs
+         *
+         * (This must be done after instanciating RTAS)
          */
         if (of_platform != PLATFORM_POWERMAC &&
             of_platform != PLATFORM_OPAL)
arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                  */
                 if ((ra == 1) && !(regs->msr & MSR_PR) \
                         && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
                         /*
                          * Check if we will touch kernel sack overflow
                          */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                                 err = -EINVAL;
                                 break;
                         }
-
+#endif /* CONFIG_PPC32 */
                         /*
                          * Check if we already set since that means we'll
                          * lose the previous value.
arch/powerpc/platforms/pseries/smp.c
@@ -233,17 +233,23 @@ static void __init smp_init_pseries(void)
 
         alloc_bootmem_cpumask_var(&of_spin_mask);
 
-        /* Mark threads which are still spinning in hold loops. */
+        /*
+         * Mark threads which are still spinning in hold loops
+         *
+         * We know prom_init will not have started them if RTAS supports
+         * query-cpu-stopped-state.
+         */
+        if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
                 if (cpu_has_feature(CPU_FTR_SMT)) {
                         for_each_present_cpu(i) {
                                 if (cpu_thread_in_core(i) == 0)
                                         cpumask_set_cpu(i, of_spin_mask);
                         }
-                } else {
+                } else
                         cpumask_copy(of_spin_mask, cpu_present_mask);
-        }
 
-        cpumask_clear_cpu(boot_cpuid, of_spin_mask);
+                cpumask_clear_cpu(boot_cpuid, of_spin_mask);
+        }
 
         /* Non-lpar has additional take/give timebase */
         if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
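Editor's note: with the prom_init.c and smp.c changes above, Open Firmware no longer parks every secondary thread in a spin loop when RTAS advertises "query-cpu-stopped-state"; the kernel brings those CPUs up on demand through RTAS instead. As a purely illustrative sketch (not part of this diff), this is roughly how pseries code can ask RTAS whether a secondary is still in the stopped state before starting it. rtas_token() and rtas_call() are the existing kernel interfaces; the helper name and the interpretation of cpu_status here are assumptions:

static int query_cpu_stopped_sketch(unsigned int pcpu)
{
        int qcss_tok = rtas_token("query-cpu-stopped-state");
        int cpu_status, status;

        if (qcss_tok == RTAS_UNKNOWN_SERVICE)
                return -1;                      /* firmware cannot tell us */

        /* one input (hw cpu id), two return words (status + cpu_status) */
        status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
        if (status != 0)
                return -1;                      /* RTAS call failed */

        return cpu_status;                      /* 0 is the "stopped" state per PAPR */
}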