Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and alpha build fix changes. As alpha in the percpu tree now uses the 'weak' attribute instead of inline assembly, there's no need for the __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
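The alpha detail is worth a sketch: a DEFINE_PER_CPU() variable that is referenced only from inline assembly looks unused to the compiler, so it previously had to be pinned with __used to survive; once the accessors become ordinary 'weak' functions, the reference is visible in plain C again and the decoration can go. A hedged illustration of the two shapes of the percpu-defs.h hook (illustrative values, not the exact tree contents):

	/* before: per-cpu definitions referenced only from inline asm had to
	 * be pinned explicitly so gcc would not discard them */
	#define PER_CPU_DEF_ATTRIBUTES	__used

	/* after: weak accessor functions reference the symbols in plain C,
	 * so no pinning attribute is needed */
	#define PER_CPU_DEF_ATTRIBUTES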
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/* Save handler and return address into the 2 unused words
	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
	 * else can be recovered from the pt_regs except r3 which for
	 * normal interrupts has been set to pt_regs and for syscalls
	 * is an argument, so we temporarily use ORIG_GPR3 to save it
	 */
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,ORIG_GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r0,GPR0(r1)
	lwz	r3,ORIG_GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,8(r1)
	lwz	r11,12(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
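The heart of the reenable_mmu path above is the mfmsr/xor/andi. sequence: it compares the live MSR with the interrupted context's saved MSR and only reports to lockdep when taking the exception actually flipped MSR_EE off. A rough, hedged C rendering of that test (the framing is illustrative; the real logic is the assembly above):

	/* did exception entry turn interrupts off while they were on? */
	if ((mfmsr() ^ regs->msr) & MSR_EE)
		trace_hardirqs_off();	/* keep lockdep's view in sync */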
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
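Two details of the block above are easy to miss: lockdep is informed via trace_hardirqs_on before MSR_EE is actually set, and the volatile argument registers clobbered by that call are reloaded before the mtmsr. As a hedged C-level sketch of the same fixup (illustrative framing, not the generated code):

	/* syscalls should arrive with interrupts on; repair it if not */
	if (!(mfmsr() & MSR_EE)) {
		trace_hardirqs_on();		/* tell lockdep first... */
		mtmsr(mfmsr() | MSR_EE);	/* ...then hard-enable */
	}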
@@ -275,6 +338,7 @@ ret_from_syscall:
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want
	 * to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
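One subtlety in the TRACE_IRQFLAGS block above: r3 holds the syscall return value at this point, and trace_hardirqs_off() is an ordinary C function free to clobber it, hence the stw/lwz pair around the call. A hedged C shorthand of the check (illustrative only):

	/* returning to the caller with EE off is unusual, but trace it */
	if (!(regs->msr & MSR_EE))
		trace_hardirqs_off();	/* the asm saves/restores r3 around this */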
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
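In C terms, the preemption path above amounts to the following (the functions are real kernel APIs; the framing is an illustrative sketch): lockdep must hear that interrupts are off before preempt_schedule_irq(), which requires hard-disabled IRQs, and hear that they are coming back on once the reschedule loop has drained:

	trace_hardirqs_off();		/* we really did hard-disable above */
	do {
		preempt_schedule_irq();	/* expects, and returns with, IRQs off */
	} while (need_resched());
	trace_hardirqs_on();		/* rebalance: EE comes back on at return */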
@@ -765,6 +857,28 @@ restore:
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	bl	trace_hardirqs_on
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
-	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work:			/* r10 contains MSR_KERNEL here */
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
@@ -1124,9 +1124,8 @@ mmu_off:
	RFI

/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET.  From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
-	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
-	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
-	mtspr	SPRN_IBAT1U,r9
-	mtspr	SPRN_IBAT1L,r10
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT1U,r11
+	mtspr	SPRN_IBAT1L,r8
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT2U,r11
+	mtspr	SPRN_IBAT2L,r8
	isync
	blr
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
	dev->dev.archdata.of_node = np;

	if (bus_id)
-		dev_set_name(&dev->dev, bus_id);
+		dev_set_name(&dev->dev, "%s", bus_id);
	else
		of_device_make_bus_id(dev);
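This one-liner is a classic format-string hardening fix: dev_set_name() takes a printf-style format, so passing the caller-supplied bus_id directly would misinterpret any '%' it happens to contain. A small illustration (hypothetical names):

	dev_set_name(&dev->dev, "mydev%d", 0);	/* fine: literal format string */
	dev_set_name(&dev->dev, bus_id);	/* risky: bus_id parsed as a format */
	dev_set_name(&dev->dev, "%s", bus_id);	/* safe: bus_id treated as data */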
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
-			printk("\n" KERN_INFO "GPR%02d: ", i);
+			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
@@ -38,9 +38,10 @@
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <asm/atomic.h>
+#include <asm/time.h>

struct rtas_t rtas = {
-	.lock = SPIN_LOCK_UNLOCKED
+	.lock = __RAW_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
 * because it can be called from really nasty contexts, such as
 * having the timebase stopped, which would lock up with normal
 * locks and spinlock debugging enabled
 */
static unsigned long lock_rtas(void)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	__raw_spin_lock_flags(&rtas.lock, flags);
	return flags;
}

static void unlock_rtas(unsigned long flags)
{
	__raw_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)

	if (!rtas.base)
		return;
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();

	args->token = 10;
	args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)

	enter_rtas(__pa(args));

-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

-	/* Gotta do something different here, use global lock for now... */
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
	rtas_args = &rtas.args;

	rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
		outputs[i] = rtas_args->rets[i+1];
	ret = (nret > 0)? rtas_args->rets[0]: 0;

-	/* Gotta do something different here, use global lock for now... */
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)

	buff_copy = get_errorlog_buffer();

-	spin_lock_irqsave(&rtas.lock, flags);
+	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)

	if (args.rets[0] == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

-	spin_unlock_irqrestore(&rtas.lock, flags);
+	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
	/* break now */
	return 1;
}

static raw_spinlock_t timebase_lock;
static u64 timebase = 0;

void __cpuinit rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	__raw_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	__raw_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}

void __cpuinit rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	__raw_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	__raw_spin_unlock(&timebase_lock);
}
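The two routines above form a small rendezvous protocol: the giver freezes the timebase through RTAS, publishes its value in the shared timebase variable, spins until the taker zeroes it, then thaws. They are consumed through the platform SMP hooks; a hedged sketch of the usual wiring (the field names exist in powerpc's struct smp_ops_t, but the surrounding platform definition here is an assumption):

	/* e.g. in a platform's smp_ops initializer (illustrative) */
	.give_timebase	= rtas_give_timebase,
	.take_timebase	= rtas_take_timebase,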
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
 */
notrace void __init machine_init(unsigned long dt_ptr)
{
+	lockdep_init();
+
	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>

-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
{
	if (udbg_comport) {
		while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)