arch/tile: parameterize system PLs to support KVM port
While not a port to KVM (yet), this change modifies the kernel so that it can be built to run at either PL1 or PL2, selected by a config switch. Pushing the change upstream now avoids repeated merge conflicts as the KVM work proceeds.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
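The whole diff hangs off one indirection: SPRs and interrupts that were previously named for PL1 (EX_CONTEXT_1_*, SYSTEM_SAVE_1_*, INT_INTCTRL_1, ...) become "_K_" ("kernel") variants that resolve to whichever PL the kernel was configured for. The header that performs the mapping is not part of this excerpt; a minimal sketch of the idea, with the macro layout assumed rather than copied from <arch/spr_def.h>:

/* Sketch only; the real <arch/spr_def.h> may map these differently. */
#if CONFIG_KERNEL_PL == 2
#define SPR_EX_CONTEXT_K_0	SPR_EX_CONTEXT_2_0
#define SPR_EX_CONTEXT_K_1	SPR_EX_CONTEXT_2_1
#define SPR_SYSTEM_SAVE_K_0	SPR_SYSTEM_SAVE_2_0
#define SPR_SYSTEM_SAVE_K_1	SPR_SYSTEM_SAVE_2_1
#define SPR_SYSTEM_SAVE_K_2	SPR_SYSTEM_SAVE_2_2
#define SPR_SYSTEM_SAVE_K_3	SPR_SYSTEM_SAVE_2_3
#define INT_INTCTRL_K		INT_INTCTRL_2
#else /* CONFIG_KERNEL_PL == 1 */
#define SPR_EX_CONTEXT_K_0	SPR_EX_CONTEXT_1_0
#define SPR_EX_CONTEXT_K_1	SPR_EX_CONTEXT_1_1
#define SPR_SYSTEM_SAVE_K_0	SPR_SYSTEM_SAVE_1_0
#define SPR_SYSTEM_SAVE_K_1	SPR_SYSTEM_SAVE_1_1
#define SPR_SYSTEM_SAVE_K_2	SPR_SYSTEM_SAVE_1_2
#define SPR_SYSTEM_SAVE_K_3	SPR_SYSTEM_SAVE_1_3
#define INT_INTCTRL_K		INT_INTCTRL_1
#endif

With that in place, the code below can refer to "the kernel's PL" without naming it, and a PL2 build only has to flip CONFIG_KERNEL_PL.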
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -80,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -102,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
 	{
 	 lnk r0
-	 movei r1, 1
+	 movei r1, KERNEL_PL
 	}
 	{
 	 addli r0, r0, _cpu_idle_nap - .
 	 mtspr INTERRUPT_CRITICAL_SECTION, r1
 	}
-	IRQ_ENABLE(r2, r3)         /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1   /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
+	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
+	mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
+	mtspr SPR_EX_CONTEXT_K_0, r0
 	iret
 	.global _cpu_idle_nap
 _cpu_idle_nap:
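The KERNEL_PL symbol that replaces the hard-coded 1 above presumably comes from the newly included <asm/processor.h>. A plausible definition, stated as an assumption rather than a quote from that header:

/* Assumed definition; the real <asm/processor.h> may differ. */
#ifdef CONFIG_KERNEL_PL
#define KERNEL_PL CONFIG_KERNEL_PL
#else
#define KERNEL_PL 1	/* historical default: kernel runs at PL1 */
#endif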

@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
 	}
 1:
 
-	/* Get our processor number and save it away in SAVE_1_0. */
+	/* Get our processor number and save it away in SAVE_K_0. */
 	jal hv_inquire_topology
 	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
 	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
 	lw r0, r0
 	lw sp, r1
 	or r4, sp, r4
-	mtspr SYSTEM_SAVE_1_0, r4      /* save ksp0 + cpu */
+	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
 	addi sp, sp, -STACK_TOP_DELTA
 	{
 	 move lr, zero   /* stop backtraces in the called function */

@@ -32,8 +32,8 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
 
 	/* Temporarily save a register so we have somewhere to work. */
 
-	mtspr SYSTEM_SAVE_1_1, r0
-	mfspr r0, EX_CONTEXT_1_1
+	mtspr SPR_SYSTEM_SAVE_K_1, r0
+	mfspr r0, SPR_EX_CONTEXT_K_1
 
 	/* The cmpxchg code clears sp to force us to reset it here on fault. */
 	{
@@ -167,18 +167,18 @@ intvec_\vecname:
 	 * The page_fault handler may be downcalled directly by the
 	 * hypervisor even when Linux is running and has ICS set.
 	 *
-	 * In this case the contents of EX_CONTEXT_1_1 reflect the
+	 * In this case the contents of EX_CONTEXT_K_1 reflect the
 	 * previous fault and can't be relied on to choose whether or
 	 * not to reinitialize the stack pointer. So we add a test
-	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
 	 * and if so we don't reinitialize sp, since we must be coming
 	 * from Linux. (In fact the precise case is !(val & ~1),
 	 * but any Linux PC has to have the high bit set.)
 	 *
-	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
 	 * any path that turns into a downcall to one of our TLB handlers.
 	 */
-	mfspr r0, SYSTEM_SAVE_1_2
+	mfspr r0, SPR_SYSTEM_SAVE_K_2
 	{
 	 blz r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
 	 move r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
 
 2:
 	/*
-	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
 	 * the current stack top in the higher bits. So we recover
 	 * our stack top by just masking off the low bits, then
 	 * point sp at the top aligned address on the actual stack page.
 	 */
-	mfspr r0, SYSTEM_SAVE_1_0
+	mfspr r0, SPR_SYSTEM_SAVE_K_0
 	mm r0, r0, zero, LOG2_THREAD_SIZE, 31
 
 0:
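The comment in the hunk above explains that SYSTEM_SAVE_K_0 packs two values into one register: the cpu number in the low bits and the stack top in the high bits. In C, the packing done by "or r4, sp, r4" in the ENTRY(_start) hunk earlier and the unpacking done by the "mm" instruction here look roughly like this (helper names are illustrative, not kernel functions):

/* Illustrative helpers, not kernel code. Example value; the kernel
 * derives this from THREAD_SIZE. */
#define LOG2_THREAD_SIZE 16

/* "or r4, sp, r4": a THREAD_SIZE-aligned stack top has free low bits,
 * so the cpu number can ride along in them. */
static unsigned long pack_ksp0(unsigned long stack_top, unsigned int cpu)
{
	return stack_top | cpu;
}

/* "mm r0, r0, zero, LOG2_THREAD_SIZE, 31": mask off the low bits to
 * recover the stack top. */
static unsigned long unpack_stack_top(unsigned long save_k_0)
{
	return save_k_0 & ~((1UL << LOG2_THREAD_SIZE) - 1);
}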
@@ -254,7 +254,7 @@ intvec_\vecname:
 	 sw sp, r3
 	 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
 	}
-	mfspr r0, EX_CONTEXT_1_0
+	mfspr r0, SPR_EX_CONTEXT_K_0
 	.ifc \processing,handle_syscall
 	/*
 	 * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
 	 sw sp, r0
 	 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	}
-	mfspr r0, EX_CONTEXT_1_1
+	mfspr r0, SPR_EX_CONTEXT_K_1
 	{
 	 sw sp, r0
 	 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
 	.endif
 	 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
 	}
-	mfspr r0, SYSTEM_SAVE_1_1       /* Original r0 */
+	mfspr r0, SPR_SYSTEM_SAVE_K_1   /* Original r0 */
 	{
 	 sw sp, r0
 	 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
 	 * See discussion below at "finish_interrupt_save".
 	 */
 	.ifc \c_routine, do_page_fault
-	mfspr r2, SYSTEM_SAVE_1_3       /* address of page fault */
-	mfspr r3, SYSTEM_SAVE_1_2       /* info about page fault */
+	mfspr r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
+	mfspr r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
 	.else
 	.ifc \vecnum, INT_DOUBLE_FAULT
 	{
-	 mfspr r2, SYSTEM_SAVE_1_2      /* double fault info from HV */
+	 mfspr r2, SPR_SYSTEM_SAVE_K_2  /* double fault info from HV */
 	 movei r3, 0
 	}
 	.else
@@ -467,7 +467,7 @@ intvec_\vecname:
 	/* Load tp with our per-cpu offset. */
 #ifdef CONFIG_SMP
 	{
-	 mfspr r20, SYSTEM_SAVE_1_0
+	 mfspr r20, SPR_SYSTEM_SAVE_K_0
 	 moveli r21, lo16(__per_cpu_offset)
 	}
 	{
@@ -487,7 +487,7 @@ intvec_\vecname:
 	 * We load flags in r32 here so we can jump to .Lrestore_regs
 	 * directly after do_page_fault_ics() if necessary.
 	 */
-	mfspr r32, EX_CONTEXT_1_1
+	mfspr r32, SPR_EX_CONTEXT_K_1
 	{
 	 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
 	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
 	{
-	 mtspr EX_CONTEXT_1_0, r21
+	 mtspr SPR_EX_CONTEXT_K_0, r21
 	 move r5, zero
 	}
 	{
-	 mtspr EX_CONTEXT_1_1, lr
+	 mtspr SPR_EX_CONTEXT_K_1, lr
 	 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	}
 
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
 	 * before returning, so we can properly get more downcalls.
 	 */
 	.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
 	check_single_stepping normal, .Ldispatch_downcall
 .Ldispatch_downcall:
 
-	/* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
 	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
 	{
 	 addi r30, r30, 4
-	 movei r31, INT_MASK(INT_INTCTRL_1)
+	 movei r31, INT_MASK(INT_INTCTRL_K)
 	}
 	{
 	 lw r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
 	}
 	FEEDBACK_REENTER(handle_interrupt_downcall)
 
-	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
 	lw r20, r30
 	or r20, r20, r31
 	sw r30, r20
@@ -1509,7 +1509,7 @@ handle_ill:
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
-	mfspr r1, EX_CONTEXT_1_0
+	mfspr r1, SPR_EX_CONTEXT_K_0
 	move r2, lr
 	move r3, sp
 	move r4, r52
@@ -1518,7 +1518,7 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
 	STD_ENDPROC(_kernel_double_fault)
 
 STD_ENTRY_LOCAL(bad_intr)
-	mfspr r2, EX_CONTEXT_1_0
+	mfspr r2, SPR_EX_CONTEXT_K_0
 	panic "Unhandled interrupt %#x: PC %#lx"
 	STD_ENDPROC(bad_intr)
 
@@ -1560,7 +1560,7 @@ STD_ENTRY(_sys_clone)
  * a page fault which would assume the stack was valid, it does
  * save/restore the stack pointer and zero it out to make sure it gets reset.
  * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
 * (other than to advance the PC on return).
 *
 * We have to manually validate the user vs kernel address range
@@ -1766,7 +1766,7 @@ ENTRY(sys_cmpxchg)
 	/* Do slow mtspr here so the following "mf" waits less. */
 	{
 	 move sp, r27
-	 mtspr EX_CONTEXT_1_0, r28
+	 mtspr SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 
@@ -1785,7 +1785,7 @@ ENTRY(sys_cmpxchg)
 	}
 	{
 	 move sp, r27
-	 mtspr EX_CONTEXT_1_0, r28
+	 mtspr SPR_EX_CONTEXT_K_0, r28
 	}
 	iret
 
@@ -1813,7 +1813,7 @@ ENTRY(sys_cmpxchg)
 #endif
 
 	/* Issue the slow SPR here while the tns result is in flight. */
-	mfspr r28, EX_CONTEXT_1_0
+	mfspr r28, SPR_EX_CONTEXT_K_0
 
 	{
 	 addi r28, r28, 8    /* return to the instruction after the swint1 */
@@ -1901,7 +1901,7 @@ ENTRY(sys_cmpxchg)
 .Lcmpxchg64_mismatch:
 	{
 	 move sp, r27
-	 mtspr EX_CONTEXT_1_0, r28
+	 mtspr SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 	{
@@ -1982,8 +1982,13 @@ int_unalign:
 	int_hand     INT_PERF_COUNT, PERF_COUNT, \
 		     op_handle_perf_interrupt, handle_nmi
 	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
+	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
 	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
 	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
+#endif
 	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
 		     hv_message_intr, handle_interrupt_downcall
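The #if block above is the only hunk here that changes behavior rather than just names: hypervisor downcalls arrive on the INTCTRL interrupt belonging to the kernel's own PL, so a PL2 build routes dc_dispatch to INTCTRL_2 and demotes INTCTRL_1 to a bad_intr stub, the mirror image of the PL1 arrangement under the #else.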

@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
 #else
 /* Use HV to manipulate device interrupts. */
 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * masked by a previous interrupt. Then, mask out the ones
 	 * we're going to handle.
 	 */
-	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
-	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
-	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
 #else
 	/*
 	 * Hypervisor performs the equivalent of the Gx code above and
 	 * then puts the pending interrupt mask into a system save reg
 	 * for us to find.
 	 */
-	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
 #endif
 	remaining_irqs = original_irqs;
 
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_1);
+	raw_local_irq_unmask(INT_IPI_K);
 #endif
 }

@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_1);
+	raw_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)

@@ -305,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 /* Allow user processes to access the DMA SPRs */
 void grant_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
 }
 
 /* Forbid user processes from accessing the DMA SPRs */
 void restrict_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
 }
 
 /* Pause the DMA engine, then save off its state registers. */
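One pattern worth noting in grant_dma_mpls()/restrict_dma_mpls() above: in both configurations, granting sets the DMA MPLs one level below the kernel's PL and restricting sets them back at the kernel's PL (SET_0/SET_1 for a PL1 kernel, SET_1/SET_2 for a PL2 kernel). "User" here effectively means "the next level down", whichever level the kernel itself occupies.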
@@ -524,7 +534,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
 	 * (i.e. the task that actually called __switch_to).
-	 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
 	 */
 	return __switch_to(prev, next, next_current_ksp0(next));
 }

@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
 	{
 	 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
 	 move sp, r13
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
 .L__switch_to_pc:

@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
 
 #ifdef CONFIG_HIGHMEM
 /*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there. On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there. On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
 #if CHIP_HAS_SN_PROC()
 	raw_local_irq_unmask(INT_SNITLB_MISS);
 #endif
+#ifdef __tilegx__
+	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+#endif
 
 	/*
 	 * Allow user access to many generic SPRs, like the cycle
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
 #endif
 
 	/*
-	 * Set the MPL for interrupt control 0 to user level.
-	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
-	 * as well as the PL 0 interrupt mask.
+	 * Set the MPL for interrupt control 0 & 1 to the corresponding
+	 * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
+	 * SPRs, as well as the interrupt mask.
 	 */
 	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
 
 	/* Initialize IRQ support for this cpu. */
 	setup_irq_regs();
@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
 	 */
-	int i, fc_fd_ok = 0;
+	int i, user_kernel_ok = 0;
 	unsigned long max_va = 0;
 	unsigned long list_va =
 		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
 		    range.start + range.size >= MEM_HV_INTRPT)
-			fc_fd_ok = 1;
+			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
 		BUG_ON(range.start + range.size > list_va);
 	}
-	if (!fc_fd_ok)
-		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+	if (!user_kernel_ok)
+		early_panic("Hypervisor not configured for user/kernel VAs\n");
 	if (max_va == 0)
 		early_panic("Hypervisor not configured for low VAs\n");
 	if (max_va < KERNEL_HIGH_VADDR)

@@ -212,7 +212,7 @@ void __init ipi_init(void)
 
 		tile.x = cpu_x(cpu);
 		tile.y = cpu_y(cpu);
-		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
 		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;

@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	case INT_DOUBLE_FAULT:
 		/*
 		 * For double fault, "reason" is actually passed as
-		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+		 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
 		 * we can provide the original fault number rather than
 		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
 		 * learn what actually struck while PL0 ICS was set.
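The "suitable config switch" named in the commit message is CONFIG_KERNEL_PL, tested throughout the hunks above. The Kconfig entry itself is not part of this excerpt; a sketch of what it presumably looks like, with the prompt and help text assumed:

config KERNEL_PL
	int "Processor protection level for kernel"
	range 1 2
	default "1"
	help
	  Build the kernel to run at protection level 1 (the historical
	  choice under the Tilera hypervisor) or at level 2, which the
	  planned KVM port requires so that guest kernels can run at PL1.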