arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips.
No driver support (except the console driver) is included yet.

This includes the relevant Linux headers in asm/; the low-level
"Tile architecture" headers in arch/, which are shared with the
hypervisor, etc., and are build-system agnostic; and the relevant
hypervisor headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
arch/tile/kernel/Makefile (new file, 16 lines)
@@ -0,0 +1,16 @@
#
# Makefile for the Linux/TILE kernel.
#

extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \
        pci-dma.o proc.o process.o ptrace.o reboot.o \
        setup.o signal.o single_step.o stack.o sys.o time.o traps.o \
        intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o

obj-$(CONFIG_TILEGX)        += futex_64.o
obj-$(CONFIG_COMPAT)        += compat.o compat_signal.o
obj-$(CONFIG_SMP)           += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES)       += module.o
obj-$(CONFIG_EARLY_PRINTK)  += early_printk.o
obj-$(CONFIG_KEXEC)         += machine_kexec.o relocate_kernel.o
arch/tile/kernel/asm-offsets.c (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Generates definitions from c-type structures used by assembly sources.
 */

#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>

/* Check for compatible compiler early in the build. */
#ifdef CONFIG_TILEGX
# ifndef __tilegx__
# error Can only build TILE-Gx configurations with tilegx compiler
# endif
# ifndef __LP64__
# error Must not specify -m32 when building the TILE-Gx kernel
# endif
#else
# ifdef __tilegx__
# error Can not build TILEPro/TILE64 configurations with tilegx compiler
# endif
#endif

void foo(void)
{
        DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \
               offsetof(struct single_step_state, buffer));
        DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \
               offsetof(struct single_step_state, flags));
        DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \
               offsetof(struct single_step_state, orig_pc));
        DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \
               offsetof(struct single_step_state, next_pc));
        DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \
               offsetof(struct single_step_state, branch_next_pc));
        DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \
               offsetof(struct single_step_state, update_value));

        DEFINE(THREAD_INFO_TASK_OFFSET, \
               offsetof(struct thread_info, task));
        DEFINE(THREAD_INFO_FLAGS_OFFSET, \
               offsetof(struct thread_info, flags));
        DEFINE(THREAD_INFO_STATUS_OFFSET, \
               offsetof(struct thread_info, status));
        DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \
               offsetof(struct thread_info, homecache_cpu));
        DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \
               offsetof(struct thread_info, step_state));

        DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
               offsetof(struct task_struct, thread.ksp));
        DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
               offsetof(struct task_struct, thread.pc));

        DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \
               offsetof(HV_Topology, width));
        DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \
               offsetof(HV_Topology, height));

        DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \
               offsetof(irq_cpustat_t, irq_syscall_count));
}
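For context, the DEFINE() calls above rely on the generic Kbuild asm-offsets mechanism from <linux/kbuild.h>: the compiler is asked only to produce assembly for this file, and a marker line per symbol is then extracted into a generated header that assembly sources include. A rough sketch of how that macro works (illustrative; the authoritative definition lives in linux/kbuild.h):

/* Sketch (not part of this patch): compiling asm-offsets.c with -S yields
 * lines like "->SYMBOL <value> <comment>" in the .s output, which the build
 * system turns into "#define SYMBOL <value>" in <asm/asm-offsets.h>. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

This is why foo() is never called: only its compiled assembly matters.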
arch/tile/kernel/backtrace.c (new file, 634 lines)
@@ -0,0 +1,634 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/backtrace.h>

#include <arch/chip.h>

#if TILE_CHIP < 10

#include <asm/opcode-tile.h>

#define TREG_SP 54
#define TREG_LR 55

/** A decoded bundle used for backtracer analysis. */
typedef struct {
        tile_bundle_bits bits;
        int num_insns;
        struct tile_decoded_instruction
                insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
} BacktraceBundle;

/* This implementation only makes sense for native tools. */
/** Default function to read memory. */
static bool
bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
{
        /* FIXME: this should do some horrible signal stuff to catch
         * SEGV cleanly and fail.
         *
         * Or else the caller should do the setjmp for efficiency.
         */

        memcpy(result, (const void *)addr, size);
        return true;
}

/** Locates an instruction inside the given bundle that
 * has the specified mnemonic, and whose first 'num_operands_to_match'
 * operands exactly match those in 'operand_values'.
 */
static const struct tile_decoded_instruction*
find_matching_insn(const BacktraceBundle *bundle,
                   tile_mnemonic mnemonic,
                   const int *operand_values,
                   int num_operands_to_match)
{
        int i, j;
        bool match;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic != mnemonic)
                        continue;

                match = true;
                for (j = 0; j < num_operands_to_match; j++) {
                        if (operand_values[j] != insn->operand_values[j]) {
                                match = false;
                                break;
                        }
                }

                if (match)
                        return insn;
        }

        return NULL;
}

/** Does this bundle contain an 'iret' instruction? */
static inline bool
bt_has_iret(const BacktraceBundle *bundle)
{
        return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
}

/** Does this bundle contain an 'addi sp, sp, OFFSET' or
 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
 */
static bool
bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
{
        static const int vals[2] = { TREG_SP, TREG_SP };

        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
        if (insn == NULL)
                insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
        if (insn == NULL)
                return false;

        *adjust = insn->operand_values[2];
        return true;
}

/** Does this bundle contain any 'info OP' or 'infol OP'
 * instruction, and if so, what are their OP?  Note that OP is interpreted
 * as an unsigned value by this code since that's what the caller wants.
 * Returns the number of info ops found.
 */
static int
bt_get_info_ops(const BacktraceBundle *bundle,
                int operands[MAX_INFO_OPS_PER_BUNDLE])
{
        int num_ops = 0;
        int i;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic == TILE_OPC_INFO ||
                    insn->opcode->mnemonic == TILE_OPC_INFOL) {
                        operands[num_ops++] = insn->operand_values[0];
                }
        }

        return num_ops;
}

/** Does this bundle contain a jrp instruction, and if so, to which
 * register is it jumping?
 */
static bool
bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
{
        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
        if (insn == NULL)
                return false;

        *target_reg = insn->operand_values[0];
        return true;
}

/** Does this bundle modify the specified register in any way? */
static bool
bt_modifies_reg(const BacktraceBundle *bundle, int reg)
{
        int i, j;
        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->implicitly_written_register == reg)
                        return true;

                for (j = 0; j < insn->opcode->num_operands; j++)
                        if (insn->operands[j]->is_dest_reg &&
                            insn->operand_values[j] == reg)
                                return true;
        }

        return false;
}

/** Does this bundle modify sp? */
static inline bool
bt_modifies_sp(const BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_SP);
}

/** Does this bundle modify lr? */
static inline bool
bt_modifies_lr(const BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_LR);
}

/** Does this bundle contain the instruction 'move fp, sp'? */
static inline bool
bt_has_move_r52_sp(const BacktraceBundle *bundle)
{
        static const int vals[2] = { 52, TREG_SP };
        return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
}

/** Does this bundle contain the instruction 'sw sp, lr'? */
static inline bool
bt_has_sw_sp_lr(const BacktraceBundle *bundle)
{
        static const int vals[2] = { TREG_SP, TREG_LR };
        return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
}

/** Locates the caller's PC and SP for a program starting at the
 * given address.
 */
static void
find_caller_pc_and_caller_sp(CallerLocation *location,
                             const VirtualAddress start_pc,
                             BacktraceMemoryReader read_memory_func,
                             void *read_memory_func_extra)
{
        /* Have we explicitly decided what the sp is,
         * rather than just the default?
         */
        bool sp_determined = false;

        /* Has any bundle seen so far modified lr? */
        bool lr_modified = false;

        /* Have we seen a move from sp to fp? */
        bool sp_moved_to_r52 = false;

        /* Have we seen a terminating bundle? */
        bool seen_terminating_bundle = false;

        /* Cut down on round-trip reading overhead by reading several
         * bundles at a time.
         */
        tile_bundle_bits prefetched_bundles[32];
        int num_bundles_prefetched = 0;
        int next_bundle = 0;
        VirtualAddress pc;

        /* Default to assuming that the caller's sp is the current sp.
         * This is necessary to handle the case where we start backtracing
         * right at the end of the epilog.
         */
        location->sp_location = SP_LOC_OFFSET;
        location->sp_offset = 0;

        /* Default to having no idea where the caller PC is. */
        location->pc_location = PC_LOC_UNKNOWN;

        /* Don't even try if the PC is not aligned. */
        if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
                return;

        for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

                BacktraceBundle bundle;
                int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
                int one_ago, jrp_reg;
                bool has_jrp;

                if (next_bundle >= num_bundles_prefetched) {
                        /* Prefetch some bytes, but don't cross a page
                         * boundary since that might cause a read failure we
                         * don't care about if we only need the first few
                         * bytes.  Note: we don't care what the actual page
                         * size is; using the minimum possible page size will
                         * prevent any problems.
                         */
                        unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
                        if (bytes_to_prefetch > sizeof prefetched_bundles)
                                bytes_to_prefetch = sizeof prefetched_bundles;

                        if (!read_memory_func(prefetched_bundles, pc,
                                              bytes_to_prefetch,
                                              read_memory_func_extra)) {
                                if (pc == start_pc) {
                                        /* The program probably called a bad
                                         * address, such as a NULL pointer.
                                         * So treat this as if we are at the
                                         * start of the function prolog so the
                                         * backtrace will show how we got here.
                                         */
                                        location->pc_location = PC_LOC_IN_LR;
                                        return;
                                }

                                /* Unreadable address.  Give up. */
                                break;
                        }

                        next_bundle = 0;
                        num_bundles_prefetched =
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }

                /* Decode the next bundle. */
                bundle.bits = prefetched_bundles[next_bundle++];
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);

                /* First look at any one_ago info ops if they are interesting,
                 * since they should shadow any non-one-ago info ops.
                 */
                for (one_ago = (pc != start_pc) ? 1 : 0;
                     one_ago >= 0; one_ago--) {
                        int i;
                        for (i = 0; i < num_info_ops; i++) {
                                int info_operand = info_operands[i];
                                if (info_operand < CALLER_UNKNOWN_BASE) {
                                        /* Weird; reserved value, ignore it. */
                                        continue;
                                }

                                /* Skip info ops which are not in the
                                 * "one_ago" mode we want right now.
                                 */
                                if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
                                    != (one_ago != 0))
                                        continue;

                                /* Clear the flag to make later checking
                                 * easier. */
                                info_operand &= ~ONE_BUNDLE_AGO_FLAG;

                                /* Default to looking at PC_IN_LR_FLAG. */
                                if (info_operand & PC_IN_LR_FLAG)
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                else
                                        location->pc_location =
                                                PC_LOC_ON_STACK;

                                switch (info_operand) {
                                case CALLER_UNKNOWN_BASE:
                                        location->pc_location = PC_LOC_UNKNOWN;
                                        location->sp_location = SP_LOC_UNKNOWN;
                                        return;

                                case CALLER_SP_IN_R52_BASE:
                                case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
                                        location->sp_location = SP_LOC_IN_R52;
                                        return;

                                default:
                                {
                                        const unsigned int val = info_operand
                                                - CALLER_SP_OFFSET_BASE;
                                        const unsigned int sp_offset =
                                                (val >> NUM_INFO_OP_FLAGS) * 8;
                                        if (sp_offset < 32768) {
                                                /* This is a properly encoded
                                                 * SP offset. */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset =
                                                        sp_offset;
                                                return;
                                        } else {
                                                /* This looked like an SP
                                                 * offset, but it's outside
                                                 * the legal range, so this
                                                 * must be an unrecognized
                                                 * info operand.  Ignore it.
                                                 */
                                        }
                                }
                                break;
                                }
                        }
                }

                if (seen_terminating_bundle) {
                        /* We saw a terminating bundle during the previous
                         * iteration, so we were only looking for an info op.
                         */
                        break;
                }

                if (bundle.bits == 0) {
                        /* Wacky terminating bundle.  Stop looping, and hope
                         * we've already seen enough to find the caller.
                         */
                        break;
                }

                /*
                 * Try to determine caller's SP.
                 */

                if (!sp_determined) {
                        int adjust;
                        if (bt_has_addi_sp(&bundle, &adjust)) {
                                location->sp_location = SP_LOC_OFFSET;

                                if (adjust <= 0) {
                                        /* We are in prolog about to adjust
                                         * SP. */
                                        location->sp_offset = 0;
                                } else {
                                        /* We are in epilog restoring SP. */
                                        location->sp_offset = adjust;
                                }

                                sp_determined = true;
                        } else {
                                if (bt_has_move_r52_sp(&bundle)) {
                                        /* Maybe in prolog, creating an
                                         * alloca-style frame.  But maybe in
                                         * the middle of a fixed-size frame
                                         * clobbering r52 with SP.
                                         */
                                        sp_moved_to_r52 = true;
                                }

                                if (bt_modifies_sp(&bundle)) {
                                        if (sp_moved_to_r52) {
                                                /* We saw SP get saved into
                                                 * r52 earlier (or now), which
                                                 * must have been in the
                                                 * prolog, so we now know that
                                                 * SP is still holding the
                                                 * caller's sp value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset = 0;
                                        } else {
                                                /* Someone must have saved
                                                 * aside the caller's SP value
                                                 * into r52, so r52 holds the
                                                 * current value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_IN_R52;
                                        }
                                        sp_determined = true;
                                }
                        }
                }

                if (bt_has_iret(&bundle)) {
                        /* This is a terminating bundle. */
                        seen_terminating_bundle = true;
                        continue;
                }

                /*
                 * Try to determine caller's PC.
                 */

                jrp_reg = -1;
                has_jrp = bt_has_jrp(&bundle, &jrp_reg);
                if (has_jrp)
                        seen_terminating_bundle = true;

                if (location->pc_location == PC_LOC_UNKNOWN) {
                        if (has_jrp) {
                                if (jrp_reg == TREG_LR && !lr_modified) {
                                        /* Looks like a leaf function, or else
                                         * lr is already restored. */
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                } else {
                                        location->pc_location =
                                                PC_LOC_ON_STACK;
                                }
                        } else if (bt_has_sw_sp_lr(&bundle)) {
                                /* In prolog, spilling initial lr to stack. */
                                location->pc_location = PC_LOC_IN_LR;
                        } else if (bt_modifies_lr(&bundle)) {
                                lr_modified = true;
                        }
                }
        }
}

void
backtrace_init(BacktraceIterator *state,
               BacktraceMemoryReader read_memory_func,
               void *read_memory_func_extra,
               VirtualAddress pc, VirtualAddress lr,
               VirtualAddress sp, VirtualAddress r52)
{
        CallerLocation location;
        VirtualAddress fp, initial_frame_caller_pc;

        if (read_memory_func == NULL) {
                read_memory_func = bt_read_memory;
        }

        /* Find out where we are in the initial frame. */
        find_caller_pc_and_caller_sp(&location, pc,
                                     read_memory_func, read_memory_func_extra);

        switch (location.sp_location) {
        case SP_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case SP_LOC_IN_R52:
                fp = r52;
                break;

        case SP_LOC_OFFSET:
                fp = sp + location.sp_offset;
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        /* The frame pointer should theoretically be aligned mod 8. If
         * it's not even aligned mod 4 then something terrible happened
         * and we should mark it as invalid.
         */
        if (fp % 4 != 0)
                fp = -1;

        /* -1 means "don't know initial_frame_caller_pc". */
        initial_frame_caller_pc = -1;

        switch (location.pc_location) {
        case PC_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case PC_LOC_IN_LR:
                if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* Give up. */
                        fp = -1;
                } else {
                        initial_frame_caller_pc = lr;
                }
                break;

        case PC_LOC_ON_STACK:
                /* Leave initial_frame_caller_pc as -1,
                 * meaning check the stack.
                 */
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        state->pc = pc;
        state->sp = sp;
        state->fp = fp;
        state->initial_frame_caller_pc = initial_frame_caller_pc;
        state->read_memory_func = read_memory_func;
        state->read_memory_func_extra = read_memory_func_extra;
}

bool
backtrace_next(BacktraceIterator *state)
{
        VirtualAddress next_fp, next_pc, next_frame[2];

        if (state->fp == -1) {
                /* No parent frame. */
                return false;
        }

        /* Try to read the frame linkage data chaining to the next function. */
        if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
                                     state->read_memory_func_extra)) {
                return false;
        }

        next_fp = next_frame[1];
        if (next_fp % 4 != 0) {
                /* Caller's frame pointer is suspect, so give up.
                 * Technically it should be aligned mod 8, but we will
                 * be forgiving here.
                 */
                return false;
        }

        if (state->initial_frame_caller_pc != -1) {
                /* We must be in the initial stack frame and already know the
                 * caller PC.
                 */
                next_pc = state->initial_frame_caller_pc;

                /* Force reading stack next time, in case we were in the
                 * initial frame.  We don't do this above just to paranoidly
                 * avoid changing the struct at all when we return false.
                 */
                state->initial_frame_caller_pc = -1;
        } else {
                /* Get the caller PC from the frame linkage area. */
                next_pc = next_frame[0];
                if (next_pc == 0 ||
                    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* The PC is suspect, so give up. */
                        return false;
                }
        }

        /* Update state to become the caller's stack frame. */
        state->pc = next_pc;
        state->sp = state->fp;
        state->fp = next_fp;

        return true;
}

#else /* TILE_CHIP < 10 */

void
backtrace_init(BacktraceIterator *state,
               BacktraceMemoryReader read_memory_func,
               void *read_memory_func_extra,
               VirtualAddress pc, VirtualAddress lr,
               VirtualAddress sp, VirtualAddress r52)
{
        state->pc = pc;
        state->sp = sp;
        state->fp = -1;
        state->initial_frame_caller_pc = -1;
        state->read_memory_func = read_memory_func;
        state->read_memory_func_extra = read_memory_func_extra;
}

bool backtrace_next(BacktraceIterator *state) { return false; }

#endif /* TILE_CHIP < 10 */
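Usage sketch (illustrative; not part of this commit): a client seeds the iterator with the current pc/lr/sp/r52 and then steps frame by frame. Passing NULL for read_memory_func falls back to the bt_read_memory() default above; the dump_frames() name is hypothetical.

#include <linux/kernel.h>
#include <asm/backtrace.h>

/* Hypothetical consumer of the iterator API defined in this file. */
static void dump_frames(VirtualAddress pc, VirtualAddress lr,
                        VirtualAddress sp, VirtualAddress r52)
{
        BacktraceIterator it;

        backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
        do {
                pr_info(" frame: pc %#lx, sp %#lx\n",
                        (unsigned long)it.pc, (unsigned long)it.sp);
        } while (backtrace_next(&it));
}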
arch/tile/kernel/compat.c (new file, 183 lines)
@@ -0,0 +1,183 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Adjust unistd.h to provide 32-bit numbers and functions. */
#define __SYSCALL_COMPAT

#include <linux/compat.h>
#include <linux/msg.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>

/*
 * Syscalls that take 64-bit numbers traditionally take them in 32-bit
 * "high" and "low" value parts on 32-bit architectures.
 * In principle, one could imagine passing some register arguments as
 * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
 * adapt the usual convention.
 */

long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
{
        return sys_truncate(filename, ((loff_t)high << 32) | low);
}

long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
{
        return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}

long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
                        u32 dummy, u32 low, u32 high)
{
        return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
                         u32 dummy, u32 low, u32 high)
{
        return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
{
        return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}

long compat_sys_sync_file_range2(int fd, unsigned int flags,
                                 u32 offset_lo, u32 offset_hi,
                                 u32 nbytes_lo, u32 nbytes_hi)
{
        return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
                                   ((loff_t)nbytes_hi << 32) | nbytes_lo,
                                   flags);
}

long compat_sys_fallocate(int fd, int mode,
                          u32 offset_lo, u32 offset_hi,
                          u32 len_lo, u32 len_hi)
{
        return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
                             ((loff_t)len_hi << 32) | len_lo);
}

long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                      struct compat_timespec __user *interval)
{
        struct timespec t;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
        set_fs(old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}

ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
                            size_t count)
{
        mm_segment_t old_fs = get_fs();
        int ret;
        off_t of;

        if (offset && get_user(of, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
                           count);
        set_fs(old_fs);

        if (offset && put_user(of, offset))
                return -EFAULT;
        return ret;
}

/*
 * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
 * some different calling convention than our normal 32-bit tile code.
 */

/* Already defined in ipc/compat.c, but we need it here. */
struct compat_msgbuf {
        compat_long_t mtype;
        char mtext[1];
};

long tile_compat_sys_msgsnd(int msqid,
                            struct compat_msgbuf __user *msgp,
                            size_t msgsz, int msgflg)
{
        compat_long_t mtype;

        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

long tile_compat_sys_msgrcv(int msqid,
                            struct compat_msgbuf __user *msgp,
                            size_t msgsz, long msgtyp, int msgflg)
{
        long err, mtype;

        err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
        if (err < 0)
                goto out;

        if (put_user(mtype, &msgp->mtype))
                err = -EFAULT;
out:
        return err;
}

/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (compat_##call),

/* The generic versions of these don't work for Tile. */
#define compat_sys_msgrcv tile_compat_sys_msgrcv
#define compat_sys_msgsnd tile_compat_sys_msgsnd

/* See comments in sys.c */
#define compat_sys_fadvise64 sys32_fadvise64
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2

/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
#define compat_sys_stat64 sys_newstat
#define compat_sys_lstat64 sys_newlstat
#define compat_sys_fstat64 sys_newfstat
#define compat_sys_fstatat64 sys_newfstatat

/* Pass full 64-bit values through ptrace. */
#define compat_sys_ptrace tile_compat_sys_ptrace

void *compat_sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
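The hi/lo convention the wrappers above open-code can be summarized in one helper; this is purely illustrative (not part of the patch): every 64-bit argument arrives as two 32-bit registers and is reassembled by shifting the high half.

#include <linux/types.h>

/* Illustrative only: how each wrapper above rebuilds a 64-bit value
 * from its two 32-bit register halves. */
static inline loff_t compat_hilo_to_loff(u32 low, u32 high)
{
        return ((loff_t)high << 32) | low;
}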
arch/tile/kernel/compat_signal.c (new file, 433 lines)
@@ -0,0 +1,433 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <arch/interrupts.h>

struct compat_sigaction {
        compat_uptr_t sa_handler;
        compat_ulong_t sa_flags;
        compat_uptr_t sa_restorer;
        sigset_t sa_mask;               /* mask last for extensibility */
};

struct compat_sigaltstack {
        compat_uptr_t ss_sp;
        int ss_flags;
        compat_size_t ss_size;
};

struct compat_ucontext {
        compat_ulong_t uc_flags;
        compat_uptr_t uc_link;
        struct compat_sigaltstack uc_stack;
        struct sigcontext uc_mcontext;
        sigset_t uc_sigmask;            /* mask last for extensibility */
};

struct compat_siginfo {
        int si_signo;
        int si_errno;
        int si_code;

        union {
                int _pad[SI_PAD_SIZE];

                /* kill() */
                struct {
                        unsigned int _pid;      /* sender's pid */
                        unsigned int _uid;      /* sender's uid */
                } _kill;

                /* POSIX.1b timers */
                struct {
                        compat_timer_t _tid;    /* timer id */
                        int _overrun;           /* overrun count */
                        compat_sigval_t _sigval;        /* same as below */
                        int _sys_private;       /* not to be passed to user */
                        int _overrun_incr;      /* amount to add to overrun */
                } _timer;

                /* POSIX.1b signals */
                struct {
                        unsigned int _pid;      /* sender's pid */
                        unsigned int _uid;      /* sender's uid */
                        compat_sigval_t _sigval;
                } _rt;

                /* SIGCHLD */
                struct {
                        unsigned int _pid;      /* which child */
                        unsigned int _uid;      /* sender's uid */
                        int _status;            /* exit code */
                        compat_clock_t _utime;
                        compat_clock_t _stime;
                } _sigchld;

                /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
                struct {
                        unsigned int _addr;     /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
                        int _trapno;    /* TRAP # which caused the signal */
#endif
                } _sigfault;

                /* SIGPOLL */
                struct {
                        int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
        } _sifields;
};

struct compat_rt_sigframe {
        unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
        struct compat_siginfo info;
        struct compat_ucontext uc;
};

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                compat_uptr_t handler, restorer;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(new_sa.sa.sa_flags, &act->sa_flags) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask,
                                     sizeof(sigset_t)))
                        return -EFAULT;
                new_sa.sa.sa_handler = compat_ptr(handler);
                new_sa.sa.sa_restorer = compat_ptr(restorer);
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_sa.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_sa.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_sa.sa.sa_flags, &oact->sa_flags) ||
                    __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask,
                                   sizeof(sigset_t)))
                        return -EFAULT;
        }
out:
        return ret;
}

long compat_sys_rt_sigqueueinfo(int pid, int sig,
                                struct compat_siginfo __user *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
        set_fs(KERNEL_DS);
        ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
        set_fs(old_fs);
        return ret;
}

int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
                return -EFAULT;

        /* If you change siginfo_t structure, please make sure that
           this code is fixed accordingly.
           It should never copy any pad contained in the structure
           to avoid security leaks, but must copy the generic
           3 ints plus the relevant union member.  */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);

        if (from->si_code < 0) {
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
        } else {
                /*
                 * First 32bits of unions are always present:
                 * si_pid === si_band === si_tid === si_addr(LS half)
                 */
                err |= __put_user(from->_sifields._pad[0],
                                  &to->_sifields._pad[0]);
                switch (from->si_code >> 16) {
                case __SI_FAULT >> 16:
                        break;
                case __SI_CHLD >> 16:
                        err |= __put_user(from->si_utime, &to->si_utime);
                        err |= __put_user(from->si_stime, &to->si_stime);
                        err |= __put_user(from->si_status, &to->si_status);
                        /* FALL THROUGH */
                default:
                case __SI_KILL >> 16:
                        err |= __put_user(from->si_uid, &to->si_uid);
                        break;
                case __SI_POLL >> 16:
                        err |= __put_user(from->si_fd, &to->si_fd);
                        break;
                case __SI_TIMER >> 16:
                        err |= __put_user(from->si_overrun, &to->si_overrun);
                        err |= __put_user(ptr_to_compat(from->si_ptr),
                                          &to->si_ptr);
                        break;
                /* This is not generated by the kernel as of now. */
                case __SI_RT >> 16:
                case __SI_MESGQ >> 16:
                        err |= __put_user(from->si_uid, &to->si_uid);
                        err |= __put_user(from->si_int, &to->si_int);
                        break;
                }
        }
        return err;
}

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
        int err;
        u32 ptr32;

        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;

        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);

        err |= __get_user(to->si_pid, &from->si_pid);
        err |= __get_user(to->si_uid, &from->si_uid);
        err |= __get_user(ptr32, &from->si_ptr);
        to->si_ptr = compat_ptr(ptr32);

        return err;
}

long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
                             struct compat_sigaltstack __user *uoss_ptr,
                             struct pt_regs *regs)
{
        stack_t uss, uoss;
        int ret;
        mm_segment_t seg;

        if (uss_ptr) {
                u32 ptr;

                memset(&uss, 0, sizeof(stack_t));
                if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) ||
                    __get_user(ptr, &uss_ptr->ss_sp) ||
                    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
                    __get_user(uss.ss_size, &uss_ptr->ss_size))
                        return -EFAULT;
                uss.ss_sp = compat_ptr(ptr);
        }
        seg = get_fs();
        set_fs(KERNEL_DS);
        ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
                             (unsigned long)compat_ptr(regs->sp));
        set_fs(seg);
        if (ret >= 0 && uoss_ptr)  {
                if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) ||
                    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
                    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
                    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
                        ret = -EFAULT;
        }
        return ret;
}

long _compat_sys_rt_sigreturn(struct pt_regs *regs)
{
        struct compat_rt_sigframe __user *frame =
                (struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
        sigset_t set;
        long r0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
                goto badframe;

        if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
                goto badframe;

        return r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

/*
 * Determine which stack to use..
 */
static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
                                               struct pt_regs *regs,
                                               size_t frame_size)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = (unsigned long)compat_ptr(regs->sp);

        /*
         * If we are on the alternate signal stack and would overflow
         * it, don't.  Return an always-bogus address instead so we
         * will die with SIGSEGV.
         */
        if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
                return (void __user *) -1L;

        /* This is the X/Open sanctioned signal stack switching. */
        if (ka->sa.sa_flags & SA_ONSTACK) {
                if (sas_ss_flags(sp) == 0)
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }

        sp -= frame_size;
        /*
         * Align the stack pointer according to the TILE ABI,
         * i.e. so that on function entry (sp & 15) == 0.
         */
        sp &= -16UL;
        return (void __user *) sp;
}

int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
        unsigned long restorer;
        struct compat_rt_sigframe __user *frame;
        int err = 0;
        int usig;

        frame = compat_get_sigframe(ka, regs, sizeof(*frame));

        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                goto give_sigsegv;

        usig = current_thread_info()->exec_domain
                && current_thread_info()->exec_domain->signal_invmap
                && sig < 32
                ? current_thread_info()->exec_domain->signal_invmap[sig]
                : sig;

        /* Always write at least the signal number for the stack backtracer. */
        if (ka->sa.sa_flags & SA_SIGINFO) {
                /* At sigreturn time, restore the callee-save registers too. */
                err |= copy_siginfo_to_user32(&frame->info, info);
                regs->flags |= PT_FLAGS_RESTORE_REGS;
        } else {
                err |= __put_user(info->si_signo, &frame->info.si_signo);
        }

        /* Create the ucontext. */
        err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(0, &frame->uc.uc_link);
        err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)),
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(regs->sp),
                          &frame->uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
        err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
        if (err)
                goto give_sigsegv;

        restorer = VDSO_BASE;
        if (ka->sa.sa_flags & SA_RESTORER)
                restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

        /*
         * Set up registers for signal handler.
         * Registers that we don't modify keep the value they had from
         * user-space at the time we took the signal.
         */
        regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
        regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
        regs->sp = ptr_to_compat_reg(frame);
        regs->lr = restorer;
        regs->regs[0] = (unsigned long) usig;

        if (ka->sa.sa_flags & SA_SIGINFO) {
                /* Need extra arguments, so mark to restore caller-saves. */
                regs->regs[1] = ptr_to_compat_reg(&frame->info);
                regs->regs[2] = ptr_to_compat_reg(&frame->uc);
                regs->flags |= PT_FLAGS_CALLER_SAVES;
        }

        /*
         * Notify any tracer that was single-stepping it.
         * The tracer may want to single-step inside the
         * handler too.
         */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);

        return 0;

give_sigsegv:
        force_sigsegv(sig, current);
        return -EFAULT;
}
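The pointer-narrowing pattern used throughout this file (a compat_uptr_t widened via compat_ptr() after __get_user()) can be isolated into a helper; this sketch is illustrative only, and the compat_read_sigaltstack() name is hypothetical (the file open-codes these steps in _compat_sys_sigaltstack()):

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

/* Hypothetical helper: widen a 32-bit user sigaltstack into the
 * native stack_t, mirroring the inline code above. */
static int compat_read_sigaltstack(stack_t *dst,
                                   const struct compat_sigaltstack __user *src)
{
        u32 ptr;

        if (!access_ok(VERIFY_READ, src, sizeof(*src)) ||
            __get_user(ptr, &src->ss_sp) ||
            __get_user(dst->ss_flags, &src->ss_flags) ||
            __get_user(dst->ss_size, &src->ss_size))
                return -EFAULT;
        dst->ss_sp = compat_ptr(ptr);
        return 0;
}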
arch/tile/kernel/early_printk.c (new file, 109 lines)
@@ -0,0 +1,109 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>

static void early_hv_write(struct console *con, const char *s, unsigned n)
{
        hv_console_write((HV_VirtAddr) s, n);
}

static struct console early_hv_console = {
        .name =         "earlyhv",
        .write =        early_hv_write,
        .flags =        CON_PRINTBUFFER,
        .index =        -1,
};

/* Direct interface for emergencies */
struct console *early_console = &early_hv_console;
static int early_console_initialized;
static int early_console_complete;

static void early_vprintk(const char *fmt, va_list ap)
{
        char buf[512];
        int n = vscnprintf(buf, sizeof(buf), fmt, ap);
        early_console->write(early_console, buf, n);
}

void early_printk(const char *fmt, ...)
{
        va_list ap;
        va_start(ap, fmt);
        early_vprintk(fmt, ap);
        va_end(ap);
}

void early_panic(const char *fmt, ...)
{
        va_list ap;
        raw_local_irq_disable_all();
        va_start(ap, fmt);
        early_printk("Kernel panic - not syncing: ");
        early_vprintk(fmt, ap);
        early_console->write(early_console, "\n", 1);
        va_end(ap);
        dump_stack();
        hv_halt();
}

static int __initdata keep_early;

static int __init setup_early_printk(char *str)
{
        if (early_console_initialized)
                return 1;

        if (str != NULL && strncmp(str, "keep", 4) == 0)
                keep_early = 1;

        early_console = &early_hv_console;
        early_console_initialized = 1;
        register_console(early_console);

        return 0;
}

void __init disable_early_printk(void)
{
        early_console_complete = 1;
        if (!early_console_initialized || !early_console)
                return;
        if (!keep_early) {
                early_printk("disabling early console\n");
                unregister_console(early_console);
                early_console_initialized = 0;
        } else {
                early_printk("keeping early console\n");
        }
}

void warn_early_printk(void)
{
        if (early_console_complete || early_console_initialized)
                return;
        early_printk("\
Machine shutting down before console output is fully initialized.\n\
You may wish to reboot and add the option 'earlyprintk' to your\n\
boot command line to see any diagnostic early console output.\n\
");
}

early_param("earlyprintk", setup_early_printk);
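Usage note (not part of the diff): because of the early_param() hook above, booting with "earlyprintk" on the kernel command line registers the hypervisor console before any real console driver is up, and "earlyprintk=keep" leaves it registered even after disable_early_printk() runs. Since setup_early_printk() compares only the first four characters, any argument beginning with "keep" selects the keep behavior.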
arch/tile/kernel/entry.S (new file, 141 lines)
@@ -0,0 +1,141 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <arch/abi.h>
#include <asm/unistd.h>
#include <asm/irqflags.h>

#ifdef __tilegx__
#define bnzt bnezt
#endif

STD_ENTRY(current_text_addr)
        { move r0, lr; jrp lr }
        STD_ENDPROC(current_text_addr)

STD_ENTRY(_sim_syscall)
        /*
         * Wait for r0-r9 to be ready (and lr on the off chance we
         * want the syscall to locate its caller), then make a magic
         * simulator syscall.
         *
         * We carefully stall until the registers are readable in case they
         * are the target of a slow load, etc. so that tile-sim will
         * definitely be able to read all of them inside the magic syscall.
         *
         * Technically this is wrong for r3-r9 and lr, since an interrupt
         * could come in and restore the registers with a slow load right
         * before executing the mtspr.  We may need to modify tile-sim to
         * explicitly stall for this case, but we do not yet have
         * a way to implement such a stall.
         */
        { and zero, lr, r9 ; and zero, r8, r7 }
        { and zero, r6, r5 ; and zero, r4, r3 }
        { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
        { jrp lr }
        STD_ENDPROC(_sim_syscall)

/*
 * Implement execve().  The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be
 * careful not to write to the stack here.
 */
STD_ENTRY(kernel_execve)
        moveli TREG_SYSCALL_NR_NAME, __NR_execve
        swint1
        jrp lr
        STD_ENDPROC(kernel_execve)

/* Delay a fixed number of cycles. */
STD_ENTRY(__delay)
        { addi r0, r0, -1; bnzt r0, . }
        jrp lr
        STD_ENDPROC(__delay)

/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process.  See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
        .pushsection .data
ENTRY(__rt_sigreturn)
        moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
        swint1
        ENDPROC(__rt_sigreturn)
        ENTRY(__rt_sigreturn_end)
        .popsection

STD_ENTRY(dump_stack)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, dump_stack - . }
        { move r3, sp; j _dump_stack }
        jrp lr   /* keep backtracer happy */
        STD_ENDPROC(dump_stack)

STD_ENTRY(KBacktraceIterator_init_current)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
        { move r3, sp; j _KBacktraceIterator_init_current }
        jrp lr   /* keep backtracer happy */
        STD_ENDPROC(KBacktraceIterator_init_current)

/*
 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
 * free the old stack (passed in r0) and re-invoke cpu_idle().
 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
 */
STD_ENTRY(cpu_idle_on_new_stack)
        {
         move sp, r1
         mtspr SYSTEM_SAVE_1_0, r2
        }
        jal free_thread_info
        j cpu_idle
        STD_ENDPROC(cpu_idle_on_new_stack)

/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
        nap
        j smp_nap /* we are not architecturally guaranteed not to exit nap */
        jrp lr    /* clue in the backtracer */
        STD_ENDPROC(smp_nap)

/*
 * Enable interrupts racelessly and then nap until interrupted.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY(_cpu_idle)
        {
         lnk r0
         movei r1, 1
        }
        {
         addli r0, r0, _cpu_idle_nap - .
         mtspr INTERRUPT_CRITICAL_SECTION, r1
        }
        IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
        mtspr EX_CONTEXT_1_1, r1       /* PL1, ICS clear */
        mtspr EX_CONTEXT_1_0, r0
        iret
        .global _cpu_idle_nap
_cpu_idle_nap:
        nap
        jrp lr
        STD_ENDPROC(_cpu_idle)
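For reference, the __delay loop above is just a counted spin; in C it would amount to something like this (illustrative only; the kernel keeps it in assembly so each iteration is exactly one bundle):

#include <linux/compiler.h>

/* Illustrative C equivalent of the assembly __delay above. */
void __delay(unsigned long loops)
{
        while (loops--)
                barrier();      /* keep the compiler from deleting the loop */
}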
arch/tile/kernel/head_32.S (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */

	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
	  movei r1, TILE_CHIP
	  movei r2, TILE_CHIP_REV
	}
	{
	  moveli r0, _HV_VERSION
	  jal hv_init
	}
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal hv_inquire_asid
	}
	/* Install the default page table */
	{
	  moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
	  move r4, r0  /* use starting ASID of range for this page table */
	}
	{
	  moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
	  auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  lw r2, r6
	  addi r6, r6, 4
	}
	{
	  lw r3, r6
	  auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  inv r6
	  move r1, zero  /* high 32 bits of CPA is zero */
	}
	{
	  moveli lr, lo16(1f)
	  move r5, zero
	}
	{
	  auli lr, lr, ha16(1f)
	  j hv_install_context
	}
1:

	/* Get our processor number and save it away in SAVE_1_0. */
	jal hv_inquire_topology
	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
	moveli r5, lo16(__per_cpu_offset)
	auli r5, r5, ha16(__per_cpu_offset)
	s2a r5, r4, r5
	lw r5, r5
	bnz r5, 1f

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	{
	  sw r0, r2
	  addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
	}
	sw r0, r3
1:
#else
	move r5, zero
#endif

	/* Load and go with the correct pc and sp. */
	{
	  addli r1, r5, lo16(boot_sp)
	  addli r0, r5, lo16(boot_pc)
	}
	{
	  auli r1, r1, ha16(boot_sp)
	  auli r0, r0, ha16(boot_pc)
	}
	lw r0, r0
	lw sp, r1
	or r4, sp, r4
	mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
	addi sp, sp, -STACK_TOP_DELTA
	{
	  move lr, zero  /* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)

.section ".bss.page_aligned","w"
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)

.macro PTE va, cpa, bits1, no_org=0
	.ifeq \no_org
	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
	.endif
	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
.endm

.section ".data.page_aligned","wa"
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	/*
	 * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions and more respect for size of RAM later.
	 */
	.set addr, 0
	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
	PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE
	.set addr, addr + PGDIR_SIZE
	.endr

	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
	PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE
	.org swapper_pg_dir + HV_L1_SIZE
	END(swapper_pg_dir)

	/*
	 * Isolate swapper_pgprot to its own cache line, since each cpu
	 * starting up will read it using VA-is-PA and local homing.
	 * This would otherwise likely conflict with other data on the cache
	 * line, once we have set its permanent home in the page tables.
	 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1
	.align CHIP_L2_LINE_SIZE()
	END(swapper_pgprot)
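For reference, a minimal stand-alone C sketch (not part of this commit) of the boot calculation the assembly above performs: each tile linearizes its (x, y) grid coordinate into a cpu number, then consults the per-cpu offset table to tell a master first boot from a secondary one. The topology values and table size below are illustrative stand-ins for what hv_inquire_topology() and __per_cpu_offset provide.

#include <stdio.h>

struct topology { int x, y, width, height; };

static unsigned long per_cpu_offset[64];  /* zero until the master fills it */

static int linearize(const struct topology *topo)
{
	/* Same arithmetic as the mulll_uu/add pair: cpu = y * width + x. */
	return topo->y * topo->width + topo->x;
}

int main(void)
{
	struct topology topo = { .x = 3, .y = 2, .width = 8, .height = 8 };
	int cpu = linearize(&topo);

	/* A zero per-cpu offset marks the master tile's first boot. */
	printf("cpu %d, %s tile\n", cpu,
	       per_cpu_offset[cpu] == 0 ? "master (first boot)" : "secondary");
	return 0;
}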
56
arch/tile/kernel/hvglue.lds
Normal file
@@ -0,0 +1,56 @@
/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
hv_init = TEXT_OFFSET + 0x10020;
hv_install_context = TEXT_OFFSET + 0x10040;
hv_sysconf = TEXT_OFFSET + 0x10060;
hv_get_rtc = TEXT_OFFSET + 0x10080;
hv_set_rtc = TEXT_OFFSET + 0x100a0;
hv_flush_asid = TEXT_OFFSET + 0x100c0;
hv_flush_page = TEXT_OFFSET + 0x100e0;
hv_flush_pages = TEXT_OFFSET + 0x10100;
hv_restart = TEXT_OFFSET + 0x10120;
hv_halt = TEXT_OFFSET + 0x10140;
hv_power_off = TEXT_OFFSET + 0x10160;
hv_inquire_physical = TEXT_OFFSET + 0x10180;
hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
hv_inquire_asid = TEXT_OFFSET + 0x101e0;
hv_nanosleep = TEXT_OFFSET + 0x10200;
hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
hv_console_write = TEXT_OFFSET + 0x10240;
hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
hv_inquire_topology = TEXT_OFFSET + 0x10280;
hv_fs_findfile = TEXT_OFFSET + 0x102a0;
hv_fs_fstat = TEXT_OFFSET + 0x102c0;
hv_fs_pread = TEXT_OFFSET + 0x102e0;
hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
hv_get_command_line = TEXT_OFFSET + 0x10340;
hv_set_caching = TEXT_OFFSET + 0x10360;
hv_bzero_page = TEXT_OFFSET + 0x10380;
hv_register_message_state = TEXT_OFFSET + 0x103a0;
hv_send_message = TEXT_OFFSET + 0x103c0;
hv_receive_message = TEXT_OFFSET + 0x103e0;
hv_inquire_context = TEXT_OFFSET + 0x10400;
hv_start_all_tiles = TEXT_OFFSET + 0x10420;
hv_dev_open = TEXT_OFFSET + 0x10440;
hv_dev_close = TEXT_OFFSET + 0x10460;
hv_dev_pread = TEXT_OFFSET + 0x10480;
hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
hv_dev_poll = TEXT_OFFSET + 0x104c0;
hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
hv_dev_preada = TEXT_OFFSET + 0x10500;
hv_dev_pwritea = TEXT_OFFSET + 0x10520;
hv_flush_remote = TEXT_OFFSET + 0x10540;
hv_console_putc = TEXT_OFFSET + 0x10560;
hv_inquire_tiles = TEXT_OFFSET + 0x10580;
hv_confstr = TEXT_OFFSET + 0x105a0;
hv_reexec = TEXT_OFFSET + 0x105c0;
hv_set_command_line = TEXT_OFFSET + 0x105e0;
hv_dev_register_intr_state = TEXT_OFFSET + 0x10600;
hv_enable_intr = TEXT_OFFSET + 0x10620;
hv_disable_intr = TEXT_OFFSET + 0x10640;
hv_trigger_ipi = TEXT_OFFSET + 0x10660;
hv_store_mapping = TEXT_OFFSET + 0x10680;
hv_inquire_realpa = TEXT_OFFSET + 0x106a0;
hv_flush_all = TEXT_OFFSET + 0x106c0;
hv_glue_internals = TEXT_OFFSET + 0x106e0;
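Each vector above sits exactly 0x20 bytes past the previous one, so this linker script is the only glue needed: every C-visible symbol (prototyped in <hv/hypervisor.h>) lands directly on its hypervisor entry slot. A small stand-alone sketch of that stride invariant, using a copied subset of the table (offsets relative to TEXT_OFFSET, for illustration only):

#include <assert.h>

int main(void)
{
	const unsigned long vec[] = {
		0x10020,	/* hv_init */
		0x10040,	/* hv_install_context */
		0x10060,	/* hv_sysconf */
		0x10080,	/* hv_get_rtc */
	};
	int i;

	for (i = 1; i < (int)(sizeof(vec) / sizeof(vec[0])); i++)
		assert(vec[i] - vec[i - 1] == 0x20);	/* one slot per call */
	return 0;
}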
59
arch/tile/kernel/init_task.c
Normal file
@@ -0,0 +1,59 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/uaccess.h>

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry.
 */
union thread_union init_thread_union __init_task_data = {
	INIT_THREAD_INFO(init_task)
};

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/*
 * per-CPU stack and boot info.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
	(unsigned long)init_stack + THREAD_SIZE;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
 * The variable must be __initdata since it references __init code.
 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 */
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif
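A stand-alone sketch (with a made-up THREAD_SIZE) of why boot_sp above is init_stack + THREAD_SIZE: TILE stacks grow downward, so the initial stack pointer starts at the high end of the stack block and pushes move it back toward init_stack.

#include <stdio.h>

#define THREAD_SIZE (8 * 1024)	/* illustrative, not the real value */

static unsigned char init_stack[THREAD_SIZE];

int main(void)
{
	unsigned char *sp = init_stack + THREAD_SIZE;	/* empty stack */

	sp -= 16;	/* a 16-byte "push" */
	printf("%d bytes used, %d bytes free\n",
	       (int)(init_stack + THREAD_SIZE - sp), (int)(sp - init_stack));
	return 0;
}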
2006
arch/tile/kernel/intvec_32.S
Normal file
File diff suppressed because it is too large
227
arch/tile/kernel/irq.c
Normal file
@@ -0,0 +1,227 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>

/*
 * The set of interrupts we enable for raw_local_irq_enable().
 * This is initialized to have just a single interrupt that the kernel
 * doesn't actually use as a sentinel.  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt state */
DEFINE_PER_CPU(HV_IntrState, dev_intr_state);

DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);


/*
 * Interrupt dispatcher, invoked upon a hypervisor device interrupt downcall
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int irq;

	/*
	 * Get the device interrupt pending mask from where the hypervisor
	 * has tucked it away for us.
	 */
	unsigned long pending_dev_intr_mask = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);

	/* Track time spent here in an interrupt context. */
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk(KERN_EMERG "tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	for (irq = 0; pending_dev_intr_mask; ++irq) {
		if (pending_dev_intr_mask & 0x1) {
			generic_handle_irq(irq);

			/* Count device irqs; IPIs are counted elsewhere. */
			if (irq > HV_MAX_IPI_INTERRUPT)
				__get_cpu_var(irq_stat).irq_dev_intr_count++;
		}
		pending_dev_intr_mask >>= 1;
	}

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}


/* Mask an interrupt. */
static void hv_dev_irq_mask(unsigned int irq)
{
	HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state);
	hv_disable_intr(p_intr_state, 1 << irq);
}

/* Unmask an interrupt. */
static void hv_dev_irq_unmask(unsigned int irq)
{
	/* Re-enable the hypervisor to generate interrupts. */
	HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state);
	hv_enable_intr(p_intr_state, 1 << irq);
}

/*
 * The HV doesn't latch incoming interrupts while an interrupt is
 * disabled, so we need to reenable interrupts before running the
 * handler.
 *
 * ISSUE: Enabling the interrupt this early avoids any race conditions
 * but introduces the possibility of nested interrupt stack overflow.
 * An imminent change to the HV IRQ model will fix this.
 */
static void hv_dev_irq_ack(unsigned int irq)
{
	hv_dev_irq_unmask(irq);
}

/*
 * Since ack() reenables interrupts, there's nothing to do at eoi().
 */
static void hv_dev_irq_eoi(unsigned int irq)
{
}

static struct irq_chip hv_dev_irq_chip = {
	.typename = "hv_dev_irq_chip",
	.ack = hv_dev_irq_ack,
	.mask = hv_dev_irq_mask,
	.unmask = hv_dev_irq_unmask,
	.eoi = hv_dev_irq_eoi,
};

static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.name = "resched",
	.dev_id = handle_reschedule_ipi /* unique token */,
};

void __init init_IRQ(void)
{
	/* Bind IPI irqs. Does this belong somewhere else in init? */
	tile_irq_activate(IRQ_RESCHEDULE);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}

void __cpuinit init_per_tile_IRQs(void)
{
	int rc;

	/* Set the pointer to the per-tile device interrupt state. */
	HV_IntrState *sv_ptr = &__get_cpu_var(dev_intr_state);
	rc = hv_dev_register_intr_state(sv_ptr);
	if (rc != HV_OK)
		panic("hv_dev_register_intr_state: error %d", rc);
}

void tile_irq_activate(unsigned int irq)
{
	/*
	 * Paravirtualized drivers can call up to the HV to find out
	 * which irq they're associated with.  The HV interface
	 * doesn't provide a generic call for discovering all valid
	 * IRQs, so drivers must call this method to initialize newly
	 * discovered IRQs.
	 *
	 * We could also just initialize all 32 IRQs at startup, but
	 * doing so would lead to a kernel fault if an unexpected
	 * interrupt fires and jumps to a NULL action.  By deferring
	 * the set_irq_chip_and_handler() call, unexpected IRQs are
	 * handled properly by handle_bad_irq().
	 */
	hv_dev_irq_mask(irq);
	set_irq_chip_and_handler(irq, &hv_dev_irq_chip, handle_percpu_irq);
}

void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, " ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
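A stand-alone sketch of the dispatch idiom in tile_dev_intr() above: scan the pending-interrupt mask from bit 0 upward, handling each set bit as one irq. The mask value and handler here are illustrative only.

#include <stdio.h>

static void handle_irq(int irq)
{
	printf("dispatching irq %d\n", irq);
}

int main(void)
{
	unsigned long pending = 0x29;	/* IRQs 0, 3, and 5 pending */
	int irq;

	for (irq = 0; pending; ++irq) {
		if (pending & 0x1)
			handle_irq(irq);
		pending >>= 1;
	}
	return 0;
}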
291
arch/tile/kernel/machine_kexec.c
Normal file
@@ -0,0 +1,291 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * based on machine_kexec.c from other architectures in linux-2.6.18
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <hv/hypervisor.h>


/*
 * This stuff is not in elf.h and is not in any other kernel include.
 * This stuff is needed below in the little boot notes parser to
 * extract the command line so we can pass it to the hypervisor.
 */
struct Elf32_Bhdr {
	Elf32_Word b_signature;
	Elf32_Word b_size;
	Elf32_Half b_checksum;
	Elf32_Half b_records;
};
#define ELF_BOOT_MAGIC		0x0E1FB007
#define EBN_COMMAND_LINE	0x00000004
#define roundupsz(X) (((X) + 3) & ~3)

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */


void machine_shutdown(void)
{
	/*
	 * Normally we would stop all the other processors here, but
	 * the check in machine_kexec_prepare below ensures we'll only
	 * get this far if we've been booted with "nosmp" on the
	 * command line or without CONFIG_SMP so there's nothing to do
	 * here (for now).
	 */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * Cannot happen.  This type of kexec is disabled on this
	 * architecture (and enforced in machine_kexec_prepare below).
	 */
}


int machine_kexec_prepare(struct kimage *image)
{
	if (num_online_cpus() > 1) {
		printk(KERN_WARNING "%s: detected attempt to kexec "
		       "with num_online_cpus() > 1\n",
		       __func__);
		return -ENOSYS;
	}
	if (image->type != KEXEC_TYPE_DEFAULT) {
		printk(KERN_WARNING "%s: detected attempt to kexec "
		       "with unsupported type: %d\n",
		       __func__,
		       image->type);
		return -ENOSYS;
	}
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	/*
	 * We did nothing in machine_kexec_prepare,
	 * so we have nothing to do here.
	 */
}

/*
 * If we can find elf boot notes on this page, return the command
 * line.  Otherwise, silently return null.  Somewhat kludgy, but no
 * good way to do this without significantly rearchitecting the
 * architecture-independent kexec code.
 */

static unsigned char *kexec_bn2cl(void *pg)
{
	struct Elf32_Bhdr *bhdrp;
	Elf32_Nhdr *nhdrp;
	unsigned char *desc;
	unsigned char *command_line;
	__sum16 csum;

	bhdrp = (struct Elf32_Bhdr *) pg;

	/*
	 * This routine is invoked for every source page, so make
	 * sure to quietly ignore every impossible page.
	 */
	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
	    bhdrp->b_size > PAGE_SIZE)
		return 0;

	/*
	 * If we get a checksum mismatch, it's possible that this is
	 * just a false positive, but relatively unlikely.  We dump
	 * out the contents of the section so we can diagnose better.
	 */
	csum = ip_compute_csum(pg, bhdrp->b_size);
	if (csum != 0) {
		int i;
		unsigned char *p = pg;
		int nbytes = min((Elf32_Word)1000, bhdrp->b_size);
		printk(KERN_INFO "%s: bad checksum %#x\n", __func__, csum);
		printk(KERN_INFO "bytes (%d):", bhdrp->b_size);
		for (i = 0; i < nbytes; ++i)
			printk(" %02x", p[i]);
		if (bhdrp->b_size != nbytes)
			printk(" ...");
		printk("\n");
		return 0;
	}

	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);

	while (nhdrp->n_type != EBN_COMMAND_LINE) {

		desc = (unsigned char *) (nhdrp + 1);
		desc += roundupsz(nhdrp->n_descsz);

		nhdrp = (Elf32_Nhdr *) desc;

		/* still in bounds? */
		if ((unsigned char *) (nhdrp + 1) >
		    ((unsigned char *) pg) + bhdrp->b_size) {

			printk(KERN_INFO "%s: out of bounds\n", __func__);
			return 0;
		}
	}

	command_line = (unsigned char *) (nhdrp + 1);
	desc = command_line;

	while (*desc != '\0') {
		desc++;
		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
			printk(KERN_INFO "%s: ran off end of page\n",
			       __func__);
			return 0;
		}
	}

	return command_line;
}

static void kexec_find_and_set_command_line(struct kimage *image)
{
	kimage_entry_t *ptr, entry;

	unsigned char *command_line = 0;
	unsigned char *r;
	HV_Errno hverr;

	for (ptr = &image->head;
	     (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {

		if ((entry & IND_SOURCE)) {
			void *va =
				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
			r = kexec_bn2cl(va);
			if (r) {
				command_line = r;
				break;
			}
			kunmap_atomic(va, KM_USER0);
		}
	}

	if (command_line != 0) {
		printk(KERN_INFO "setting new command line to \"%s\"\n",
		       command_line);

		hverr = hv_set_command_line(
			(HV_VirtAddr) command_line, strlen(command_line));
		kunmap_atomic(command_line, KM_USER0);
	} else {
		printk(KERN_INFO "%s: no command line found; making empty\n",
		       __func__);
		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
	}
	if (hverr) {
		printk(KERN_WARNING
		       "%s: call to hv_set_command_line returned error: %d\n",
		       __func__, hverr);
	}
}

/*
 * The kexec code range-checks all its PAs, so to avoid having it run
 * amok and allocate memory and then sequester it from every other
 * controller, we force it to come from controller zero.  We also
 * disable the oom-killer since if we do end up running out of memory,
 * that almost certainly won't help.
 */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
{
	gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
	return alloc_pages_node(0, gfp_mask, order);
}

static void setup_quasi_va_is_pa(void)
{
	HV_PTE *pgtable;
	HV_PTE pte;
	int i;

	/*
	 * Flush our TLB to prevent conflicts between the previous contents
	 * and the new stuff we're about to add.
	 */
	local_flush_tlb_all();

	/* setup VA is PA, at least up to PAGE_OFFSET */

	pgtable = (HV_PTE *)current->mm->pgd;
	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);

	for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
		pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
}


NORET_TYPE void machine_kexec(struct kimage *image)
{
	void *reboot_code_buffer;
	NORET_TYPE void (*rnk)(unsigned long, void *, unsigned long)
		ATTRIB_NORET;

	/* Mask all interrupts before starting to reboot. */
	interrupt_mask_set_mask(~0ULL);

	kexec_find_and_set_command_line(image);

	/*
	 * Adjust the home caching of the control page to be cached on
	 * this cpu, and copy the assembly helper into the control
	 * code page, which we map in the vmalloc area.
	 */
	homecache_change_page_home(image->control_code_page, 0,
				   smp_processor_id());
	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
	memcpy(reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
	__flush_icache_range(
		(unsigned long) reboot_code_buffer,
		(unsigned long) reboot_code_buffer + relocate_new_kernel_size);

	setup_quasi_va_is_pa();

	/* now call it */
	rnk = reboot_code_buffer;
	(*rnk)(image->head, reboot_code_buffer, image->start);
}
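A stand-alone sketch (not kernel code) of the note walk in kexec_bn2cl() above: ELF notes are variable-length records whose payloads are padded to 4 bytes, so the cursor advances by the header size plus the rounded-up descriptor size. The note contents below are fabricated purely for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define roundupsz(X) (((X) + 3) & ~3)

struct nhdr {			/* stands in for Elf32_Nhdr */
	uint32_t n_namesz, n_descsz, n_type;
};

int main(void)
{
	unsigned char buf[64];
	struct nhdr *n = (struct nhdr *)buf;
	unsigned char *cursor;

	/* One note of type 1 with a 5-byte payload (padded to 8). */
	n->n_namesz = 0;
	n->n_descsz = 5;
	n->n_type = 1;
	memcpy(n + 1, "hello", 5);

	/* Step past it exactly as the kernel loop does. */
	cursor = (unsigned char *)(n + 1) + roundupsz(n->n_descsz);
	printf("next note starts at offset %d\n", (int)(cursor - buf));
	return 0;
}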
115
arch/tile/kernel/messaging.c
Normal file
@@ -0,0 +1,115 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <asm/hv_driver.h>
#include <asm/irq_regs.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* All messages are stored here */
static DEFINE_PER_CPU(HV_MsgState, msg_state);

void __cpuinit init_messaging(void)
{
	/* Allocate storage for messages in kernel space */
	HV_MsgState *state = &__get_cpu_var(msg_state);
	int rc = hv_register_message_state(state);
	if (rc != HV_OK)
		panic("hv_register_message_state: error %d", rc);

	/* Make sure downcall interrupts will be enabled. */
	raw_local_irq_unmask(INT_INTCTRL_1);
}

void hv_message_intr(struct pt_regs *regs, int intnum)
{
	/*
	 * We enter with interrupts disabled and leave them disabled,
	 * to match expectations of called functions (e.g.
	 * do_ccupdate_local() in mm/slab.c).  This is also consistent
	 * with normal call entry for device interrupts.
	 */

	int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
	HV_RcvMsgInfo rmi;
	int nmsgs = 0;

	/* Track time spent here in an interrupt context */
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk(KERN_EMERG "hv_message_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	while (1) {
		rmi = hv_receive_message(__get_cpu_var(msg_state),
					 (HV_VirtAddr) message,
					 sizeof(message));
		if (rmi.msglen == 0)
			break;

		if (rmi.msglen < 0)
			panic("hv_receive_message failed: %d", rmi.msglen);

		++nmsgs;

		if (rmi.source == HV_MSG_TILE) {
			int tag;

			/* we just send tags for now */
			BUG_ON(rmi.msglen != sizeof(int));

			tag = message[0];
#ifdef CONFIG_SMP
			evaluate_message(message[0]);
#else
			panic("Received IPI message %d in UP mode", tag);
#endif
		} else if (rmi.source == HV_MSG_INTR) {
			HV_IntrMsg *him = (HV_IntrMsg *)message;
			struct hv_driver_cb *cb =
				(struct hv_driver_cb *)him->intarg;
			cb->callback(cb, him->intdata);
			__get_cpu_var(irq_stat).irq_hv_msg_count++;
		}
	}

	/*
	 * We shouldn't have gotten a message downcall with no
	 * messages available.
	 */
	if (nmsgs == 0)
		panic("Message downcall invoked with no messages!");

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
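A stand-alone sketch of the drain idiom hv_message_intr() uses: keep pulling messages until the queue reports empty, and treat "invoked with nothing queued" as an error. fake_receive() is a stub standing in for hv_receive_message().

#include <assert.h>
#include <stdio.h>

struct rcv_info { int msglen; int tag; };

static struct rcv_info fake_receive(void)
{
	static int queued = 3;
	struct rcv_info r = { 0, 0 };
	if (queued > 0) {
		r.msglen = (int)sizeof(int);
		r.tag = queued--;
	}
	return r;
}

int main(void)
{
	int nmsgs = 0;
	for (;;) {
		struct rcv_info r = fake_receive();
		if (r.msglen == 0)
			break;			/* queue drained */
		printf("handling message tag %d\n", r.tag);
		++nmsgs;
	}
	assert(nmsgs > 0);	/* a downcall with no messages is a bug */
	return 0;
}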
257
arch/tile/kernel/module.c
Normal file
@@ -0,0 +1,257 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Based on i386 version, copyright (C) 2001 Rusty Russell.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/opcode-tile.h>
#include <asm/pgtable.h>

#ifdef __tilegx__
# define Elf_Rela Elf64_Rela
# define ELF_R_SYM ELF64_R_SYM
# define ELF_R_TYPE ELF64_R_TYPE
#else
# define Elf_Rela Elf32_Rela
# define ELF_R_SYM ELF32_R_SYM
# define ELF_R_TYPE ELF32_R_TYPE
#endif

#ifdef MODULE_DEBUG
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

/*
 * Allocate some address space in the range MEM_MODULE_START to
 * MEM_MODULE_END and populate it with memory.
 */
void *module_alloc(unsigned long size)
{
	struct page **pages;
	pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
	struct vm_struct *area;
	int i = 0;
	int npages;

	if (size == 0)
		return NULL;
	npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return NULL;
	for (; i < npages; ++i) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[i])
			goto error;
	}

	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
	if (!area)
		goto error;

	if (map_vm_area(area, prot_rwx, &pages)) {
		vunmap(area->addr);
		goto error;
	}

	return area->addr;

error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}


/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/*
	 * FIXME: If module_region == mod->init_region, trim exception
	 * table entries.
	 */
}

/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	return 0;
}

int apply_relocate(Elf_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: .rel relocation unsupported\n", me->name);
	return -ENOEXEC;
}

#ifdef __tilegx__
/*
 * Validate that the high 16 bits of "value" is just the sign-extension of
 * the low 48 bits.
 */
static int validate_hw2_last(long value, struct module *me)
{
	if (((value << 16) >> 16) != value) {
		printk("module %s: Out of range HW2_LAST value %#lx\n",
		       me->name, value);
		return 0;
	}
	return 1;
}

/*
 * Validate that "value" isn't too big to hold in a JumpOff relocation.
 */
static int validate_jumpoff(long value)
{
	/* Determine size of jump offset. */
	int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));

	/* Check to see if it fits into the relocation slot. */
	long f = get_JumpOff_X1(create_JumpOff_X1(value));
	f = (f << shift) >> shift;

	return f == value;
}
#endif

int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u64 *location;
	unsigned long value;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/*
		 * This is the symbol it is referring to.
		 * Note that all undefined symbols have been resolved.
		 */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_R_SYM(rel[i].r_info);
		value = sym->st_value + rel[i].r_addend;

		switch (ELF_R_TYPE(rel[i].r_info)) {

#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))

#ifndef __tilegx__
		case R_TILE_32:
			*(uint32_t *)location = value;
			break;
		case R_TILE_IMM16_X0_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X0_LO:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILE_IMM16_X1_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X1_LO:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILE_JOFFLONG_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			MUNGE(create_JOffLong_X1);
			break;
#else
		case R_TILEGX_64:
			*location = value;
			break;
		case R_TILEGX_IMM16_X0_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW0:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILEGX_IMM16_X1_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW0:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILEGX_JUMPOFF_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			if (!validate_jumpoff(value)) {
				printk("module %s: Out of range jump to"
				       " %#llx at %#llx (%p)\n", me->name,
				       sym->st_value + rel[i].r_addend,
				       rel[i].r_offset, location);
				return -ENOEXEC;
			}
			MUNGE(create_JumpOff_X1);
			break;
#endif

#undef MUNGE

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %d\n",
			       me->name, (int) ELF_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* FIXME: perhaps remove the "writable" bit from the TLB? */
	return 0;
}

void module_arch_cleanup(struct module *mod)
{
}
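The _HA ("high adjusted") relocation cases above bias the value by 0x8000 before taking the high half, because the matching _LO half is sign-extended when the instruction later adds it. A stand-alone sketch of that round trip; the helper names here are made up for illustration.

#include <assert.h>
#include <stdint.h>

/* High half, adjusted for the sign of the low half. */
static uint32_t ha16(uint32_t value) { return (value + 0x8000) >> 16; }
/* Low half, as the CPU will sign-extend it. */
static int16_t lo16(uint32_t value) { return (int16_t)(value & 0xffff); }

int main(void)
{
	uint32_t v = 0x1234abcd;

	/* auli adds ha16 << 16; addli then adds the sign-extended lo16. */
	assert(((ha16(v) << 16) + (int32_t)lo16(v)) == v);
	return 0;
}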
252
arch/tile/kernel/pci-dma.c
Normal file
@@ -0,0 +1,252 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	/* Set GFP_KERNEL to ensure we have memory with a kernel VA. */
	gfp |= GFP_KERNEL | __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		homecache_free_pages(addr, order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */


/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	struct page *page;
	dma_addr_t dma_addr;
	int thispage;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	dma_addr = __pa(ptr);

	/* We might have been handed a buffer that wraps a page boundary */
	while ((int)size > 0) {
		/* The amount to flush that's on this page */
		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
		thispage = min((int)thispage, (int)size);
		/* Is this valid for any page we could be handed? */
		page = pfn_to_page(kaddr_to_pfn(ptr));
		homecache_flush_cache(page, 0);
		ptr += thispage;
		size -= thispage;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		struct page *page;
		sg->dma_address = sg_phys(sg);
		page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
		homecache_flush_cache(page, 0);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);
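A minimal sketch of how a driver might use the streaming interface defined above; "my_dev", "buf", and "len" are hypothetical, error handling is elided, and the CPU must not touch the buffer between map and unmap.

#include <linux/dma-mapping.h>

/* Hypothetical transmit path for some device (illustration only). */
static void example_tx(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Flush the buffer out of the cache and get a device address. */
	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with "handle" and wait for completion ... */

	/* Give the buffer back to the CPU. */
	dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);
}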
91
arch/tile/kernel/proc.c
Normal file
@@ -0,0 +1,91 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/timex.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/hardirq.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/homecache.h>
#include <arch/chip.h>


/*
 * Support /proc/cpuinfo
 */

#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)

static int show_cpuinfo(struct seq_file *m, void *v)
{
	int n = ptr_to_cpu(v);

	if (n == 0) {
		char buf[NR_CPUS*5];
		cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
		seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
		seq_printf(m, "cpu list\t: %s\n", buf);
		seq_printf(m, "model name\t: %s\n", chip_model);
		seq_printf(m, "flags\t\t:\n"); /* nothing for now */
		seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
			   get_clock_rate() / 1000000,
			   (get_clock_rate() % 1000000));
		seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
			   loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ)) % 100);
	}

#ifdef CONFIG_SMP
	if (!cpu_online(n))
		return 0;
#endif

	seq_printf(m, "processor\t: %d\n", n);

	/* Print only num_online_cpus() blank lines total. */
	if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids)
		seq_printf(m, "\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
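seq_file treats a NULL return from start()/next() as end of iteration, so cpu 0 cannot be passed through as a plain pointer value; the +1 bias in cpu_to_ptr()/ptr_to_cpu() above sidesteps that. A stand-alone sketch of the trick, with the two macros copied locally:

#include <assert.h>

#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)

int main(void)
{
	/* cpu 0 still maps to a non-NULL cookie, so iteration continues. */
	assert(cpu_to_ptr(0) != (void *)0);
	/* And the round trip recovers the cpu number. */
	assert(ptr_to_cpu(cpu_to_ptr(5)) == 5);
	return 0;
}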
647
arch/tile/kernel/process.c
Normal file
@@ -0,0 +1,647 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <asm/system.h>
#include <asm/stack.h>
#include <asm/homecache.h>
#include <arch/chip.h>
#include <arch/abi.h>


/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	extern void _cpu_idle(void);
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct page *page;
	int flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	flags |= __GFP_ZERO;
#endif

	page = alloc_pages(flags, THREAD_SIZE_ORDER);
	if (!page)
		return 0;

	return (struct thread_info *)page_address(page);
}

/*
 * Free a thread_info node, and all of its derivative
 * data structures.
 */
void free_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

	if (step_state) {

		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}

	free_page((unsigned long)info);
}

static void save_arch_state(struct thread_struct *t);

extern void ret_from_fork(void);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;  /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * We have to restore the callee-saved registers since we may
	 * be cloning a userspace task with userspace register state,
	 * and we won't be unwinding the same kernel frames to restore them.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;  /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;  /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif


	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (void *)tsk > high_memory ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		printk("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
|
||||
#else
|
||||
t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
|
||||
#endif
|
||||
t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
|
||||
t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
|
||||
t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
|
||||
t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
|
||||
t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
|
||||
t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
|
||||
t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
|
||||
#if CHIP_HAS_PROC_STATUS_SPR()
|
||||
t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void restore_arch_state(const struct thread_struct *t)
|
||||
{
|
||||
#if CHIP_HAS_SPLIT_INTR_MASK()
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
|
||||
#else
|
||||
__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
|
||||
#endif
|
||||
__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
|
||||
__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
|
||||
__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
|
||||
__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
|
||||
__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
|
||||
__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
|
||||
__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
|
||||
#if CHIP_HAS_PROC_STATUS_SPR()
|
||||
__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
|
||||
#endif
|
||||
#if CHIP_HAS_TILE_RTF_HWM()
|
||||
/*
|
||||
* Clear this whenever we switch back to a process in case
|
||||
* the previous process was monkeying with it. Even if enabled
|
||||
* in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
|
||||
* performance hint, so isn't worth a full save/restore.
|
||||
*/
|
||||
__insn_mtspr(SPR_TILE_RTF_HWM, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void _prepare_arch_switch(struct task_struct *next)
|
||||
{
|
||||
#if CHIP_HAS_SN_PROC()
|
||||
int snctl;
|
||||
#endif
|
||||
#if CHIP_HAS_TILE_DMA()
|
||||
struct tile_dma_state *dma = ¤t->thread.tile_dma_state;
|
||||
if (dma->enabled)
|
||||
save_tile_dma_state(dma);
|
||||
#endif
|
||||
#if CHIP_HAS_SN_PROC()
|
||||
/*
|
||||
* Suspend the static network processor if it was running.
|
||||
* We do not suspend the fabric itself, just like we don't
|
||||
* try to suspend the UDN.
|
||||
*/
|
||||
snctl = __insn_mfspr(SPR_SNCTL);
|
||||
current->thread.sn_proc_running =
|
||||
(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
|
||||
if (current->thread.sn_proc_running)
|
||||
__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
extern struct task_struct *__switch_to(struct task_struct *prev,
|
||||
struct task_struct *next,
|
||||
unsigned long new_system_save_1_0);
|
||||
|
||||
struct task_struct *__sched _switch_to(struct task_struct *prev,
|
||||
struct task_struct *next)
|
||||
{
|
||||
/* DMA state is already saved; save off other arch state. */
|
||||
save_arch_state(&prev->thread);
|
||||
|
||||
#if CHIP_HAS_TILE_DMA()
|
||||
/*
|
||||
* Restore DMA in new task if desired.
|
||||
* Note that it is only safe to restart here since interrupts
|
||||
* are disabled, so we can't take any DMATLB miss or access
|
||||
* interrupts before we have finished switching stacks.
|
||||
*/
|
||||
if (next->thread.tile_dma_state.enabled) {
|
||||
restore_tile_dma_state(&next->thread);
|
||||
grant_dma_mpls();
|
||||
} else {
|
||||
restrict_dma_mpls();
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Restore other arch state. */
|
||||
restore_arch_state(&next->thread);
|
||||
|
||||
#if CHIP_HAS_SN_PROC()
|
||||
/*
|
||||
* Restart static network processor in the new process
|
||||
* if it was running before.
|
||||
*/
|
||||
if (next->thread.sn_proc_running) {
|
||||
int snctl = __insn_mfspr(SPR_SNCTL);
|
||||
__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Switch kernel SP, PC, and callee-saved registers.
|
||||
* In the context of the new task, return the old task pointer
|
||||
* (i.e. the task that actually called __switch_to).
|
||||
* Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
|
||||
*/
|
||||
return __switch_to(prev, next, next_current_ksp0(next));
|
||||
}
|
||||
|
||||
int _sys_fork(struct pt_regs *regs)
|
||||
{
|
||||
return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
int _sys_clone(unsigned long clone_flags, unsigned long newsp,
|
||||
int __user *parent_tidptr, int __user *child_tidptr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if (!newsp)
|
||||
newsp = regs->sp;
|
||||
return do_fork(clone_flags, newsp, regs, 0,
|
||||
parent_tidptr, child_tidptr);
|
||||
}
|
||||
|
||||
int _sys_vfork(struct pt_regs *regs)
|
||||
{
|
||||
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
|
||||
regs, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* sys_execve() executes a new program.
|
||||
*/
|
||||
int _sys_execve(char __user *path, char __user *__user *argv,
|
||||
char __user *__user *envp, struct pt_regs *regs)
|
||||
{
|
||||
int error;
|
||||
char *filename;
|
||||
|
||||
filename = getname(path);
|
||||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
error = do_execve(filename, argv, envp, regs);
|
||||
putname(filename);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
int _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
|
||||
compat_uptr_t __user *envp, struct pt_regs *regs)
|
||||
{
|
||||
int error;
|
||||
char *filename;
|
||||
|
||||
filename = getname(path);
|
||||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
error = compat_do_execve(filename, argv, envp, regs);
|
||||
putname(filename);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
struct KBacktraceIterator kbt;
|
||||
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
|
||||
for (KBacktraceIterator_init(&kbt, p, NULL);
|
||||
!KBacktraceIterator_end(&kbt);
|
||||
KBacktraceIterator_next(&kbt)) {
|
||||
if (!in_sched_functions(kbt.it.pc))
|
||||
return kbt.it.pc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* We pass in lr as zero (cleared in kernel_thread) and the caller
|
||||
* part of the backtrace ABI on the stack also zeroed (in copy_thread)
|
||||
* so that backtraces will stop with this function.
|
||||
* Note that we don't use r0, since copy_thread() clears it.
|
||||
*/
|
||||
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
|
||||
{
|
||||
do_exit(fn(arg));
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a kernel thread
|
||||
*/
|
||||
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
{
|
||||
struct pt_regs regs;
|
||||
|
||||
memset(®s, 0, sizeof(regs));
|
||||
regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */
|
||||
regs.pc = (long) start_kernel_thread;
|
||||
regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */
|
||||
regs.regs[1] = (long) fn; /* function pointer */
|
||||
regs.regs[2] = (long) arg; /* parameter register */
|
||||
|
||||
/* Ok, create the new process.. */
|
||||
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s,
|
||||
0, NULL, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(kernel_thread);
|
||||
|
||||
/* Flush thread state. */
|
||||
void flush_thread(void)
|
||||
{
|
||||
/* Nothing */
|
||||
}
|
||||
|
||||
/*
|
||||
* Free current thread data structures etc..
|
||||
*/
|
||||
void exit_thread(void)
|
||||
{
|
||||
/* Nothing */
|
||||
}
|
||||
|
||||
#ifdef __tilegx__
|
||||
# define LINECOUNT 3
|
||||
# define EXTRA_NL "\n"
|
||||
#else
|
||||
# define LINECOUNT 4
|
||||
# define EXTRA_NL ""
|
||||
#endif
|
||||
|
||||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
struct task_struct *tsk = validate_current();
|
||||
int i, linebreak;
|
||||
printk("\n");
|
||||
printk(" Pid: %d, comm: %20s, CPU: %d\n",
|
||||
tsk->pid, tsk->comm, smp_processor_id());
|
||||
for (i = linebreak = 0; i < 53; ++i) {
|
||||
printk(" r%-2d: "REGFMT, i, regs->regs[i]);
|
||||
if (++linebreak == LINECOUNT) {
|
||||
linebreak = 0;
|
||||
printk("\n");
|
||||
}
|
||||
}
|
||||
printk(" tp : "REGFMT EXTRA_NL " sp : "REGFMT" lr : "REGFMT"\n",
|
||||
regs->tp, regs->sp, regs->lr);
|
||||
printk(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
|
||||
regs->pc, regs->ex1, regs->faultnum);
|
||||
|
||||
dump_stack_regs(regs);
|
||||
}
|
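The newsp fallback in _sys_clone() above is what gives a raw clone with a null child stack its fork-like behavior. A minimal userspace sketch of that path follows, for illustration only (not part of this commit); the raw clone syscall's argument order varies by architecture, so the syscall() form below is an assumption that holds on most ports:

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Passing a zero child stack asks the kernel to reuse the
	 * parent's sp, i.e. the "if (!newsp) newsp = regs->sp"
	 * branch in _sys_clone() above.
	 */
	long pid = syscall(SYS_clone, SIGCHLD, 0UL, NULL, NULL);
	if (pid == 0) {
		printf("child: running on a copy of the parent's stack\n");
		_exit(0);
	}
	if (pid > 0)
		waitpid((pid_t)pid, NULL, 0);	/* reap the fork-like child */
	return pid < 0;
}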
203
arch/tile/kernel/ptrace.c
Normal file
@@ -0,0 +1,203 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Copied from i386: Ross Biro 1/23/92
 */

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/compat.h>
#include <linux/uaccess.h>

void user_enable_single_step(struct task_struct *child)
{
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * This routine will put a word on the process's privileged stack.
 */
static void putreg(struct task_struct *task,
		   unsigned long addr, unsigned long value)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	childregs->regs[regno] = value;
	childregs->flags |= PT_FLAGS_RESTORE_REGS;
}

static unsigned long getreg(struct task_struct *task, unsigned long addr)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	return childregs->regs[regno];
}

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * These two are currently unused, but will be set by arch_ptrace()
	 * and used in the syscall assembly when we do support them.
	 */
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	unsigned long __user *datap;
	unsigned long tmp;
	int i;
	long ret = -EIO;

#ifdef CONFIG_COMPAT
	if (task_thread_info(current)->status & TS_COMPAT)
		data = (u32)data;
	if (task_thread_info(child)->status & TS_COMPAT)
		addr = (u32)addr;
#endif
	datap = (unsigned long __user *)data;

	switch (request) {

	case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
		if (addr & (sizeof(data)-1))
			break;
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;
		tmp = getreg(child, addr);   /* Read register */
		ret = put_user(tmp, datap);
		break;

	case PTRACE_POKEUSR:  /* Write register in pt_regs. */
		if (addr & (sizeof(data)-1))
			break;
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;
		putreg(child, addr, data);   /* Write register */
		break;

	case PTRACE_GETREGS:  /* Get all registers from the child. */
		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __put_user(getreg(child, i), datap);
			if (ret != 0)
				break;
			datap++;
		}
		break;

	case PTRACE_SETREGS:  /* Set all registers in the child. */
		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __get_user(tmp, datap);
			if (ret != 0)
				break;
			putreg(child, i, tmp);
			datap++;
		}
		break;

	case PTRACE_GETFPREGS:  /* Get the child FPU state. */
	case PTRACE_SETFPREGS:  /* Set the child FPU state. */
		break;

	case PTRACE_SETOPTIONS:
		/* Support TILE-specific ptrace options. */
		child->ptrace &= ~PT_TRACE_MASK_TILE;
		tmp = data & PTRACE_O_MASK_TILE;
		data &= ~PTRACE_O_MASK_TILE;
		ret = ptrace_request(child, request, addr, data);
		if (tmp & PTRACE_O_TRACEMIGRATE)
			child->ptrace |= PT_TRACE_MIGRATE;
		break;

	default:
#ifdef CONFIG_COMPAT
		if (task_thread_info(current)->status & TS_COMPAT) {
			ret = compat_ptrace_request(child, request,
						    addr, data);
			break;
		}
#endif
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
/* Not used; we handle compat issues in arch_ptrace() directly. */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	BUG();
}
#endif

void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to distinguish
	 * between a syscall stop and SIGTRAP delivery
	 */
	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *) regs->pc;

	/* Send us the fakey SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
	send_sigtrap(current, regs, fault_num);
}
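For context, the PTRACE_PEEKUSR path that arch_ptrace() services above can be exercised from userspace roughly as follows. This is an illustrative sketch, not part of the commit; glibc exposes the request as PTRACE_PEEKUSER, and treating offset 0 as the first pt_regs slot (r0) is an assumption about this architecture's register layout:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, NULL, 0);		/* child stopped at exec */

	/* Word-aligned offsets only; arch_ptrace() rejects the rest. */
	long r0 = ptrace(PTRACE_PEEKUSER, pid, (void *)0L, NULL);
	printf("child r0 = %#lx\n", r0);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}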
52
arch/tile/kernel/reboot.c
Normal file
@@ -0,0 +1,52 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/stddef.h>
#include <linux/reboot.h>
#include <linux/smp.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>

#ifndef CONFIG_SMP
#define smp_send_stop()
#endif

void machine_halt(void)
{
	warn_early_printk();
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_halt();
}

void machine_power_off(void)
{
	warn_early_printk();
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_power_off();
}

void machine_restart(char *cmd)
{
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
}

/*
 * Power off function, if any
 */
void (*pm_power_off)(void) = machine_power_off;
145
arch/tile/kernel/regs_32.S
Normal file
@@ -0,0 +1,145 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <arch/spr_def.h>
#include <asm/processor.h>

/*
 * See <asm/system.h>; called with prev and next task_struct pointers.
 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
 *
 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
 * We also need to save all the callee-saved registers on the stack.
 *
 * Intel enables/disables access to the hardware cycle counter in
 * seccomp (secure computing) environments if necessary, based on
 * has_secure_computing().  We might want to do this at some point,
 * though it would require virtualizing the other SPRs under WORLD_ACCESS.
 *
 * Since we're saving to the stack, we omit sp from this list.
 * And for parallels with other architectures, we save lr separately,
 * in the thread_struct itself (as the "pc" field).
 *
 * This code also needs to be aligned with process.c copy_thread()
 */

#if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/system.h> and kernel/entry.S
#endif
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)

#define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 }
#define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 }
#define FOR_EACH_CALLEE_SAVED_REG(f)                                    \
                                                        f(r30); f(r31); \
        f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
        f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
        f(r48); f(r49); f(r50); f(r51); f(r52);

STD_ENTRY_SECTION(__switch_to, .sched.text)
	{
	  move r10, sp
	  sw sp, lr
	  addi sp, sp, -FRAME_SIZE
	}
	{
	  addi r11, sp, 4
	  addi r12, sp, 8
	}
	{
	  sw r11, r10
	  addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
	}
	{
	  lw r13, r4   /* Load new sp to a temp register early. */
	  addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
	}
	FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
	{
	  sw r3, sp
	  addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
	}
	{
	  sw r3, lr
	  addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
	}
	{
	  lw lr, r4
	  addi r12, r13, 8
	}
	{
	  /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
	  move sp, r13
	  mtspr SYSTEM_SAVE_1_0, r2
	}
	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
.L__switch_to_pc:
	{
	  addi sp, sp, FRAME_SIZE
	  jrp lr   /* r0 is still valid here, so return it */
	}
	STD_ENDPROC(__switch_to)

/* Return a suitable address for the backtracer for suspended threads */
STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
	lnk r0
	{
	  addli r0, r0, .L__switch_to_pc - .
	  jrp lr
	}
	STD_ENDPROC(get_switch_to_pc)

STD_ENTRY(get_pt_regs)
	.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
		 r8, r9, r10, r11, r12, r13, r14, r15, \
		 r16, r17, r18, r19, r20, r21, r22, r23, \
		 r24, r25, r26, r27, r28, r29, r30, r31, \
		 r32, r33, r34, r35, r36, r37, r38, r39, \
		 r40, r41, r42, r43, r44, r45, r46, r47, \
		 r48, r49, r50, r51, r52, tp, sp
	{
	  sw r0, \reg
	  addi r0, r0, 4
	}
	.endr
	{
	  sw r0, lr
	  addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
	}
	lnk r1
	{
	  sw r0, r1
	  addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	mfspr r1, INTERRUPT_CRITICAL_SECTION
	shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
	ori r1, r1, KERNEL_PL
	{
	  sw r0, r1
	  addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	}
	{
	  sw r0, zero       /* clear faultnum */
	  addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
	}
	{
	  sw r0, zero       /* clear orig_r0 */
	  addli r0, r0, -PTREGS_OFFSET_ORIG_R0   /* restore r0 to base */
	}
	jrp lr
	STD_ENDPROC(get_pt_regs)
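For orientation, FRAME_SIZE above works out to (2 + 24) * 4 = 104 bytes. A hedged C picture of the frame __switch_to builds at the new sp follows; the struct and field names are invented for illustration, the slot assignments are inferred from the stores above, and note that lr itself lands at the pre-decrement sp (the caller's C ABI save area), since stores within a bundle see the register values from before the bundle executed:

/* Illustrative only; layout inferred from the SAVE_REG stores above. */
struct switch_to_frame {
	unsigned long abi_save;		/* sp + 0: C ABI save-area word */
	unsigned long caller_sp;	/* sp + 4: stored by "sw r11, r10" */
	unsigned long callee_saved[24];	/* sp + 8 ..: r30..r52, in order */
};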
280
arch/tile/kernel/relocate_kernel.S
Normal file
@@ -0,0 +1,280 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * copy new kernel into place and then call hv_reexec
 *
 */

#include <linux/linkage.h>
#include <arch/chip.h>
#include <asm/page.h>
#include <hv/hypervisor.h>

#define ___hvb	MEM_SV_INTRPT + HV_GLUE_START_CPA

#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))

#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)

#undef RELOCATE_NEW_KERNEL_VERBOSE

STD_ENTRY(relocate_new_kernel)

	move	r30, r0		/* page list */
	move	r31, r1		/* address of page we are on */
	move	r32, r2		/* start address of new kernel */

	shri	r1, r1, PAGE_SHIFT
	addi	r1, r1, 1
	shli	sp, r1, PAGE_SHIFT
	addi	sp, sp, -8
	/* we now have a stack (whether we need one or not) */

	moveli	r40, lo16(___hv_console_putc)
	auli	r40, r40, ha16(___hv_console_putc)

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'r'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'n'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'k'
	jalr	r40

	moveli	r0, '\n'
	jalr	r40
#endif

	/*
	 * Throughout this code r30 is pointer to the element of page
	 * list we are working on.
	 *
	 * Normally we get to the next element of the page list by
	 * incrementing r30 by four.  The exception is if the element
	 * on the page list is an IND_INDIRECTION in which case we use
	 * the element with the low bits masked off as the new value
	 * of r30.
	 *
	 * To get this started, we need the value passed to us (which
	 * will always be an IND_INDIRECTION) in memory somewhere with
	 * r30 pointing at it.  To do that, we push the value passed
	 * to us on the stack and make r30 point to it.
	 */

	sw	sp, r30
	move	r30, sp
	addi	sp, sp, -8

#if CHIP_HAS_CBOX_HOME_MAP()
	/*
	 * On TILEPro, we need to flush all tiles' caches, since we may
	 * have been doing hash-for-home caching there.  Note that we
	 * must do this _after_ we're completely done modifying any memory
	 * other than our output buffer (which we know is locally cached).
	 * We want the caches to be fully clean when we do the reexec,
	 * because the hypervisor is going to do this flush again at that
	 * point, and we don't want that second flush to overwrite any memory.
	 */
	{
	  move	r0, zero	/* cache_pa */
	  move	r1, zero
	}
	{
	  auli	r2, zero, ha16(HV_FLUSH_EVICT_L2)	/* cache_control */
	  movei	r3, -1		/* cache_cpumask; -1 means all client tiles */
	}
	{
	  move	r4, zero	/* tlb_va */
	  move	r5, zero	/* tlb_length */
	}
	{
	  move	r6, zero	/* tlb_pgsize */
	  move	r7, zero	/* tlb_cpumask */
	}
	{
	  move	r8, zero	/* asids */
	  moveli r20, lo16(___hv_flush_remote)
	}
	{
	  move	r9, zero	/* asidcount */
	  auli	r20, r20, ha16(___hv_flush_remote)
	}

	jalr	r20
#endif

	/* r33 is destination pointer, default to zero */

	moveli	r33, 0

.Lloop:	lw	r10, r30

	andi	r9, r10, 0xf	/* low 4 bits tell us what type it is */
	xor	r10, r10, r9	/* r10 is now value with low 4 bits stripped */

	seqi	r0, r9, 0x1	/* IND_DESTINATION */
	bzt	r0, .Ltry2

	move	r33, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'd'
	jalr	r40
#endif

	addi	r30, r30, 4
	j	.Lloop

.Ltry2:
	seqi	r0, r9, 0x2	/* IND_INDIRECTION */
	bzt	r0, .Ltry4

	move	r30, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'i'
	jalr	r40
#endif

	j	.Lloop

.Ltry4:
	seqi	r0, r9, 0x4	/* IND_DONE */
	bzt	r0, .Ltry8

	mf

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'D'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
#endif

	move	r0, r32
	moveli	r1, 0		/* arg to hv_reexec is 64 bits */

	moveli	r41, lo16(___hv_reexec)
	auli	r41, r41, ha16(___hv_reexec)

	jalr	r41

	/* we should not get here */

	moveli	r0, '?'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40

	j	.Lhalt

.Ltry8:	seqi	r0, r9, 0x8	/* IND_SOURCE */
	bz	r0, .Lerr	/* unknown type */

	/* copy page at r10 to page at r33 */

	move	r11, r33

	moveli	r0, lo16(PAGE_SIZE)
	auli	r0, r0, ha16(PAGE_SIZE)
	add	r33, r33, r0

	/* copy word at r10 to word at r11 until r11 equals r33 */

	/* We know page size must be multiple of 16, so we can unroll
	 * 16 times safely without any edge case checking.
	 *
	 * Issue a flush of the destination every 16 words to avoid
	 * incoherence when starting the new kernel.  (Now this is
	 * just good paranoia because the hv_reexec call will also
	 * take care of this.)
	 */

1:
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0 }
	{ flush	r11    ; addi	r11, r11, 4 }

	seq	r0, r33, r11
	bzt	r0, 1b

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 's'
	jalr	r40
#endif

	addi	r30, r30, 4
	j	.Lloop


.Lerr:	moveli	r0, 'e'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
.Lhalt:
	moveli	r41, lo16(___hv_halt)
	auli	r41, r41, ha16(___hv_halt)

	jalr	r41
	STD_ENDPROC(relocate_new_kernel)

	.section .rodata,"a"

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long .Lend_relocate_new_kernel - relocate_new_kernel
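The loop above walks the standard kexec page list: the low four bits of each entry give its type, and the rest is an address. A hedged C rendering of the same walk follows, for reference only; the IND_* values match the generic kexec encoding, walk_page_list is an invented name, and unsigned long stands in for the 32-bit words the assembly steps through four bytes at a time:

#include <string.h>

#define IND_DESTINATION	0x1	/* set the copy destination */
#define IND_INDIRECTION	0x2	/* chain to another list page */
#define IND_DONE	0x4	/* stop; hv_reexec comes next */
#define IND_SOURCE	0x8	/* copy one page from this address */

static void walk_page_list(unsigned long *entry, unsigned long page_size)
{
	unsigned long dest = 0;			/* r33 in the assembly */

	for (;;) {
		unsigned long val = *entry;
		unsigned long addr = val & ~0xfUL;

		switch (val & 0xf) {
		case IND_DESTINATION:
			dest = addr;
			entry++;
			break;
		case IND_INDIRECTION:
			entry = (unsigned long *)addr;	/* new list page */
			break;
		case IND_DONE:
			return;			/* reexec into new kernel */
		case IND_SOURCE:
			memcpy((void *)dest, (void *)addr, page_size);
			dest += page_size;
			entry++;
			break;
		default:
			return;			/* the ".Lerr" path */
		}
	}
}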
1497
arch/tile/kernel/setup.c
Normal file
File diff suppressed because it is too large
359
arch/tile/kernel/signal.c
Normal file
@@ -0,0 +1,359 @@
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <arch/interrupts.h>

#define DEBUG_SIG 0

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))


/* Caller before callee in this file; other callee is in assembler */
void do_signal(struct pt_regs *regs);

int _sys_sigaltstack(const stack_t __user *uss,
		     stack_t __user *uoss, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}


/*
 * Do a signal return; undo the signal stack.
 */

int restore_sigcontext(struct pt_regs *regs,
		       struct sigcontext __user *sc, long *pr0)
{
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __get_user(((long *)regs)[i],
				  &((long *)(&sc->regs))[i]);

	regs->faultnum = INT_SWINT_1_SIGRETURN;

	err |= __get_user(*pr0, &sc->regs.regs[0]);
	return err;
}

int _sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *)(regs->sp);
	sigset_t set;
	long r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
		goto badframe;

	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/*
 * Set up a signal frame.
 */

int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
	int i, err = 0;

	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __put_user(((long *)regs)[i],
				  &((long *)(&sc->regs))[i]);

	return err;
}

/*
 * Determine which stack to use..
 */
static inline void __user *get_sigframe(struct k_sigaction *ka,
					struct pt_regs *regs,
					size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->sp;

	/*
	 * If we are on the alternate signal stack and would overflow
	 * it, don't.  Return an always-bogus address instead so we
	 * will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
		return (void __user *) -1L;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	sp -= frame_size;
	/*
	 * Align the stack pointer according to the TILE ABI,
	 * i.e. so that on function entry (sp & 15) == 0.
	 */
	sp &= -16UL;
	return (void __user *) sp;
}

static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)(current->sas_ss_sp),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = (unsigned long) ka->sa.sa_restorer;

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = (unsigned long) frame;
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* Need extra arguments, so mark to restore caller-saves. */
		regs->regs[1] = (unsigned long) &frame->info;
		regs->regs[2] = (unsigned long) &frame->uc;
		regs->flags |= PT_FLAGS_CALLER_SAVES;
	}

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}

/*
 * OK, we're invoking a handler
 */

static int handle_signal(unsigned long sig, siginfo_t *info,
			 struct k_sigaction *ka, sigset_t *oldset,
			 struct pt_regs *regs)
{
	int ret;


	/* Are we from a system call? */
	if (regs->faultnum == INT_SWINT_1) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->regs[0] = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[0] = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			/* Reload caller-saves to restore r0..r5 and r10. */
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[0] = regs->orig_r0;
			regs->pc -= 8;
		}
	}

	/* Set up the stack frame */
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		ret = compat_setup_rt_frame(sig, ka, info, oldset, regs);
	else
#endif
		ret = setup_rt_frame(sig, ka, info, oldset, regs);
	if (ret == 0) {
		/* This code is only called from system calls or from
		 * the work_pending path in the return-to-user code, and
		 * either way we can re-enable interrupts unconditionally.
		 */
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,
			  &current->blocked, &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked, sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return ret;
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	sigset_t *oldset;

	/*
	 * i386 will check if we're coming from kernel mode and bail out
	 * here.  In my experience this just turns weird crashes into
	 * weird spin-hangs.  But if we find a case where this seems
	 * helpful, we can reinstate the check on "!user_mode(regs)".
	 */

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}

		return;
	}

	/* Did we come from a system call? */
	if (regs->faultnum == INT_SWINT_1) {
		/* Restart the system call - no handlers present */
		switch (regs->regs[0]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[0] = regs->orig_r0;
			regs->pc -= 8;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall;
			regs->pc -= 8;
			break;
		}
	}

	/* If there's no signal to deliver, just put the saved sigmask back. */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
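The "sp &= -16UL" in get_sigframe() implements the TILE ABI's rule that (sp & 15) == 0 at function entry: subtract the frame size first, then round down to a 16-byte boundary. A small standalone sketch of the arithmetic, with illustrative values only:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x12345;		/* arbitrary user sp */
	unsigned long frame_size = 200;		/* e.g. a sigframe size */

	sp -= frame_size;
	sp &= -16UL;		/* round down to a 16-byte boundary */
	assert((sp & 15) == 0);	/* handler entry alignment guaranteed */
	printf("frame would start at %#lx\n", sp);
	return 0;
}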
656
arch/tile/kernel/single_step.c
Normal file
@@ -0,0 +1,656 @@
|
||||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* A code-rewriter that enables instruction single-stepping.
|
||||
* Derived from iLib's single-stepping code.
|
||||
*/
|
||||
|
||||
#ifndef __tilegx__ /* No support for single-step yet. */
|
||||
|
||||
/* These functions are only used on the TILE platform */
|
||||
#include <linux/slab.h>
|
||||
#include <linux/thread_info.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/opcode-tile.h>
|
||||
#include <asm/opcode_constants.h>
|
||||
#include <arch/abi.h>
|
||||
|
||||
#define signExtend17(val) sign_extend((val), 17)
|
||||
#define TILE_X1_MASK (0xffffffffULL << 31)
|
||||
|
||||
int unaligned_printk;
|
||||
|
||||
static int __init setup_unaligned_printk(char *str)
|
||||
{
|
||||
long val;
|
||||
if (strict_strtol(str, 0, &val) != 0)
|
||||
return 0;
|
||||
unaligned_printk = val;
|
||||
printk("Printk for each unaligned data accesses is %s\n",
|
||||
unaligned_printk ? "enabled" : "disabled");
|
||||
return 1;
|
||||
}
|
||||
__setup("unaligned_printk=", setup_unaligned_printk);
|
||||
|
||||
unsigned int unaligned_fixup_count;
|
||||
|
||||
enum mem_op {
|
||||
MEMOP_NONE,
|
||||
MEMOP_LOAD,
|
||||
MEMOP_STORE,
|
||||
MEMOP_LOAD_POSTINCR,
|
||||
MEMOP_STORE_POSTINCR
|
||||
};
|
||||
|
||||
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
|
||||
{
|
||||
tile_bundle_bits result;
|
||||
|
||||
/* mask out the old offset */
|
||||
tile_bundle_bits mask = create_BrOff_X1(-1);
|
||||
result = n & (~mask);
|
||||
|
||||
/* or in the new offset */
|
||||
result |= create_BrOff_X1(offset);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
|
||||
{
|
||||
tile_bundle_bits result;
|
||||
tile_bundle_bits op;
|
||||
|
||||
result = n & (~TILE_X1_MASK);
|
||||
|
||||
op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
|
||||
create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
|
||||
create_Dest_X1(dest) |
|
||||
create_SrcB_X1(TREG_ZERO) |
|
||||
create_SrcA_X1(src) ;
|
||||
|
||||
result |= op;
|
||||
return result;
|
||||
}
|
||||
|
||||
static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
|
||||
{
|
||||
return move_X1(n, TREG_ZERO, TREG_ZERO);
|
||||
}
|
||||
|
||||
static inline tile_bundle_bits addi_X1(
|
||||
tile_bundle_bits n, int dest, int src, int imm)
|
||||
{
|
||||
n &= ~TILE_X1_MASK;
|
||||
|
||||
n |= (create_SrcA_X1(src) |
|
||||
create_Dest_X1(dest) |
|
||||
create_Imm8_X1(imm) |
|
||||
create_S_X1(0) |
|
||||
create_Opcode_X1(IMM_0_OPCODE_X1) |
|
||||
create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static tile_bundle_bits rewrite_load_store_unaligned(
|
||||
struct single_step_state *state,
|
||||
tile_bundle_bits bundle,
|
||||
struct pt_regs *regs,
|
||||
enum mem_op mem_op,
|
||||
int size, int sign_ext)
|
||||
{
|
||||
unsigned char *addr;
|
||||
int val_reg, addr_reg, err, val;
|
||||
|
||||
/* Get address and value registers */
|
||||
if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
|
||||
addr_reg = get_SrcA_Y2(bundle);
|
||||
val_reg = get_SrcBDest_Y2(bundle);
|
||||
} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
|
||||
addr_reg = get_SrcA_X1(bundle);
|
||||
val_reg = get_Dest_X1(bundle);
|
||||
} else {
|
||||
addr_reg = get_SrcA_X1(bundle);
|
||||
val_reg = get_SrcB_X1(bundle);
|
||||
}
|
||||
|
||||
/*
|
||||
* If registers are not GPRs, don't try to handle it.
|
||||
*
|
||||
* FIXME: we could handle non-GPR loads by getting the real value
|
||||
* from memory, writing it to the single step buffer, using a
|
||||
* temp_reg to hold a pointer to that memory, then executing that
|
||||
* instruction and resetting temp_reg. For non-GPR stores, it's a
|
||||
* little trickier; we could use the single step buffer for that
|
||||
* too, but we'd have to add some more state bits so that we could
|
||||
* call back in here to copy that value to the real target. For
|
||||
* now, we just handle the simple case.
|
||||
*/
|
||||
if ((val_reg >= PTREGS_NR_GPRS &&
|
||||
(val_reg != TREG_ZERO ||
|
||||
mem_op == MEMOP_LOAD ||
|
||||
mem_op == MEMOP_LOAD_POSTINCR)) ||
|
||||
addr_reg >= PTREGS_NR_GPRS)
|
||||
return bundle;
|
||||
|
||||
/* If it's aligned, don't handle it specially */
|
||||
addr = (void *)regs->regs[addr_reg];
|
||||
if (((unsigned long)addr % size) == 0)
|
||||
return bundle;
|
||||
|
||||
#ifndef __LITTLE_ENDIAN
|
||||
# error We assume little-endian representation with copy_xx_user size 2 here
|
||||
#endif
|
||||
/* Handle unaligned load/store */
|
||||
if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
|
||||
unsigned short val_16;
|
||||
switch (size) {
|
||||
case 2:
|
||||
err = copy_from_user(&val_16, addr, sizeof(val_16));
|
||||
val = sign_ext ? ((short)val_16) : val_16;
|
||||
break;
|
||||
case 4:
|
||||
err = copy_from_user(&val, addr, sizeof(val));
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
if (err == 0) {
|
||||
state->update_reg = val_reg;
|
||||
state->update_value = val;
|
||||
state->update = 1;
|
||||
}
|
||||
} else {
|
||||
val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
|
||||
err = copy_to_user(addr, &val, size);
|
||||
}
|
||||
|
||||
if (err) {
|
||||
siginfo_t info = {
|
||||
.si_signo = SIGSEGV,
|
||||
.si_code = SEGV_MAPERR,
|
||||
.si_addr = (void __user *)addr
|
||||
};
|
||||
force_sig_info(info.si_signo, &info, current);
|
||||
return (tile_bundle_bits) 0;
|
||||
}
|
||||
|
||||
if (unaligned_fixup == 0) {
|
||||
siginfo_t info = {
|
||||
.si_signo = SIGBUS,
|
||||
.si_code = BUS_ADRALN,
|
||||
.si_addr = (void __user *)addr
|
||||
};
|
||||
force_sig_info(info.si_signo, &info, current);
|
||||
return (tile_bundle_bits) 0;
|
||||
}
|
||||
|
||||
if (unaligned_printk || unaligned_fixup_count == 0) {
|
||||
printk("Process %d/%s: PC %#lx: Fixup of"
|
||||
" unaligned %s at %#lx.\n",
|
||||
current->pid, current->comm, regs->pc,
|
||||
(mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) ?
|
||||
"load" : "store",
|
||||
(unsigned long)addr);
|
||||
if (!unaligned_printk) {
|
||||
printk("\n"
|
||||
"Unaligned fixups in the kernel will slow your application considerably.\n"
|
||||
"You can find them by writing \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n"
|
||||
"which requests the kernel show all unaligned fixups, or writing a \"0\"\n"
|
||||
"to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n"
|
||||
"access will become a SIGBUS you can debug. No further warnings will be\n"
|
||||
"shown so as to avoid additional slowdown, but you can track the number\n"
|
||||
"of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n"
|
||||
"Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n"
|
||||
"\n");
|
||||
}
|
||||
}
|
||||
++unaligned_fixup_count;
|
||||
|
||||
if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
|
||||
/* Convert the Y2 instruction to a prefetch. */
|
||||
bundle &= ~(create_SrcBDest_Y2(-1) |
|
||||
create_Opcode_Y2(-1));
|
||||
bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
|
||||
create_Opcode_Y2(LW_OPCODE_Y2));
|
||||
/* Replace the load postincr with an addi */
|
||||
} else if (mem_op == MEMOP_LOAD_POSTINCR) {
|
||||
bundle = addi_X1(bundle, addr_reg, addr_reg,
|
||||
get_Imm8_X1(bundle));
|
||||
/* Replace the store postincr with an addi */
|
||||
} else if (mem_op == MEMOP_STORE_POSTINCR) {
|
||||
bundle = addi_X1(bundle, addr_reg, addr_reg,
|
||||
get_Dest_Imm8_X1(bundle));
|
||||
} else {
|
||||
/* Convert the X1 instruction to a nop. */
|
||||
bundle &= ~(create_Opcode_X1(-1) |
|
||||
create_UnShOpcodeExtension_X1(-1) |
|
||||
create_UnOpcodeExtension_X1(-1));
|
||||
bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
|
||||
create_UnShOpcodeExtension_X1(
|
||||
UN_0_SHUN_0_OPCODE_X1) |
|
||||
create_UnOpcodeExtension_X1(
|
||||
NOP_UN_0_SHUN_0_OPCODE_X1));
|
||||
}
|
||||
|
||||
return bundle;
|
||||
}
|
||||
|
||||
/**
|
||||
* single_step_once() - entry point when single stepping has been triggered.
|
||||
* @regs: The machine register state
|
||||
*
|
||||
* When we arrive at this routine via a trampoline, the single step
|
||||
* engine copies the executing bundle to the single step buffer.
|
||||
* If the instruction is a condition branch, then the target is
|
||||
* reset to one past the next instruction. If the instruction
|
||||
* sets the lr, then that is noted. If the instruction is a jump
|
||||
* or call, then the new target pc is preserved and the current
|
||||
* bundle instruction set to null.
|
||||
*
|
||||
* The necessary post-single-step rewriting information is stored in
|
||||
* single_step_state-> We use data segment values because the
|
||||
* stack will be rewound when we run the rewritten single-stepped
|
||||
* instruction.
|
||||
*/
|
||||
void single_step_once(struct pt_regs *regs)
|
||||
{
|
||||
extern tile_bundle_bits __single_step_ill_insn;
|
||||
extern tile_bundle_bits __single_step_j_insn;
|
||||
extern tile_bundle_bits __single_step_addli_insn;
|
||||
extern tile_bundle_bits __single_step_auli_insn;
|
||||
struct thread_info *info = (void *)current_thread_info();
|
||||
struct single_step_state *state = info->step_state;
|
||||
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
|
||||
tile_bundle_bits *buffer, *pc;
|
||||
tile_bundle_bits bundle;
|
||||
int temp_reg;
|
||||
int target_reg = TREG_LR;
|
||||
int err;
|
||||
enum mem_op mem_op = MEMOP_NONE;
|
||||
int size = 0, sign_ext = 0; /* happy compiler */
|
||||
|
||||
asm(
|
||||
" .pushsection .rodata.single_step\n"
|
||||
" .align 8\n"
|
||||
" .globl __single_step_ill_insn\n"
|
||||
"__single_step_ill_insn:\n"
|
||||
" ill\n"
|
||||
" .globl __single_step_addli_insn\n"
|
||||
"__single_step_addli_insn:\n"
|
||||
" { nop; addli r0, zero, 0 }\n"
|
||||
" .globl __single_step_auli_insn\n"
|
||||
"__single_step_auli_insn:\n"
|
||||
" { nop; auli r0, r0, 0 }\n"
|
||||
" .globl __single_step_j_insn\n"
|
||||
"__single_step_j_insn:\n"
|
||||
" j .\n"
|
||||
" .popsection\n"
|
||||
);
|
||||
|
||||
	if (state == NULL) {
		/* allocate the per-thread single-step state */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			printk("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void *) do_mmap(0, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		up_write(&current->mm->mmap_sem);

		if ((int)buffer < 0 && (int)buffer > -PAGE_SIZE) {
			kfree(state);
			printk("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits *)(regs->pc);
	bundle = pc[0];

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long) pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {

		/* branches */
		case BRANCH_OPCODE_X1:
		{
			int32_t offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
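			/*
			 * Net buffer layout: [branch][ill][ill], with
			 * BrOff in bundle units, so a taken branch
			 * lands on the second ill (buffer + 2) and an
			 * untaken one falls through to the first
			 * (buffer + 1).
			 */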
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
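			/*
			 * Worked example: if the bundle uses r0, r1,
			 * and r2 and target_reg is lr (55, truncated
			 * away by the u32 cast), mask is ~0x7, ctz
			 * finds bit 3, and r3 becomes the scratch
			 * register.
			 */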
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
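			/*
			 * The +0x8000 compensates for addli's
			 * sign-extension of the low half.  E.g. for
			 * update_value 0x12348765, addli leaves
			 * 0xffff8765 and ha16 is 0x1235, so auli's
			 * add of 0x12350000 restores 0x12348765.
			 */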
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			 (unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
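		/*
		 * delta is a signed bundle count from the jump bundle
		 * (about to be written at "buffer") to the bundle
		 * after the original pc; the shift converts the byte
		 * difference into bundle units.
		 */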
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		printk("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long) state->buffer,
			     (unsigned long) buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long) state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#endif /* !__tilegx__ */
202
arch/tile/kernel/smp.c
Normal file
@@ -0,0 +1,202 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/cacheflush.h>

HV_Topology smp_topology __write_once;


/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
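	/*
	 * Linear cpu numbers map row-major onto the tile grid: e.g.
	 * with smp_width 8, cpu 10 is the tile at (x=2, y=1).
	 */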
	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
	BUG_ON(rc <= 0);
}

void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int cpu, sent;
	int nrecip = 0;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r;
		BUG_ON(cpu == my_cpu);
		r = &recip[nrecip++];
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	sent = 0;
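	/*
	 * hv_send_message() may deliver to only a subset of the
	 * recipients per call; rc is the number delivered this time,
	 * so keep calling until all nrecip have been sent.
	 */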
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc <= 0) {
			if (!stopping_cpus)  /* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		sent += rc;
	}
}

void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}


/*
 * Provide smp_call_function_mask, but also run function locally
 * if specified in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
		      void *info, bool wait)
{
	int cpu = get_cpu();
	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}


/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	extern unsigned long start_cpu_function_addr;
	get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	set_cpu_online(smp_processor_id(), 0);
	raw_local_irq_disable_all();
	for (;;)
		asm("nap");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}


/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU: /* Start up a cpu */
		smp_start_cpu_interrupt();
		break;

	case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPU's */
		smp_stop_cpu_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;

	default:
		panic("Unknown IPI message tag %d", tag);
		break;
	}
}


/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *) info;
	__flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };
	preempt_disable();
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
	preempt_enable();
}


/*
 * The smp_send_reschedule() path does not use the hv_message_intr()
 * path but instead the faster tile_dev_intr() path for interrupts.
 */

irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	/*
	 * Nothing to do here; when we return from interrupt, the
	 * rescheduling will occur there.  But do bump the interrupt
	 * profiler count in the meantime.
	 */
	__get_cpu_var(irq_stat).irq_resched_count++;

	return IRQ_HANDLED;
}

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));
	coord.y = cpu / smp_width;
	coord.x = cpu % smp_width;
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}
293
arch/tile/kernel/smpboot.c
Normal file
@@ -0,0 +1,293 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

/*
 * This assembly function is provided in entry.S.
 * When called, it loops on a nap instruction forever.
 * FIXME: should be in a header somewhere.
 */
extern void smp_nap(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;

/* Called very early during startup to mark boot cpu as online */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	init_messaging();
}

static void start_secondary(void);

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		printk("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb(); /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}

static __initdata struct cpumask init_affinity;

static __init int reset_init_affinity(void)
{
	long rc = sched_setaffinity(current->pid, &init_affinity);
	if (rc != 0)
		printk(KERN_WARNING "couldn't reset init affinity (%ld)\n",
		       rc);
	return 0;
}
late_initcall(reset_init_affinity);

struct cpumask cpu_started __cpuinitdata;

/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void __cpuinit start_secondary(void)
{
	int cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	preempt_disable();

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__get_cpu_var(current_asid) = min_asid;

	/* Set up this thread as another owner of the init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Enable IRQs. */
	init_per_tile_IRQs();

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		printk(KERN_WARNING "CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}

void setup_mpls(void); /* from kernel/setup.c */
void store_permanent_mappings(void);

/*
 * Bring a secondary processor online.
 */
void __cpuinit online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI.  Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), 1);
	ipi_call_unlock();
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Set up MPLs for this processor */
	setup_mpls();


	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	preempt_enable();

	store_permanent_mappings();

	cpu_idle();
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	/* Wait 5s total for all CPUs to come online */
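	/* 50000 iterations of udelay(100) is 5,000,000 us, i.e. 5s. */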
	static int timeout;
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			printk(KERN_INFO "skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}

static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migrating during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code.  This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
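	/* Walk the mask; the loop below exits with cpu at its last member. */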
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		printk("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}
485
arch/tile/kernel/stack.c
Normal file
@@ -0,0 +1,485 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>


/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address in the specified kernel code? */
static int in_kernel_text(VirtualAddress address)
{
	return (address >= MEM_SV_INTRPT &&
		address < MEM_SV_INTRPT + HPAGE_SIZE);
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

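	/*
	 * Walk the two-level page table by hand: the L1 entry either
	 * maps a huge page directly or points at an L2 table that
	 * holds the final small-page PTE.
	 */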
	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, VirtualAddress address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (in_kernel_text(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (kbt->pgtable == NULL) {
		return 0;	/* can't read user space in other tasks */
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result, (const void *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
#ifndef __tilegx__
	const char *fault = NULL;	/* happy compiler */
	char fault_buf[64];
	VirtualAddress sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {	/* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    in_kernel_text(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			printk(KERN_ERR " <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
		   p->pc < PAGE_OFFSET &&
		   p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			printk(KERN_ERR " <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		printk(KERN_ERR " (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
#endif
	return NULL;
}
/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(VirtualAddress pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;
		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				printk(" (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			printk(KERN_ERR " <received signal %d>\n",
			       frame->info.si_signo);
		}
		return &frame->uc.uc_mcontext.regs;
	}
	return NULL;
}

int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return 1;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return 0;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	VirtualAddress pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = 0;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				printk(KERN_ERR "page table not in LOWMEM"
				       " (%#llx)\n", pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		extern const void *get_switch_to_pc(void);
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = 1;
			return;
		}
		pc = (ulong) get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) &&
	    !KBacktraceIterator_restart(kbt)) {
		kbt->end = 1;
		return;
	}

	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		printk("\n");
		printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
#ifdef __tilegx__
	if (kbt->is_current) {
		__insn_mtspr(SPR_SIM_CONTROL,
			     SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
	}
#endif
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		printk(KERN_ERR " frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			printk(KERN_ERR "Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (headers)
		printk(KERN_ERR "Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);
122
arch/tile/kernel/sys.c
Normal file
@@ -0,0 +1,122 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/TILE
 * platform.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/mempolicy.h>
#include <linux/binfmts.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>

#include <asm/pgtable.h>
#include <asm/homecache.h>
#include <arch/chip.h>

SYSCALL_DEFINE0(flush_cache)
{
	homecache_evict(cpumask_of(smp_processor_id()));
	return 0;
}

/*
 * Syscalls that pass 64-bit values on 32-bit systems normally
 * pass them as (low,high) word packed into the immediately adjacent
 * registers.  If the low word naturally falls on an even register,
 * our ABI makes it work correctly; if not, we adjust it here.
 * Handling it here means we don't have to fix uclibc AND glibc AND
 * any other standard libcs we want to support.
 */

#if !defined(__tilegx__) || defined(CONFIG_COMPAT)

ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count)
{
	return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
}

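/*
 * E.g. a byte offset of 0x100000400 arrives as offset_lo == 0x400
 * and offset_hi == 0x1; the shift-and-or above reassembles it into
 * a single loff_t before calling the generic syscall.
 */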
long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
		     u32 len, int advice)
{
	return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
				len, advice);
}

int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
		       u32 len_lo, u32 len_hi, int advice)
{
	return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
				((loff_t)len_hi << 32) | len_lo, advice);
}

#endif /* 32-bit syscall wrappers */

/*
 * This API uses a 4KB-page-count offset into the file descriptor.
 * It is likely not the right API to use on a 64-bit platform.
 */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off_4k)
{
#define PAGE_ADJUST (PAGE_SHIFT - 12)
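	/*
	 * E.g. with 64 KB pages (PAGE_SHIFT == 16), PAGE_ADJUST is 4:
	 * off_4k must then be a multiple of 16, and shifting right by
	 * 4 converts it to an offset in whole pages.
	 */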
	if (off_4k & ((1 << PAGE_ADJUST) - 1))
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      off_4k >> PAGE_ADJUST);
}

/*
 * This API uses a byte offset into the file descriptor.
 * It is likely not the right API to use on a 32-bit platform.
 */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, offset)
{
	if (offset & ((1 << PAGE_SHIFT) - 1))
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      offset >> PAGE_SHIFT);
}


/* Provide the actual syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

#ifndef __tilegx__
/* See comments at the top of the file. */
#define sys_fadvise64 sys32_fadvise64
#define sys_fadvise64_64 sys32_fadvise64_64
#define sys_readahead sys32_readahead
#define sys_sync_file_range sys_sync_file_range2
#endif

void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
13826
arch/tile/kernel/tile-desc_32.c
Normal file
File diff suppressed because it is too large
220
arch/tile/kernel/time.c
Normal file
@@ -0,0 +1,220 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Support the cycle counter clocksource and tile timer clock event device.
 */

#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <asm/irq_regs.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * Define the cycle counter clock source.
 */

/* How many cycles per second we are running at. */
static cycles_t cycles_per_sec __write_once;

/*
 * We set up shift and multiply values with a minsec of five seconds,
 * since our timer counter counts down 31 bits at a frequency of
 * no less than 500 MHz.  See @minsec for clocks_calc_mult_shift().
 * We could use a different value for the 64-bit free-running
 * cycle counter, but we use the same one for consistency, and since
 * we will be reasonably precise with this value anyway.
 */
#define TILE_MINSEC 5

cycles_t get_clock_rate(void)
{
	return cycles_per_sec;
}

#if CHIP_HAS_SPLIT_CYCLE()
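/*
 * The 64-bit cycle count is read as two 32-bit SPRs, so re-read the
 * high half until it is stable, in case the low word wraps between
 * the two reads.
 */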
cycles_t get_cycles(void)
{
	unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
	unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
	unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);

	while (unlikely(high != high2)) {
		low = __insn_mfspr(SPR_CYCLE_LOW);
		high = high2;
		high2 = __insn_mfspr(SPR_CYCLE_HIGH);
	}

	return (((cycles_t)high) << 32) | low;
}
#endif

cycles_t clocksource_get_cycles(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource cycle_counter_cs = {
	.name = "cycle counter",
	.rating = 300,
	.read = clocksource_get_cycles,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Called very early from setup_arch() to set cycles_per_sec.
 * We initialize it early so we can use it to set up loops_per_jiffy.
 */
void __init setup_clock(void)
{
	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
				    TILE_MINSEC);
}

void __init calibrate_delay(void)
{
	loops_per_jiffy = get_clock_rate() / HZ;
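	/*
	 * E.g. at a 700 MHz clock with HZ=100, loops_per_jiffy is
	 * 7,000,000 and the line below prints
	 * "Clock rate yields 1400.00 BogoMIPS (lpj=7000000)".
	 */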
	pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
		loops_per_jiffy/(500000/HZ),
		(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
}

/* Called fairly late in init/main.c, but before we go smp. */
void __init time_init(void)
{
	/* Initialize and register the clock source. */
	clocksource_register(&cycle_counter_cs);

	/* Start up the tile-timer interrupt source on the boot cpu. */
	setup_tile_timer();
}


/*
 * Define the tile timer clock event device.  The timer is driven by
 * the TILE_TIMER_CONTROL register, which consists of a 31-bit down
 * counter, plus bit 31, which signifies that the counter has wrapped
 * from zero to (2**31) - 1.  The INT_TILE_TIMER interrupt will be
 * raised as long as bit 31 is set.
 */

#define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */

static int tile_timer_set_next_event(unsigned long ticks,
				     struct clock_event_device *evt)
{
	BUG_ON(ticks > MAX_TICK);
	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
	raw_local_irq_unmask_now(INT_TILE_TIMER);
	return 0;
}

/*
 * Whenever anyone tries to change modes, we just mask interrupts
 * and wait for the next event to get set.
 */
static void tile_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	raw_local_irq_mask_now(INT_TILE_TIMER);
}

/*
 * Set min_delta_ns to 1 microsecond, since it takes about
 * that long to fire the interrupt.
 */
static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
	.name = "tile timer",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.min_delta_ns = 1000,
	.rating = 100,
	.irq = -1,
	.set_next_event = tile_timer_set_next_event,
	.set_mode = tile_timer_set_mode,
};

void __cpuinit setup_tile_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(tile_timer);

	/* Fill in fields that are speed-specific. */
	clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
	evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);

	/* Mark as being for this cpu only. */
	evt->cpumask = cpumask_of(smp_processor_id());

	/* Start out with timer not firing. */
	raw_local_irq_mask_now(INT_TILE_TIMER);

	/* Register tile timer. */
	clockevents_register_device(evt);
}

/* Called from the interrupt vector. */
void do_timer_interrupt(struct pt_regs *regs, int fault_num)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct clock_event_device *evt = &__get_cpu_var(tile_timer);

	/*
	 * Mask the timer interrupt here, since we are a oneshot timer
	 * and there are now by definition no events pending.
	 */
	raw_local_irq_mask(INT_TILE_TIMER);

	/* Track time spent here in an interrupt context */
	irq_enter();

	/* Track interrupt count. */
	__get_cpu_var(irq_stat).irq_timer_count++;

	/* Call the generic timer handler */
	evt->event_handler(evt);

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();

	set_irq_regs(old_regs);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 * Note that with LOCKDEP, this is called during lockdep_init(), and
 * we will claim that sched_clock() is zero for a little while, until
 * we run setup_clock(), above.
 */
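/*
 * clocksource_cyc2ns() computes (cycles * mult) >> shift; mult and
 * shift were chosen in setup_clock() so the result approximates
 * cycles * 1e9 / cycles_per_sec without 64-bit overflow.
 */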
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(get_cycles(),
				  cycle_counter_cs.mult,
				  cycle_counter_cs.shift);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
97
arch/tile/kernel/tlb.c
Normal file
@@ -0,0 +1,97 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>

/* From tlbflush.h */
DEFINE_PER_CPU(int, current_asid);
int min_asid, max_asid;

/*
 * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
 * so that when we are unmapping an executable page, we also flush it.
 * Combined with flushing the L1I at context switch time, this means
 * we don't have to do any other icache flushes.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	HV_Remote_ASID asids[NR_CPUS];
	int i = 0, cpu;
	for_each_cpu(cpu, &mm->cpu_vm_mask) {
		HV_Remote_ASID *asid = &asids[i++];
		asid->y = cpu / smp_topology.width;
		asid->x = cpu % smp_topology.width;
		asid->asid = per_cpu(current_asid, cpu);
	}
	flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask,
		     0, 0, 0, NULL, asids, i);
}

void flush_tlb_current_task(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
		       unsigned long va)
{
	unsigned long size = hv_page_size(vma);
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, &mm->cpu_vm_mask,
		     va, size, size, &mm->cpu_vm_mask, NULL, 0);
}

void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
{
	flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);

void flush_tlb_range(const struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long size = hv_page_size(vma);
	struct mm_struct *mm = vma->vm_mm;
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size,
		     &mm->cpu_vm_mask, NULL, 0);
}

void flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
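		/*
		 * Flush each virtual range twice: once for small-page
		 * translations (PAGE_SIZE) and once for huge-page
		 * translations (HPAGE_SIZE), since entries of either
		 * size may be cached for the range.
		 */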
flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
|
||||
r.start, r.size, PAGE_SIZE, cpu_online_mask,
|
||||
NULL, 0);
|
||||
flush_remote(0, 0, NULL,
|
||||
r.start, r.size, HPAGE_SIZE, cpu_online_mask,
|
||||
NULL, 0);
|
||||
}
|
||||
}
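
/*
 * Note that each hypervisor-advertised VA range is flushed twice
 * above: once at PAGE_SIZE granularity (which also evicts the L1I)
 * and once at HPAGE_SIZE granularity, so both small-page and
 * huge-page TLB entries covering the range are dropped.
 */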

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
}
237
arch/tile/kernel/traps.c
Normal file
@@ -0,0 +1,237 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <asm/opcode-tile.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>

void __init trap_init(void)
{
	/* Nothing needed here since we link code at .intrpt1 */
}

int unaligned_fixup = 1;

static int __init setup_unaligned_fixup(char *str)
{
	/*
	 * Say "=-1" to completely disable it. If you just do "=0", we
	 * will still parse the instruction, then fire a SIGBUS with
	 * the correct address from inside the single_step code.
	 */
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_fixup = val;
	printk("Fixups for unaligned data accesses are %s\n",
	       unaligned_fixup >= 0 ?
	       (unaligned_fixup ? "enabled" : "disabled") :
	       "completely disabled");
	return 1;
}
__setup("unaligned_fixup=", setup_unaligned_fixup);
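
/*
 * Example kernel command-line usage of the option above:
 *
 *	unaligned_fixup=1	fix up unaligned accesses (the default)
 *	unaligned_fixup=0	parse the instruction, then deliver SIGBUS
 *				with the correct faulting address
 *	unaligned_fixup=-1	disable unaligned handling completely
 */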

#if CHIP_HAS_TILE_DMA()

static int dma_disabled;

static int __init nodma(char *str)
{
	printk("User-space DMA is disabled\n");
	dma_disabled = 1;
	return 1;
}
__setup("nodma", nodma);

/* How to decode SPR_GPV_REASON */
#define IRET_ERROR (1U << 31)
#define MT_ERROR (1U << 30)
#define MF_ERROR (1U << 29)
#define SPR_INDEX ((1U << 15) - 1)
#define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */

/*
 * See if this GPV is just to notify the kernel of SPR use and we can
 * retry the user instruction after adjusting some MPLs suitably.
 */
static int retry_gpv(unsigned int gpv_reason)
{
	int mpl;

	if (gpv_reason & IRET_ERROR)
		return 0;

	BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0);
	mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT;
	if (mpl == INT_DMA_NOTIFY && !dma_disabled) {
		/* User is turning on DMA. Allow it and retry. */
		printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n",
		       current->pid, current->comm);
		BUG_ON(current->thread.tile_dma_state.enabled);
		current->thread.tile_dma_state.enabled = 1;
		grant_dma_mpls();
		return 1;
	}

	return 0;
}
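
/*
 * Decode sketch for the masks above (the reason value is made up for
 * illustration): given gpv_reason == MT_ERROR | 0x2a00, the
 * IRET_ERROR test fails, the MT/MF check passes, and
 * mpl = (0x2a00 & 0x7fff) >> 9 = 0x15, i.e. the MPL is taken from
 * bits 9..14 of the SPR index the user tried to touch.
 */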

#endif /* CHIP_HAS_TILE_DMA() */

/* Defined inside do_trap(), below. */
#ifdef __tilegx__
extern tilegx_bundle_bits bpt_code;
#else
extern tile_bundle_bits bpt_code;
#endif

void __kprobes do_trap(struct pt_regs *regs, int fault_num,
		       unsigned long reason)
{
	siginfo_t info = { 0 };
	int signo, code;
	unsigned long address;
	__typeof__(bpt_code) instr;

	/* Re-enable interrupts. */
	local_irq_enable();

	/*
	 * If it hits in kernel mode and we can't fix it up, just exit the
	 * current process and hope for the best.
	 */
	if (!user_mode(regs)) {
		if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
			return;
		printk(KERN_ALERT "Kernel took bad trap %d at PC %#lx\n",
		       fault_num, regs->pc);
		if (fault_num == INT_GPV)
			printk(KERN_ALERT "GPV_REASON is %#lx\n", reason);
		show_regs(regs);
		do_exit(SIGKILL);  /* FIXME: implement i386 die() */
		return;
	}

	switch (fault_num) {
	case INT_ILL:
		asm(".pushsection .rodata.bpt_code,\"a\";"
		    ".align 8;"
		    "bpt_code: bpt;"
		    ".size bpt_code,.-bpt_code;"
		    ".popsection");

		if (copy_from_user(&instr, (void *)regs->pc, sizeof(instr))) {
			printk(KERN_ERR "Unreadable instruction for INT_ILL:"
			       " %#lx\n", regs->pc);
			do_exit(SIGKILL);
			return;
		}
		if (instr == bpt_code) {
			signo = SIGTRAP;
			code = TRAP_BRKPT;
		} else {
			signo = SIGILL;
			code = ILL_ILLOPC;
		}
		address = regs->pc;
		break;
	case INT_GPV:
#if CHIP_HAS_TILE_DMA()
		if (retry_gpv(reason))
			return;
#endif
		/*FALLTHROUGH*/
	case INT_UDN_ACCESS:
	case INT_IDN_ACCESS:
#if CHIP_HAS_SN()
	case INT_SN_ACCESS:
#endif
		signo = SIGILL;
		code = ILL_PRVREG;
		address = regs->pc;
		break;
	case INT_SWINT_3:
	case INT_SWINT_2:
	case INT_SWINT_0:
		signo = SIGILL;
		code = ILL_ILLTRP;
		address = regs->pc;
		break;
	case INT_UNALIGN_DATA:
#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
		if (unaligned_fixup >= 0) {
			struct single_step_state *state =
				current_thread_info()->step_state;
			if (!state || (void *)(regs->pc) != state->buffer) {
				single_step_once(regs);
				return;
			}
		}
#endif
		signo = SIGBUS;
		code = BUS_ADRALN;
		address = 0;
		break;
	case INT_DOUBLE_FAULT:
		/*
		 * For double fault, "reason" is actually passed as
		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
		 * we can provide the original fault number rather than
		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
		 * learn what actually struck while PL0 ICS was set.
		 */
		fault_num = reason;
		signo = SIGILL;
		code = ILL_DBLFLT;
		address = regs->pc;
		break;
#ifdef __tilegx__
	case INT_ILL_TRANS:
		signo = SIGSEGV;
		code = SEGV_MAPERR;
		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
			address = regs->pc;
		else
			address = 0;  /* FIXME: GX: single-step for address */
		break;
#endif
	default:
		panic("Unexpected do_trap interrupt number %d", fault_num);
		return;
	}

	info.si_signo = signo;
	info.si_code = code;
	info.si_addr = (void *)address;
	if (signo == SIGILL)
		info.si_trapno = fault_num;
	force_sig_info(signo, &info, current);
}

extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);

void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	_dump_stack(dummy, pc, lr, sp, r52);
	printk("Double fault: exiting\n");
	machine_halt();
}
98
arch/tile/kernel/vmlinux.lds.S
Normal file
@@ -0,0 +1,98 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <hv/hypervisor.h>

/* Text loads starting from the supervisor interrupt vector address. */
#define TEXT_OFFSET MEM_SV_INTRPT

OUTPUT_ARCH(tile)
ENTRY(_start)
jiffies = jiffies_64;

PHDRS
{
  intrpt1 PT_LOAD ;
  text PT_LOAD ;
  data PT_LOAD ;
}
SECTIONS
{
  /* Text is loaded with a different VA than data; start with text. */
  #undef LOAD_OFFSET
  #define LOAD_OFFSET TEXT_OFFSET

  /* Interrupt vectors */
  .intrpt1 (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
  {
    _text = .;
    _stext = .;
    *(.intrpt1)
  } :intrpt1 =0

  /* Hypervisor call vectors */
  #include "hvglue.lds"

  /* Now the real code */
  . = ALIGN(0x20000);
  HEAD_TEXT_SECTION :text =0
  .text : AT (ADDR(.text) - LOAD_OFFSET) {
    SCHED_TEXT
    LOCK_TEXT
    __fix_text_end = .;   /* tile-cpack won't rearrange before this */
    TEXT_TEXT
    *(.text.*)
    *(.coldtext*)
    *(.fixup)
    *(.gnu.warning)
  }
  _etext = .;

  /* "Init" is divided into two areas with very different virtual addresses. */
  INIT_TEXT_SECTION(PAGE_SIZE)

  /* Now we skip back to PAGE_OFFSET for the data. */
  . = (. - TEXT_OFFSET + PAGE_OFFSET);
  #undef LOAD_OFFSET
  #define LOAD_OFFSET PAGE_OFFSET
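
  /*
   * Worked example of the jump above, with illustrative values (not
   * necessarily the ones this port uses): if TEXT_OFFSET were
   * 0xfd000000 and PAGE_OFFSET 0xc0000000, a location counter of
   * 0xfd120000 would be rebased to 0xfd120000 - 0xfd000000 +
   * 0xc0000000 = 0xc0120000, so the sections that follow are linked
   * in the normal kernel data VA range.
   */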

  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_sinitdata) = .;
  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
    *(.init.page)
  } :data =0
  INIT_DATA_SECTION(16)
  PERCPU(PAGE_SIZE)
  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_einitdata) = .;

  _sdata = .;   /* Start of data section */

  RO_DATA_SECTION(PAGE_SIZE)

  /* initially writeable, then read-only */
  . = ALIGN(PAGE_SIZE);
  __w1data_begin = .;
  .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) {
    VMLINUX_SYMBOL(__w1data_begin) = .;
    *(.w1data)
    VMLINUX_SYMBOL(__w1data_end) = .;
  }

  RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

  _edata = .;

  EXCEPTION_TABLE(L2_CACHE_BYTES)
  NOTES

  BSS_SECTION(8, PAGE_SIZE, 1)
  _end = . ;

  STABS_DEBUG
  DWARF_DEBUG

  DISCARDS
}