Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/asm-x86/statfs.h
This commit is contained in:
@@ -6,10 +6,11 @@ extra-y := head.o init_task.o vmlinux.lds
|
||||
|
||||
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
|
||||
ptrace.o reset.o setup.o signal.o syscall.o \
|
||||
time.o topology.o traps.o unaligned.o
|
||||
time.o topology.o traps.o unaligned.o watch.o
|
||||
|
||||
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
|
||||
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
|
||||
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
|
||||
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
|
||||
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
|
||||
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
|
||||
|
@@ -12,6 +12,14 @@
|
||||
|
||||
#include <asm/smtc_ipi.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/cevt-r4k.h>
|
||||
|
||||
/*
|
||||
* The SMTC Kernel for the 34K, 1004K, et. al. replaces several
|
||||
* of these routines with SMTC-specific variants.
|
||||
*/
|
||||
|
||||
#ifndef CONFIG_MIPS_MT_SMTC
|
||||
|
||||
static int mips_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt)
|
||||
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
|
||||
unsigned int cnt;
|
||||
int res;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
{
|
||||
unsigned long flags, vpflags;
|
||||
local_irq_save(flags);
|
||||
vpflags = dvpe();
|
||||
#endif
|
||||
cnt = read_c0_count();
|
||||
cnt += delta;
|
||||
write_c0_compare(cnt);
|
||||
res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
evpe(vpflags);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#endif
|
||||
return res;
|
||||
}
|
||||
|
||||
static void mips_set_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt)
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
void mips_set_clock_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt)
|
||||
{
|
||||
/* Nothing to do ... */
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
|
||||
static int cp0_timer_irq_installed;
|
||||
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
|
||||
int cp0_timer_irq_installed;
|
||||
|
||||
/*
|
||||
* Timer ack for an R4k-compatible timer of a known frequency.
|
||||
*/
|
||||
static void c0_timer_ack(void)
|
||||
{
|
||||
write_c0_compare(read_c0_compare());
|
||||
}
|
||||
#ifndef CONFIG_MIPS_MT_SMTC
|
||||
|
||||
/*
|
||||
* Possibly handle a performance counter interrupt.
|
||||
* Return true if the timer interrupt should not be checked
|
||||
*/
|
||||
static inline int handle_perf_irq(int r2)
|
||||
{
|
||||
/*
|
||||
* The performance counter overflow interrupt may be shared with the
|
||||
* timer interrupt (cp0_perfcount_irq < 0). If it is and a
|
||||
* performance counter has overflowed (perf_irq() == IRQ_HANDLED)
|
||||
* and we can't reliably determine if a counter interrupt has also
|
||||
* happened (!r2) then don't check for a timer interrupt.
|
||||
*/
|
||||
return (cp0_perfcount_irq < 0) &&
|
||||
perf_irq() == IRQ_HANDLED &&
|
||||
!r2;
|
||||
}
|
||||
|
||||
static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
|
||||
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
const int r2 = cpu_has_mips_r2;
|
||||
struct clock_event_device *cd;
|
||||
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
|
||||
* interrupt. Being the paranoiacs we are we check anyway.
|
||||
*/
|
||||
if (!r2 || (read_c0_cause() & (1 << 30))) {
|
||||
c0_timer_ack();
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
if (cpu_data[cpu].vpe_id)
|
||||
goto out;
|
||||
cpu = 0;
|
||||
#endif
|
||||
/* Clear Count/Compare Interrupt */
|
||||
write_c0_compare(read_c0_compare());
|
||||
cd = &per_cpu(mips_clockevent_device, cpu);
|
||||
cd->event_handler(cd);
|
||||
}
|
||||
@@ -107,65 +78,16 @@ out:
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static struct irqaction c0_compare_irqaction = {
|
||||
#endif /* Not CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
struct irqaction c0_compare_irqaction = {
|
||||
.handler = c0_compare_interrupt,
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
.flags = IRQF_DISABLED,
|
||||
#else
|
||||
.flags = IRQF_DISABLED | IRQF_PERCPU,
|
||||
#endif
|
||||
.name = "timer",
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
|
||||
|
||||
static void smtc_set_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt)
|
||||
{
|
||||
}
|
||||
|
||||
static void mips_broadcast(cpumask_t mask)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
for_each_cpu_mask(cpu, mask)
|
||||
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
|
||||
}
|
||||
|
||||
static void setup_smtc_dummy_clockevent_device(void)
|
||||
{
|
||||
//uint64_t mips_freq = mips_hpt_^frequency;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct clock_event_device *cd;
|
||||
|
||||
cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
|
||||
|
||||
cd->name = "SMTC";
|
||||
cd->features = CLOCK_EVT_FEAT_DUMMY;
|
||||
|
||||
/* Calculate the min / max delta */
|
||||
cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
|
||||
cd->shift = 0; //32;
|
||||
cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
|
||||
cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
|
||||
|
||||
cd->rating = 200;
|
||||
cd->irq = 17; //-1;
|
||||
// if (cpu)
|
||||
// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
|
||||
// else
|
||||
cd->cpumask = cpumask_of_cpu(cpu);
|
||||
|
||||
cd->set_mode = smtc_set_mode;
|
||||
|
||||
cd->broadcast = mips_broadcast;
|
||||
|
||||
clockevents_register_device(cd);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void mips_event_handler(struct clock_event_device *dev)
|
||||
void mips_event_handler(struct clock_event_device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
|
||||
return (read_c0_cause() >> cp0_compare_irq) & 0x100;
|
||||
}
|
||||
|
||||
static int c0_compare_int_usable(void)
|
||||
/*
|
||||
* Compare interrupt can be routed and latched outside the core,
|
||||
* so a single execution hazard barrier may not be enough to give
|
||||
* it time to clear as seen in the Cause register. 4 time the
|
||||
* pipeline depth seems reasonably conservative, and empirically
|
||||
* works better in configurations with high CPU/bus clock ratios.
|
||||
*/
|
||||
|
||||
#define compare_change_hazard() \
|
||||
do { \
|
||||
irq_disable_hazard(); \
|
||||
irq_disable_hazard(); \
|
||||
irq_disable_hazard(); \
|
||||
irq_disable_hazard(); \
|
||||
} while (0)
|
||||
|
||||
int c0_compare_int_usable(void)
|
||||
{
|
||||
unsigned int delta;
|
||||
unsigned int cnt;
|
||||
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
|
||||
*/
|
||||
if (c0_compare_int_pending()) {
|
||||
write_c0_compare(read_c0_count());
|
||||
irq_disable_hazard();
|
||||
compare_change_hazard();
|
||||
if (c0_compare_int_pending())
|
||||
return 0;
|
||||
}
|
||||
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
|
||||
cnt = read_c0_count();
|
||||
cnt += delta;
|
||||
write_c0_compare(cnt);
|
||||
irq_disable_hazard();
|
||||
compare_change_hazard();
|
||||
if ((int)(read_c0_count() - cnt) < 0)
|
||||
break;
|
||||
/* increase delta if the timer was already expired */
|
||||
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
|
||||
while ((int)(read_c0_count() - cnt) <= 0)
|
||||
; /* Wait for expiry */
|
||||
|
||||
compare_change_hazard();
|
||||
if (!c0_compare_int_pending())
|
||||
return 0;
|
||||
|
||||
write_c0_compare(read_c0_count());
|
||||
irq_disable_hazard();
|
||||
compare_change_hazard();
|
||||
if (c0_compare_int_pending())
|
||||
return 0;
|
||||
|
||||
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_MIPS_MT_SMTC
|
||||
|
||||
int __cpuinit mips_clockevent_init(void)
|
||||
{
|
||||
uint64_t mips_freq = mips_hpt_frequency;
|
||||
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
|
||||
if (!cpu_has_counter || !mips_hpt_frequency)
|
||||
return -ENXIO;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
setup_smtc_dummy_clockevent_device();
|
||||
|
||||
/*
|
||||
* On SMTC we only register VPE0's compare interrupt as clockevent
|
||||
* device.
|
||||
*/
|
||||
if (cpu)
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
if (!c0_compare_int_usable())
|
||||
return -ENXIO;
|
||||
|
||||
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
|
||||
|
||||
cd->rating = 300;
|
||||
cd->irq = irq;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
cd->cpumask = CPU_MASK_ALL;
|
||||
#else
|
||||
cd->cpumask = cpumask_of_cpu(cpu);
|
||||
#endif
|
||||
cd->set_next_event = mips_next_event;
|
||||
cd->set_mode = mips_set_mode;
|
||||
cd->set_mode = mips_set_clock_mode;
|
||||
cd->event_handler = mips_event_handler;
|
||||
|
||||
clockevents_register_device(cd);
|
||||
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
|
||||
|
||||
cp0_timer_irq_installed = 1;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
|
||||
setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
|
||||
#else
|
||||
setup_irq(irq, &c0_compare_irqaction);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* Not CONFIG_MIPS_MT_SMTC */
|
||||
|
321
arch/mips/kernel/cevt-smtc.c
Normal file
321
arch/mips/kernel/cevt-smtc.c
Normal file
@@ -0,0 +1,321 @@
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2007 MIPS Technologies, Inc.
|
||||
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
|
||||
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
|
||||
*/
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/percpu.h>
|
||||
|
||||
#include <asm/smtc_ipi.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/cevt-r4k.h>
|
||||
|
||||
/*
|
||||
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
|
||||
* or other MIPS MT cores.
|
||||
*
|
||||
* Notes on SMTC Support:
|
||||
*
|
||||
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
|
||||
* But there's only one Count/Compare pair per VPE, and Compare
|
||||
* interrupts are taken opportunisitically by available TCs
|
||||
* bound to the VPE with the Count register. The new timer
|
||||
* framework provides for global broadcasts, but we really
|
||||
* want VPE-level multicasts for best behavior. So instead
|
||||
* of invoking the high-level clock-event broadcast code,
|
||||
* this version of SMTC support uses the historical SMTC
|
||||
* multicast mechanisms "under the hood", appearing to the
|
||||
* generic clock layer as if the interrupts are per-CPU.
|
||||
*
|
||||
* The approach taken here is to maintain a set of NR_CPUS
|
||||
* virtual timers, and track which "CPU" needs to be alerted
|
||||
* at each event.
|
||||
*
|
||||
* It's unlikely that we'll see a MIPS MT core with more than
|
||||
* 2 VPEs, but we *know* that we won't need to handle more
|
||||
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
|
||||
* is always going to be overkill, but always going to be enough.
|
||||
*/
|
||||
|
||||
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
|
||||
static int smtc_nextinvpe[NR_CPUS];
|
||||
|
||||
/*
|
||||
* Timestamps stored are absolute values to be programmed
|
||||
* into Count register. Valid timestamps will never be zero.
|
||||
* If a Zero Count value is actually calculated, it is converted
|
||||
* to be a 1, which will introduce 1 or two CPU cycles of error
|
||||
* roughly once every four billion events, which at 1000 HZ means
|
||||
* about once every 50 days. If that's actually a problem, one
|
||||
* could alternate squashing 0 to 1 and to -1.
|
||||
*/
|
||||
|
||||
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
|
||||
#define ISVALID(x) ((x) != 0L)
|
||||
|
||||
/*
|
||||
* Time comparison is subtle, as it's really truncated
|
||||
* modular arithmetic.
|
||||
*/
|
||||
|
||||
#define IS_SOONER(a, b, reference) \
|
||||
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
|
||||
|
||||
/*
|
||||
* CATCHUP_INCREMENT, used when the function falls behind the counter.
|
||||
* Could be an increasing function instead of a constant;
|
||||
*/
|
||||
|
||||
#define CATCHUP_INCREMENT 64
|
||||
|
||||
static int mips_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int mtflags;
|
||||
unsigned long timestamp, reference, previous;
|
||||
unsigned long nextcomp = 0L;
|
||||
int vpe = current_cpu_data.vpe_id;
|
||||
int cpu = smp_processor_id();
|
||||
local_irq_save(flags);
|
||||
mtflags = dmt();
|
||||
|
||||
/*
|
||||
* Maintain the per-TC virtual timer
|
||||
* and program the per-VPE shared Count register
|
||||
* as appropriate here...
|
||||
*/
|
||||
reference = (unsigned long)read_c0_count();
|
||||
timestamp = MAKEVALID(reference + delta);
|
||||
/*
|
||||
* To really model the clock, we have to catch the case
|
||||
* where the current next-in-VPE timestamp is the old
|
||||
* timestamp for the calling CPE, but the new value is
|
||||
* in fact later. In that case, we have to do a full
|
||||
* scan and discover the new next-in-VPE CPU id and
|
||||
* timestamp.
|
||||
*/
|
||||
previous = smtc_nexttime[vpe][cpu];
|
||||
if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
|
||||
&& IS_SOONER(previous, timestamp, reference)) {
|
||||
int i;
|
||||
int soonest = cpu;
|
||||
|
||||
/*
|
||||
* Update timestamp array here, so that new
|
||||
* value gets considered along with those of
|
||||
* other virtual CPUs on the VPE.
|
||||
*/
|
||||
smtc_nexttime[vpe][cpu] = timestamp;
|
||||
for_each_online_cpu(i) {
|
||||
if (ISVALID(smtc_nexttime[vpe][i])
|
||||
&& IS_SOONER(smtc_nexttime[vpe][i],
|
||||
smtc_nexttime[vpe][soonest], reference)) {
|
||||
soonest = i;
|
||||
}
|
||||
}
|
||||
smtc_nextinvpe[vpe] = soonest;
|
||||
nextcomp = smtc_nexttime[vpe][soonest];
|
||||
/*
|
||||
* Otherwise, we don't have to process the whole array rank,
|
||||
* we just have to see if the event horizon has gotten closer.
|
||||
*/
|
||||
} else {
|
||||
if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
|
||||
IS_SOONER(timestamp,
|
||||
smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
|
||||
smtc_nextinvpe[vpe] = cpu;
|
||||
nextcomp = timestamp;
|
||||
}
|
||||
/*
|
||||
* Since next-in-VPE may me the same as the executing
|
||||
* virtual CPU, we update the array *after* checking
|
||||
* its value.
|
||||
*/
|
||||
smtc_nexttime[vpe][cpu] = timestamp;
|
||||
}
|
||||
|
||||
/*
|
||||
* It may be that, in fact, we don't need to update Compare,
|
||||
* but if we do, we want to make sure we didn't fall into
|
||||
* a crack just behind Count.
|
||||
*/
|
||||
if (ISVALID(nextcomp)) {
|
||||
write_c0_compare(nextcomp);
|
||||
ehb();
|
||||
/*
|
||||
* We never return an error, we just make sure
|
||||
* that we trigger the handlers as quickly as
|
||||
* we can if we fell behind.
|
||||
*/
|
||||
while ((nextcomp - (unsigned long)read_c0_count())
|
||||
> (unsigned long)LONG_MAX) {
|
||||
nextcomp += CATCHUP_INCREMENT;
|
||||
write_c0_compare(nextcomp);
|
||||
ehb();
|
||||
}
|
||||
}
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void smtc_distribute_timer(int vpe)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int mtflags;
|
||||
int cpu;
|
||||
struct clock_event_device *cd;
|
||||
unsigned long nextstamp = 0L;
|
||||
unsigned long reference;
|
||||
|
||||
|
||||
repeat:
|
||||
for_each_online_cpu(cpu) {
|
||||
/*
|
||||
* Find virtual CPUs within the current VPE who have
|
||||
* unserviced timer requests whose time is now past.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
mtflags = dmt();
|
||||
if (cpu_data[cpu].vpe_id == vpe &&
|
||||
ISVALID(smtc_nexttime[vpe][cpu])) {
|
||||
reference = (unsigned long)read_c0_count();
|
||||
if ((smtc_nexttime[vpe][cpu] - reference)
|
||||
> (unsigned long)LONG_MAX) {
|
||||
smtc_nexttime[vpe][cpu] = 0L;
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
/*
|
||||
* We don't send IPIs to ourself.
|
||||
*/
|
||||
if (cpu != smp_processor_id()) {
|
||||
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
|
||||
} else {
|
||||
cd = &per_cpu(mips_clockevent_device, cpu);
|
||||
cd->event_handler(cd);
|
||||
}
|
||||
} else {
|
||||
/* Local to VPE but Valid Time not yet reached. */
|
||||
if (!ISVALID(nextstamp) ||
|
||||
IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
|
||||
reference)) {
|
||||
smtc_nextinvpe[vpe] = cpu;
|
||||
nextstamp = smtc_nexttime[vpe][cpu];
|
||||
}
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
} else {
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
|
||||
}
|
||||
}
|
||||
/* Reprogram for interrupt at next soonest timestamp for VPE */
|
||||
if (ISVALID(nextstamp)) {
|
||||
write_c0_compare(nextstamp);
|
||||
ehb();
|
||||
if ((nextstamp - (unsigned long)read_c0_count())
|
||||
> (unsigned long)LONG_MAX)
|
||||
goto repeat;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
|
||||
handle_perf_irq(1);
|
||||
|
||||
if (read_c0_cause() & (1 << 30)) {
|
||||
/* Clear Count/Compare Interrupt */
|
||||
write_c0_compare(read_c0_compare());
|
||||
smtc_distribute_timer(cpu_data[cpu].vpe_id);
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
||||
int __cpuinit mips_clockevent_init(void)
|
||||
{
|
||||
uint64_t mips_freq = mips_hpt_frequency;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct clock_event_device *cd;
|
||||
unsigned int irq;
|
||||
int i;
|
||||
int j;
|
||||
|
||||
if (!cpu_has_counter || !mips_hpt_frequency)
|
||||
return -ENXIO;
|
||||
if (cpu == 0) {
|
||||
for (i = 0; i < num_possible_cpus(); i++) {
|
||||
smtc_nextinvpe[i] = 0;
|
||||
for (j = 0; j < num_possible_cpus(); j++)
|
||||
smtc_nexttime[i][j] = 0L;
|
||||
}
|
||||
/*
|
||||
* SMTC also can't have the usablility test
|
||||
* run by secondary TCs once Compare is in use.
|
||||
*/
|
||||
if (!c0_compare_int_usable())
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* With vectored interrupts things are getting platform specific.
|
||||
* get_c0_compare_int is a hook to allow a platform to return the
|
||||
* interrupt number of it's liking.
|
||||
*/
|
||||
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
|
||||
if (get_c0_compare_int)
|
||||
irq = get_c0_compare_int();
|
||||
|
||||
cd = &per_cpu(mips_clockevent_device, cpu);
|
||||
|
||||
cd->name = "MIPS";
|
||||
cd->features = CLOCK_EVT_FEAT_ONESHOT;
|
||||
|
||||
/* Calculate the min / max delta */
|
||||
cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
|
||||
cd->shift = 32;
|
||||
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
|
||||
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
|
||||
|
||||
cd->rating = 300;
|
||||
cd->irq = irq;
|
||||
cd->cpumask = cpumask_of_cpu(cpu);
|
||||
cd->set_next_event = mips_next_event;
|
||||
cd->set_mode = mips_set_clock_mode;
|
||||
cd->event_handler = mips_event_handler;
|
||||
|
||||
clockevents_register_device(cd);
|
||||
|
||||
/*
|
||||
* On SMTC we only want to do the data structure
|
||||
* initialization and IRQ setup once.
|
||||
*/
|
||||
if (cpu)
|
||||
return 0;
|
||||
/*
|
||||
* And we need the hwmask associated with the c0_compare
|
||||
* vector to be initialized.
|
||||
*/
|
||||
irq_hwmask[irq] = (0x100 << cp0_compare_irq);
|
||||
if (cp0_timer_irq_installed)
|
||||
return 0;
|
||||
|
||||
cp0_timer_irq_installed = 1;
|
||||
|
||||
setup_irq(irq, &c0_compare_irqaction);
|
||||
|
||||
return 0;
|
||||
}
|
@@ -21,6 +21,7 @@
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/watch.h>
|
||||
|
||||
/*
|
||||
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
|
||||
@@ -45,18 +46,7 @@ static void r39xx_wait(void)
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
* There is a race when WAIT instruction executed with interrupt
|
||||
* enabled.
|
||||
* But it is implementation-dependent wheter the pipelie restarts when
|
||||
* a non-enabled interrupt is requested.
|
||||
*/
|
||||
static void r4k_wait(void)
|
||||
{
|
||||
__asm__(" .set mips3 \n"
|
||||
" wait \n"
|
||||
" .set mips0 \n");
|
||||
}
|
||||
extern void r4k_wait(void);
|
||||
|
||||
/*
|
||||
* This variant is preferable as it allows testing need_resched and going to
|
||||
@@ -65,14 +55,18 @@ static void r4k_wait(void)
|
||||
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
|
||||
* using this version a gamble.
|
||||
*/
|
||||
static void r4k_wait_irqoff(void)
|
||||
void r4k_wait_irqoff(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
if (!need_resched())
|
||||
__asm__(" .set mips3 \n"
|
||||
__asm__(" .set push \n"
|
||||
" .set mips3 \n"
|
||||
" wait \n"
|
||||
" .set mips0 \n");
|
||||
" .set pop \n");
|
||||
local_irq_enable();
|
||||
__asm__(" .globl __pastwait \n"
|
||||
"__pastwait: \n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -128,7 +122,7 @@ static int __init wait_disable(char *s)
|
||||
|
||||
__setup("nowait", wait_disable);
|
||||
|
||||
static inline void check_wait(void)
|
||||
void __init check_wait(void)
|
||||
{
|
||||
struct cpuinfo_mips *c = ¤t_cpu_data;
|
||||
|
||||
@@ -242,7 +236,6 @@ static inline void check_errata(void)
|
||||
|
||||
void __init check_bugs32(void)
|
||||
{
|
||||
check_wait();
|
||||
check_errata();
|
||||
}
|
||||
|
||||
@@ -685,6 +678,7 @@ static inline void spram_config(void) {}
|
||||
static inline void cpu_probe_mips(struct cpuinfo_mips *c)
|
||||
{
|
||||
decode_configs(c);
|
||||
mips_probe_watch_registers(c);
|
||||
switch (c->processor_id & 0xff00) {
|
||||
case PRID_IMP_4KC:
|
||||
c->cputype = CPU_4KC;
|
||||
|
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
|
||||
|
||||
FEXPORT(restore_all) # restore full frame
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Detect and execute deferred IPI "interrupts" */
|
||||
LONG_L s0, TI_REGS($28)
|
||||
LONG_S sp, TI_REGS($28)
|
||||
jal deferred_smtc_ipi
|
||||
LONG_S s0, TI_REGS($28)
|
||||
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
|
||||
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
|
||||
mfc0 v0, CP0_TCSTATUS
|
||||
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
|
||||
xor t0, t0, t3
|
||||
mtc0 t0, CP0_TCCONTEXT
|
||||
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
|
||||
/* Detect and execute deferred IPI "interrupts" */
|
||||
LONG_L s0, TI_REGS($28)
|
||||
LONG_S sp, TI_REGS($28)
|
||||
jal deferred_smtc_ipi
|
||||
LONG_S s0, TI_REGS($28)
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
.set noat
|
||||
RESTORE_TEMP
|
||||
|
@@ -20,6 +20,7 @@
|
||||
#include <asm/stackframe.h>
|
||||
#include <asm/war.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
#define PANIC_PIC(msg) \
|
||||
.set push; \
|
||||
@@ -126,7 +127,42 @@ handle_vcei:
|
||||
|
||||
__FINIT
|
||||
|
||||
.align 5 /* 32 byte rollback region */
|
||||
LEAF(r4k_wait)
|
||||
.set push
|
||||
.set noreorder
|
||||
/* start of rollback region */
|
||||
LONG_L t0, TI_FLAGS($28)
|
||||
nop
|
||||
andi t0, _TIF_NEED_RESCHED
|
||||
bnez t0, 1f
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
.set mips3
|
||||
wait
|
||||
/* end of rollback region (the region size must be power of two) */
|
||||
.set pop
|
||||
1:
|
||||
jr ra
|
||||
END(r4k_wait)
|
||||
|
||||
.macro BUILD_ROLLBACK_PROLOGUE handler
|
||||
FEXPORT(rollback_\handler)
|
||||
.set push
|
||||
.set noat
|
||||
MFC0 k0, CP0_EPC
|
||||
PTR_LA k1, r4k_wait
|
||||
ori k0, 0x1f /* 32 byte rollback region */
|
||||
xori k0, 0x1f
|
||||
bne k0, k1, 9f
|
||||
MTC0 k0, CP0_EPC
|
||||
9:
|
||||
.set pop
|
||||
.endm
|
||||
|
||||
.align 5
|
||||
BUILD_ROLLBACK_PROLOGUE handle_int
|
||||
NESTED(handle_int, PT_SIZE, sp)
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
/*
|
||||
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
|
||||
* This prototype is copied to ebase + n*IntCtl.VS and patched
|
||||
* to invoke the handler
|
||||
*/
|
||||
BUILD_ROLLBACK_PROLOGUE except_vec_vi
|
||||
NESTED(except_vec_vi, 0, sp)
|
||||
SAVE_SOME
|
||||
SAVE_AT
|
||||
@@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
|
||||
and t0, a0, t1
|
||||
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
|
||||
mfc0 t2, CP0_TCCONTEXT
|
||||
or t0, t0, t2
|
||||
mtc0 t0, CP0_TCCONTEXT
|
||||
or t2, t0, t2
|
||||
mtc0 t2, CP0_TCCONTEXT
|
||||
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
|
||||
xor t1, t1, t0
|
||||
mtc0 t1, CP0_STATUS
|
||||
@@ -416,7 +453,11 @@ NESTED(nmi_handler, PT_SIZE, sp)
|
||||
BUILD_HANDLER tr tr sti silent /* #13 */
|
||||
BUILD_HANDLER fpe fpe fpe silent /* #15 */
|
||||
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
|
||||
#ifdef CONFIG_HARDWARE_WATCHPOINTS
|
||||
BUILD_HANDLER watch watch sti silent /* #23 */
|
||||
#else
|
||||
BUILD_HANDLER watch watch sti verbose /* #23 */
|
||||
#endif
|
||||
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
|
||||
BUILD_HANDLER mt mt sti silent /* #25 */
|
||||
BUILD_HANDLER dsp dsp sti silent /* #26 */
|
||||
|
@@ -22,6 +22,7 @@
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable-bits.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
|
@@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
|
||||
|
||||
atomic_set(&kgdb_cpu_doing_single_step, -1);
|
||||
if (remcom_in_buffer[0] == 's')
|
||||
if (kgdb_contthread)
|
||||
atomic_set(&kgdb_cpu_doing_single_step, cpu);
|
||||
atomic_set(&kgdb_cpu_doing_single_step, cpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
|
||||
/*
|
||||
* FPU Use Factor empirically derived from experiments on 34K
|
||||
*/
|
||||
#define FPUSEFACTOR 333
|
||||
#define FPUSEFACTOR 2000
|
||||
|
||||
static __init int mt_fp_affinity_init(void)
|
||||
{
|
||||
|
@@ -23,6 +23,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
unsigned int version = cpu_data[n].processor_id;
|
||||
unsigned int fp_vers = cpu_data[n].fpu_id;
|
||||
char fmt [64];
|
||||
int i;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (!cpu_isset(n, cpu_online_map))
|
||||
@@ -50,8 +51,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
|
||||
seq_printf(m, "extra interrupt vector\t: %s\n",
|
||||
cpu_has_divec ? "yes" : "no");
|
||||
seq_printf(m, "hardware watchpoint\t: %s\n",
|
||||
cpu_has_watch ? "yes" : "no");
|
||||
seq_printf(m, "hardware watchpoint\t: %s",
|
||||
cpu_has_watch ? "yes, " : "no\n");
|
||||
if (cpu_has_watch) {
|
||||
seq_printf(m, "count: %d, address/irw mask: [",
|
||||
cpu_data[n].watch_reg_count);
|
||||
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
|
||||
seq_printf(m, "%s0x%04x", i ? ", " : "" ,
|
||||
cpu_data[n].watch_reg_masks[i]);
|
||||
seq_printf(m, "]\n");
|
||||
}
|
||||
seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
|
||||
cpu_has_mips16 ? " mips16" : "",
|
||||
cpu_has_mdmx ? " mdmx" : "",
|
||||
|
@@ -54,7 +54,7 @@ void __noreturn cpu_idle(void)
|
||||
while (1) {
|
||||
tick_nohz_stop_sched_tick(1);
|
||||
while (!need_resched()) {
|
||||
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
extern void smtc_idle_loop_hook(void);
|
||||
|
||||
smtc_idle_loop_hook();
|
||||
@@ -144,17 +144,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
||||
*/
|
||||
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
|
||||
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* SMTC restores TCStatus after Status, and the CU bits
|
||||
* are aliased there.
|
||||
*/
|
||||
childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
|
||||
#endif
|
||||
clear_tsk_thread_flag(p, TIF_USEDFPU);
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/*
|
||||
* FPU affinity support is cleaner if we track the
|
||||
* user-visible CPU affinity from the very beginning.
|
||||
* The generic cpus_allowed mask will already have
|
||||
* been copied from the parent before copy_thread
|
||||
* is invoked.
|
||||
*/
|
||||
p->thread.user_cpus_allowed = p->cpus_allowed;
|
||||
clear_tsk_thread_flag(p, TIF_FPUBOUND);
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
|
@@ -46,7 +46,8 @@
|
||||
*/
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
{
|
||||
/* Nothing to do.. */
|
||||
/* Don't load the watchpoint registers for the ex-child. */
|
||||
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -167,6 +168,93 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ptrace_get_watch_regs(struct task_struct *child,
|
||||
struct pt_watch_regs __user *addr)
|
||||
{
|
||||
enum pt_watch_style style;
|
||||
int i;
|
||||
|
||||
if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
|
||||
return -EIO;
|
||||
if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
|
||||
return -EIO;
|
||||
|
||||
#ifdef CONFIG_32BIT
|
||||
style = pt_watch_style_mips32;
|
||||
#define WATCH_STYLE mips32
|
||||
#else
|
||||
style = pt_watch_style_mips64;
|
||||
#define WATCH_STYLE mips64
|
||||
#endif
|
||||
|
||||
__put_user(style, &addr->style);
|
||||
__put_user(current_cpu_data.watch_reg_use_cnt,
|
||||
&addr->WATCH_STYLE.num_valid);
|
||||
for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
|
||||
__put_user(child->thread.watch.mips3264.watchlo[i],
|
||||
&addr->WATCH_STYLE.watchlo[i]);
|
||||
__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
|
||||
&addr->WATCH_STYLE.watchhi[i]);
|
||||
__put_user(current_cpu_data.watch_reg_masks[i],
|
||||
&addr->WATCH_STYLE.watch_masks[i]);
|
||||
}
|
||||
for (; i < 8; i++) {
|
||||
__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
|
||||
__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
|
||||
__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ptrace_set_watch_regs(struct task_struct *child,
|
||||
struct pt_watch_regs __user *addr)
|
||||
{
|
||||
int i;
|
||||
int watch_active = 0;
|
||||
unsigned long lt[NUM_WATCH_REGS];
|
||||
u16 ht[NUM_WATCH_REGS];
|
||||
|
||||
if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
|
||||
return -EIO;
|
||||
if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
|
||||
return -EIO;
|
||||
/* Check the values. */
|
||||
for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
|
||||
__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
|
||||
#ifdef CONFIG_32BIT
|
||||
if (lt[i] & __UA_LIMIT)
|
||||
return -EINVAL;
|
||||
#else
|
||||
if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
|
||||
if (lt[i] & 0xffffffff80000000UL)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (lt[i] & __UA_LIMIT)
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
|
||||
if (ht[i] & ~0xff8)
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Install them. */
|
||||
for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
|
||||
if (lt[i] & 7)
|
||||
watch_active = 1;
|
||||
child->thread.watch.mips3264.watchlo[i] = lt[i];
|
||||
/* Set the G bit. */
|
||||
child->thread.watch.mips3264.watchhi[i] = ht[i];
|
||||
}
|
||||
|
||||
if (watch_active)
|
||||
set_tsk_thread_flag(child, TIF_LOAD_WATCH);
|
||||
else
|
||||
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
{
|
||||
int ret;
|
||||
@@ -238,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
case FPC_EIR: { /* implementation / version register */
|
||||
unsigned int flags;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
unsigned int irqflags;
|
||||
unsigned long irqflags;
|
||||
unsigned int mtflags;
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
@@ -440,6 +528,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
(unsigned long __user *) data);
|
||||
break;
|
||||
|
||||
case PTRACE_GET_WATCH_REGS:
|
||||
ret = ptrace_get_watch_regs(child,
|
||||
(struct pt_watch_regs __user *) addr);
|
||||
break;
|
||||
|
||||
case PTRACE_SET_WATCH_REGS:
|
||||
ret = ptrace_set_watch_regs(child,
|
||||
(struct pt_watch_regs __user *) addr);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
break;
|
||||
|
@@ -15,6 +15,7 @@
|
||||
* binaries.
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
@@ -36,47 +37,17 @@
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/bootinfo.h>
|
||||
|
||||
int ptrace_getregs(struct task_struct *child, __s64 __user *data);
|
||||
int ptrace_setregs(struct task_struct *child, __s64 __user *data);
|
||||
|
||||
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
|
||||
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
|
||||
|
||||
/*
|
||||
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
|
||||
* work. I don't know how to fix this.
|
||||
*/
|
||||
asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
||||
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
||||
compat_ulong_t caddr, compat_ulong_t cdata)
|
||||
{
|
||||
struct task_struct *child;
|
||||
int addr = caddr;
|
||||
int data = cdata;
|
||||
int ret;
|
||||
|
||||
#if 0
|
||||
printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
|
||||
(int) request, (int) pid, (unsigned long) addr,
|
||||
(unsigned long) data);
|
||||
#endif
|
||||
lock_kernel();
|
||||
if (request == PTRACE_TRACEME) {
|
||||
ret = ptrace_traceme();
|
||||
goto out;
|
||||
}
|
||||
|
||||
child = ptrace_get_task_struct(pid);
|
||||
if (IS_ERR(child)) {
|
||||
ret = PTR_ERR(child);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (request == PTRACE_ATTACH) {
|
||||
ret = ptrace_attach(child);
|
||||
goto out_tsk;
|
||||
}
|
||||
|
||||
ret = ptrace_check_attach(child, request == PTRACE_KILL);
|
||||
if (ret < 0)
|
||||
goto out_tsk;
|
||||
|
||||
switch (request) {
|
||||
/* when I and D space are separate, these will need to be fixed. */
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
@@ -214,7 +185,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
||||
if (!cpu_has_dsp) {
|
||||
tmp = 0;
|
||||
ret = -EIO;
|
||||
goto out_tsk;
|
||||
goto out;
|
||||
}
|
||||
dregs = __get_dsp_regs(child);
|
||||
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
|
||||
@@ -224,14 +195,14 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
||||
if (!cpu_has_dsp) {
|
||||
tmp = 0;
|
||||
ret = -EIO;
|
||||
goto out_tsk;
|
||||
goto out;
|
||||
}
|
||||
tmp = child->thread.dsp.dspcontrol;
|
||||
break;
|
||||
default:
|
||||
tmp = 0;
|
||||
ret = -EIO;
|
||||
goto out_tsk;
|
||||
goto out;
|
||||
}
|
||||
ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
|
||||
break;
|
||||
@@ -410,14 +381,20 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
||||
(unsigned long __user *) (unsigned long) data);
|
||||
break;
|
||||
|
||||
case PTRACE_GET_WATCH_REGS:
|
||||
ret = ptrace_get_watch_regs(child,
|
||||
(struct pt_watch_regs __user *) (unsigned long) addr);
|
||||
break;
|
||||
|
||||
case PTRACE_SET_WATCH_REGS:
|
||||
ret = ptrace_set_watch_regs(child,
|
||||
(struct pt_watch_regs __user *) (unsigned long) addr);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
break;
|
||||
}
|
||||
|
||||
out_tsk:
|
||||
put_task_struct(child);
|
||||
out:
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
}
|
||||
|
@@ -219,7 +219,7 @@ EXPORT(sysn32_call_table)
|
||||
PTR compat_sys_getrusage
|
||||
PTR compat_sys_sysinfo
|
||||
PTR compat_sys_times
|
||||
PTR sys32_ptrace
|
||||
PTR compat_sys_ptrace
|
||||
PTR sys_getuid /* 6100 */
|
||||
PTR sys_syslog
|
||||
PTR sys_getgid
|
||||
|
@@ -231,7 +231,7 @@ sys_call_table:
|
||||
PTR sys_setuid
|
||||
PTR sys_getuid
|
||||
PTR compat_sys_stime /* 4025 */
|
||||
PTR sys32_ptrace
|
||||
PTR compat_sys_ptrace
|
||||
PTR sys_alarm
|
||||
PTR sys_ni_syscall /* was sys_fstat */
|
||||
PTR sys_pause
|
||||
|
@@ -160,30 +160,33 @@ early_param("rd_size", rd_size_early);
|
||||
static unsigned long __init init_initrd(void)
|
||||
{
|
||||
unsigned long end;
|
||||
u32 *initrd_header;
|
||||
|
||||
/*
|
||||
* Board specific code or command line parser should have
|
||||
* already set up initrd_start and initrd_end. In these cases
|
||||
* perfom sanity checks and use them if all looks good.
|
||||
*/
|
||||
if (initrd_start && initrd_end > initrd_start)
|
||||
goto sanitize;
|
||||
if (!initrd_start || initrd_end <= initrd_start) {
|
||||
#ifdef CONFIG_PROBE_INITRD_HEADER
|
||||
u32 *initrd_header;
|
||||
|
||||
/*
|
||||
* See if initrd has been added to the kernel image by
|
||||
* arch/mips/boot/addinitrd.c. In that case a header is
|
||||
* prepended to initrd and is made up by 8 bytes. The fisrt
|
||||
* word is a magic number and the second one is the size of
|
||||
* initrd. Initrd start must be page aligned in any cases.
|
||||
*/
|
||||
initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
|
||||
if (initrd_header[0] != 0x494E5244)
|
||||
/*
|
||||
* See if initrd has been added to the kernel image by
|
||||
* arch/mips/boot/addinitrd.c. In that case a header is
|
||||
* prepended to initrd and is made up by 8 bytes. The first
|
||||
* word is a magic number and the second one is the size of
|
||||
* initrd. Initrd start must be page aligned in any cases.
|
||||
*/
|
||||
initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
|
||||
if (initrd_header[0] != 0x494E5244)
|
||||
goto disable;
|
||||
initrd_start = (unsigned long)(initrd_header + 2);
|
||||
initrd_end = initrd_start + initrd_header[1];
|
||||
#else
|
||||
goto disable;
|
||||
initrd_start = (unsigned long)(initrd_header + 2);
|
||||
initrd_end = initrd_start + initrd_header[1];
|
||||
#endif
|
||||
}
|
||||
|
||||
sanitize:
|
||||
if (initrd_start & ~PAGE_MASK) {
|
||||
pr_err("initrd start must be page aligned\n");
|
||||
goto disable;
|
||||
|
@@ -482,6 +482,18 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
|
||||
return err;
|
||||
}
|
||||
|
||||
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
||||
{
|
||||
memset(to, 0, sizeof *to);
|
||||
|
||||
if (copy_from_user(to, from, 3*sizeof(int)) ||
|
||||
copy_from_user(to->_sifields._pad,
|
||||
from->_sifields._pad, SI_PAD_SIZE32))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
|
||||
{
|
||||
struct sigframe32 __user *frame;
|
||||
|
@@ -121,6 +121,8 @@ asmlinkage __cpuinit void start_secondary(void)
|
||||
cpu = smp_processor_id();
|
||||
cpu_data[cpu].udelay_val = loops_per_jiffy;
|
||||
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
mp_ops->smp_finish();
|
||||
set_cpu_sibling_map(cpu);
|
||||
|
||||
|
@@ -1,4 +1,21 @@
|
||||
/* Copyright (C) 2004 Mips Technologies, Inc */
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* Copyright (C) 2004 Mips Technologies, Inc
|
||||
* Copyright (C) 2008 Kevin D. Kissell
|
||||
*/
|
||||
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/kernel.h>
|
||||
@@ -21,7 +38,6 @@
|
||||
#include <asm/time.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/smtc.h>
|
||||
#include <asm/smtc_ipi.h>
|
||||
#include <asm/smtc_proc.h>
|
||||
|
||||
/*
|
||||
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
|
||||
|
||||
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
|
||||
|
||||
/*
|
||||
* Clock interrupt "latch" buffers, per "CPU"
|
||||
*/
|
||||
|
||||
static atomic_t ipi_timer_latch[NR_CPUS];
|
||||
|
||||
/*
|
||||
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
|
||||
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
|
||||
|
||||
#define IPIBUF_PER_CPU 4
|
||||
|
||||
static struct smtc_ipi_q IPIQ[NR_CPUS];
|
||||
struct smtc_ipi_q IPIQ[NR_CPUS];
|
||||
static struct smtc_ipi_q freeIPIq;
|
||||
|
||||
|
||||
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
|
||||
* phys_cpu_present_map and the logical/physical mappings.
|
||||
*/
|
||||
|
||||
int __init mipsmt_build_cpu_map(int start_cpu_slot)
|
||||
int __init smtc_build_cpu_map(int start_cpu_slot)
|
||||
{
|
||||
int i, ntcs;
|
||||
|
||||
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
|
||||
write_tc_c0_tcstatus((read_tc_c0_tcstatus()
|
||||
& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
|
||||
| TCSTATUS_A);
|
||||
write_tc_c0_tccontext(0);
|
||||
/*
|
||||
* TCContext gets an offset from the base of the IPIQ array
|
||||
* to be used in low-level code to detect the presence of
|
||||
* an active IPI queue
|
||||
*/
|
||||
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
|
||||
/* Bind tc to vpe */
|
||||
write_tc_c0_tcbind(vpe);
|
||||
/* In general, all TCs should have the same cpu_data indications */
|
||||
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
|
||||
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
|
||||
cpu_data[cpu].vpe_id = vpe;
|
||||
cpu_data[cpu].tc_id = tc;
|
||||
/* Multi-core SMTC hasn't been tested, but be prepared */
|
||||
cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tweak to get Count registes in as close a sync as possible.
|
||||
* Value seems good for 34K-class cores.
|
||||
*/
|
||||
|
||||
void mipsmt_prepare_cpus(void)
|
||||
#define CP0_SKEW 8
|
||||
|
||||
void smtc_prepare_cpus(int cpus)
|
||||
{
|
||||
int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
|
||||
unsigned long flags;
|
||||
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
|
||||
IPIQ[i].head = IPIQ[i].tail = NULL;
|
||||
spin_lock_init(&IPIQ[i].lock);
|
||||
IPIQ[i].depth = 0;
|
||||
atomic_set(&ipi_timer_latch[i], 0);
|
||||
}
|
||||
|
||||
/* cpu_data index starts at zero */
|
||||
cpu = 0;
|
||||
cpu_data[cpu].vpe_id = 0;
|
||||
cpu_data[cpu].tc_id = 0;
|
||||
cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
|
||||
cpu++;
|
||||
|
||||
/* Report on boot-time options */
|
||||
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
|
||||
write_vpe_c0_compare(0);
|
||||
/* Propagate Config7 */
|
||||
write_vpe_c0_config7(read_c0_config7());
|
||||
write_vpe_c0_count(read_c0_count());
|
||||
write_vpe_c0_count(read_c0_count() + CP0_SKEW);
|
||||
ehb();
|
||||
}
|
||||
/* enable multi-threading within VPE */
|
||||
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
|
||||
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
|
||||
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
|
||||
{
|
||||
extern u32 kernelsp[NR_CPUS];
|
||||
long flags;
|
||||
unsigned long flags;
|
||||
int mtflags;
|
||||
|
||||
LOCK_MT_PRA();
|
||||
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
|
||||
|
||||
void smtc_init_secondary(void)
|
||||
{
|
||||
/*
|
||||
* Start timer on secondary VPEs if necessary.
|
||||
* plat_timer_setup has already have been invoked by init/main
|
||||
* on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
|
||||
* SMTC init code assigns TCs consdecutively and in ascending order
|
||||
* to across available VPEs.
|
||||
*/
|
||||
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
|
||||
((read_c0_tcbind() & TCBIND_CURVPE)
|
||||
!= cpu_data[smp_processor_id() - 1].vpe_id)){
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
|
||||
}
|
||||
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
void smtc_smp_finish(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* Lowest-numbered CPU per VPE starts a clock tick.
|
||||
* Like per_cpu_trap_init() hack, this assumes that
|
||||
* SMTC init code assigns TCs consdecutively and
|
||||
* in ascending order across available VPEs.
|
||||
*/
|
||||
if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
|
||||
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
|
||||
|
||||
printk("TC %d going on-line as CPU %d\n",
|
||||
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
|
||||
}
|
||||
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
|
||||
{
|
||||
int tcstatus;
|
||||
struct smtc_ipi *pipi;
|
||||
long flags;
|
||||
unsigned long flags;
|
||||
int mtflags;
|
||||
unsigned long tcrestart;
|
||||
extern void r4k_wait_irqoff(void), __pastwait(void);
|
||||
|
||||
if (cpu == smp_processor_id()) {
|
||||
printk("Cannot Send IPI to self!\n");
|
||||
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
|
||||
pipi->arg = (void *)action;
|
||||
pipi->dest = cpu;
|
||||
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
|
||||
if (type == SMTC_CLOCK_TICK)
|
||||
atomic_inc(&ipi_timer_latch[cpu]);
|
||||
/* If not on same VPE, enqueue and send cross-VPE interrupt */
|
||||
smtc_ipi_nq(&IPIQ[cpu], pipi);
|
||||
LOCK_CORE_PRA();
|
||||
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
|
||||
|
||||
if ((tcstatus & TCSTATUS_IXMT) != 0) {
|
||||
/*
|
||||
* Spin-waiting here can deadlock,
|
||||
* so we queue the message for the target TC.
|
||||
* If we're in the the irq-off version of the wait
|
||||
* loop, we need to force exit from the wait and
|
||||
* do a direct post of the IPI.
|
||||
*/
|
||||
if (cpu_wait == r4k_wait_irqoff) {
|
||||
tcrestart = read_tc_c0_tcrestart();
|
||||
if (tcrestart >= (unsigned long)r4k_wait_irqoff
|
||||
&& tcrestart < (unsigned long)__pastwait) {
|
||||
write_tc_c0_tcrestart(__pastwait);
|
||||
tcstatus &= ~TCSTATUS_IXMT;
|
||||
write_tc_c0_tcstatus(tcstatus);
|
||||
goto postdirect;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Otherwise we queue the message for the target TC
|
||||
* to pick up when he does a local_irq_restore()
|
||||
*/
|
||||
write_tc_c0_tchalt(0);
|
||||
UNLOCK_CORE_PRA();
|
||||
/* Try to reduce redundant timer interrupt messages */
|
||||
if (type == SMTC_CLOCK_TICK) {
|
||||
if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
|
||||
smtc_ipi_nq(&freeIPIq, pipi);
|
||||
return;
|
||||
}
|
||||
}
|
||||
smtc_ipi_nq(&IPIQ[cpu], pipi);
|
||||
} else {
|
||||
if (type == SMTC_CLOCK_TICK)
|
||||
atomic_inc(&ipi_timer_latch[cpu]);
|
||||
postdirect:
|
||||
post_direct_ipi(cpu, pipi);
|
||||
write_tc_c0_tchalt(0);
|
||||
UNLOCK_CORE_PRA();
|
||||
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
|
||||
smp_call_function_interrupt();
|
||||
}
|
||||
|
||||
DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
|
||||
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
|
||||
|
||||
void ipi_decode(struct smtc_ipi *pipi)
|
||||
{
|
||||
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
|
||||
struct clock_event_device *cd;
|
||||
void *arg_copy = pipi->arg;
|
||||
int type_copy = pipi->type;
|
||||
int ticks;
|
||||
|
||||
smtc_ipi_nq(&freeIPIq, pipi);
|
||||
switch (type_copy) {
|
||||
case SMTC_CLOCK_TICK:
|
||||
irq_enter();
|
||||
kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
|
||||
cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
|
||||
ticks = atomic_read(&ipi_timer_latch[cpu]);
|
||||
atomic_sub(ticks, &ipi_timer_latch[cpu]);
|
||||
while (ticks) {
|
||||
cd->event_handler(cd);
|
||||
ticks--;
|
||||
}
|
||||
cd = &per_cpu(mips_clockevent_device, cpu);
|
||||
cd->event_handler(cd);
|
||||
irq_exit();
|
||||
break;
|
||||
|
||||
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Similar to smtc_ipi_replay(), but invoked from context restore,
|
||||
* so it reuses the current exception frame rather than set up a
|
||||
* new one with self_ipi.
|
||||
*/
|
||||
|
||||
void deferred_smtc_ipi(void)
|
||||
{
|
||||
struct smtc_ipi *pipi;
|
||||
unsigned long flags;
|
||||
/* DEBUG */
|
||||
int q = smp_processor_id();
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* Test is not atomic, but much faster than a dequeue,
|
||||
* and the vast majority of invocations will have a null queue.
|
||||
* If irq_disabled when this was called, then any IPIs queued
|
||||
* after we test last will be taken on the next irq_enable/restore.
|
||||
* If interrupts were enabled, then any IPIs added after the
|
||||
* last test will be taken directly.
|
||||
*/
|
||||
if (IPIQ[q].head != NULL) {
|
||||
while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
|
||||
/* ipi_decode() should be called with interrupts off */
|
||||
local_irq_save(flags);
|
||||
|
||||
while (IPIQ[cpu].head != NULL) {
|
||||
struct smtc_ipi_q *q = &IPIQ[cpu];
|
||||
struct smtc_ipi *pipi;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* It may be possible we'll come in with interrupts
|
||||
* already enabled.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
|
||||
spin_lock(&q->lock);
|
||||
pipi = __smtc_ipi_dq(q);
|
||||
spin_unlock(&q->lock);
|
||||
if (pipi != NULL)
|
||||
ipi_decode(pipi);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
/*
|
||||
* The use of the __raw_local restore isn't
|
||||
* as obviously necessary here as in smtc_ipi_replay(),
|
||||
* but it's more efficient, given that we're already
|
||||
* running down the IPI queue.
|
||||
*/
|
||||
__raw_local_irq_restore(flags);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
|
||||
struct smtc_ipi *pipi;
|
||||
unsigned long tcstatus;
|
||||
int sent;
|
||||
long flags;
|
||||
unsigned long flags;
|
||||
unsigned int mtflags;
|
||||
unsigned int vpflags;
|
||||
|
||||
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
|
||||
|
||||
/*
|
||||
* SMTC-specific hacks invoked from elsewhere in the kernel.
|
||||
*
|
||||
* smtc_ipi_replay is called from raw_local_irq_restore which is only ever
|
||||
* called with interrupts disabled. We do rely on interrupts being disabled
|
||||
* here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
|
||||
* result in a recursive call to raw_local_irq_restore().
|
||||
*/
|
||||
|
||||
static void __smtc_ipi_replay(void)
|
||||
/*
|
||||
* smtc_ipi_replay is called from raw_local_irq_restore
|
||||
*/
|
||||
|
||||
void smtc_ipi_replay(void)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* To the extent that we've ever turned interrupts off,
|
||||
* we may have accumulated deferred IPIs. This is subtle.
|
||||
* If we use the smtc_ipi_qdepth() macro, we'll get an
|
||||
* exact number - but we'll also disable interrupts
|
||||
* and create a window of failure where a new IPI gets
|
||||
* queued after we test the depth but before we re-enable
|
||||
* interrupts. So long as IXMT never gets set, however,
|
||||
* we should be OK: If we pick up something and dispatch
|
||||
* it here, that's great. If we see nothing, but concurrent
|
||||
* with this operation, another TC sends us an IPI, IXMT
|
||||
* is clear, and we'll handle it as a real pseudo-interrupt
|
||||
* and not a pseudo-pseudo interrupt.
|
||||
* and not a pseudo-pseudo interrupt. The important thing
|
||||
* is to do the last check for queued message *after* the
|
||||
* re-enabling of interrupts.
|
||||
*/
|
||||
if (IPIQ[cpu].depth > 0) {
|
||||
while (1) {
|
||||
struct smtc_ipi_q *q = &IPIQ[cpu];
|
||||
struct smtc_ipi *pipi;
|
||||
extern void self_ipi(struct smtc_ipi *);
|
||||
while (IPIQ[cpu].head != NULL) {
|
||||
struct smtc_ipi_q *q = &IPIQ[cpu];
|
||||
struct smtc_ipi *pipi;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock(&q->lock);
|
||||
pipi = __smtc_ipi_dq(q);
|
||||
spin_unlock(&q->lock);
|
||||
if (!pipi)
|
||||
break;
|
||||
/*
|
||||
* It's just possible we'll come in with interrupts
|
||||
* already enabled.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
|
||||
spin_lock(&q->lock);
|
||||
pipi = __smtc_ipi_dq(q);
|
||||
spin_unlock(&q->lock);
|
||||
/*
|
||||
** But use a raw restore here to avoid recursion.
|
||||
*/
|
||||
__raw_local_irq_restore(flags);
|
||||
|
||||
if (pipi) {
|
||||
self_ipi(pipi);
|
||||
smtc_cpu_stats[cpu].selfipis++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void smtc_ipi_replay(void)
|
||||
{
|
||||
raw_local_irq_disable();
|
||||
__smtc_ipi_replay();
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(smtc_ipi_replay);
|
||||
|
||||
void smtc_idle_loop_hook(void)
|
||||
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Now that we limit outstanding timer IPIs, check for hung TC
|
||||
*/
|
||||
for (tc = 0; tc < NR_CPUS; tc++) {
|
||||
/* Don't check ourself - we'll dequeue IPIs just below */
|
||||
if ((tc != smp_processor_id()) &&
|
||||
atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
|
||||
if (clock_hang_reported[tc] == 0) {
|
||||
pdb_msg += sprintf(pdb_msg,
|
||||
"TC %d looks hung with timer latch at %d\n",
|
||||
tc, atomic_read(&ipi_timer_latch[tc]));
|
||||
clock_hang_reported[tc]++;
|
||||
}
|
||||
}
|
||||
}
|
||||
emt(mtflags);
|
||||
local_irq_restore(flags);
|
||||
if (pdb_msg != &id_ho_db_msg[0])
|
||||
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
|
||||
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
|
||||
|
||||
/*
|
||||
* Replay any accumulated deferred IPIs. If "Instant Replay"
|
||||
* is in use, there should never be any.
|
||||
*/
|
||||
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
__smtc_ipi_replay();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
|
||||
smtc_ipi_replay();
|
||||
}
|
||||
|
||||
void smtc_soft_dump(void)
|
||||
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
|
||||
printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
|
||||
}
|
||||
smtc_ipi_qdump();
|
||||
printk("Timer IPI Backlogs:\n");
|
||||
for (i=0; i < NR_CPUS; i++) {
|
||||
printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
|
||||
}
|
||||
printk("%d Recoveries of \"stolen\" FPU\n",
|
||||
atomic_read(&smtc_fpu_recoveries));
|
||||
}
|
arch/mips/kernel/traps.c
@@ -42,10 +42,14 @@
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>

extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
@@ -373,8 +377,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
do_exit(SIGSEGV);
}

extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
" .section __dbe_table, \"a\"\n"
@@ -822,8 +826,10 @@ static void mt_ase_fp_affinity(void)
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;

cpus_and(tmask, current->thread.user_cpus_allowed,
mt_fpu_cpumask);
current->thread.user_cpus_allowed
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed(current, tmask);
set_thread_flag(TIF_FPUBOUND);
}
@@ -907,13 +913,26 @@ asmlinkage void do_mdmx(struct pt_regs *regs)

asmlinkage void do_watch(struct pt_regs *regs)
{
u32 cause;

/*
* We use the watch exception where available to detect stack
* overflows.
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
dump_tlb_all();
show_regs(regs);
panic("Caught WATCH exception - probably caused by stack overflow.");
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);

/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set, clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
force_sig(SIGTRAP, current);
} else
mips_clear_watch_registers();
}
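A minimal illustrative sketch (not part of this commit): the Cause.WP clearing performed by do_watch() above, restated with a named constant. The EX_CAUSEF_WP name and helper are assumptions introduced here for clarity only.

/* Hypothetical helper: clear Cause.WP (bit 22) so the watch
 * exception does not immediately retrigger, as do_watch() does. */
#define EX_CAUSEF_WP	(1u << 22)

static inline void ex_clear_cause_wp(void)
{
	write_c0_cause(read_c0_cause() & ~EX_CAUSEF_WP);
}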

asmlinkage void do_mcheck(struct pt_regs *regs)
@@ -1200,7 +1219,7 @@ void *set_except_vector(int n, void *addr)
if (n == 0 && cpu_has_divec) {
*(u32 *)(ebase + 0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
flush_icache_range(ebase + 0x200, ebase + 0x204);
local_flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
}
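A small illustrative sketch (not part of this commit): how the jump word written at ebase + 0x200 above is formed. A MIPS "j" instruction carries opcode 000010 in its top six bits and a 26-bit word index of the target within the current 256 MB region, which is why the handler address is shifted right by two and masked before being OR'd with 0x08000000. The helper name is hypothetical.

/* Hypothetical helper restating the encoding used above. */
static inline u32 mips_j_insn(unsigned long target)
{
	return 0x08000000 | (0x03ffffff & ((u32)target >> 2));
}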
@@ -1251,6 +1270,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)

extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
char *vec_start = (cpu_wait == r4k_wait) ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1280,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
const int handler_len = &except_vec_vi_end - &except_vec_vi;
const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
const int handler_len = &except_vec_vi_end - vec_start;
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;

if (handler_len > VECTORSPACING) {
/*
@@ -1272,7 +1294,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
panic("VECTORSPACING too small");
}

memcpy(b, &except_vec_vi, handler_len);
memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */

@@ -1283,7 +1305,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
@@ -1295,7 +1318,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
*w = 0;
flush_icache_range((unsigned long)b, (unsigned long)(b+8));
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}

return (void *)old_handler;
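An illustrative sketch (not part of this commit) of the immediate patching done above at lui_offset and ori_offset: the copied stub contains a lui/ori pair, and the 32-bit handler address is split into its upper and lower halfwords, each written into the low 16 bits of the corresponding instruction word. The helper name is hypothetical.

/* Hypothetical helper restating the patching performed above. */
static void patch_vi_handler_address(u32 *lui_insn, u32 *ori_insn, u32 handler)
{
	*lui_insn = (*lui_insn & 0xffff0000) | ((handler >> 16) & 0xffff);
	*ori_insn = (*ori_insn & 0xffff0000) | (handler & 0xffff);
}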
@@ -1515,7 +1539,7 @@ void __cpuinit per_cpu_trap_init(void)
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
flush_icache_range(ebase + offset, ebase + offset + size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __cpuinitdata =
@@ -1552,6 +1576,10 @@ void __init trap_init(void)
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;

check_wait();
rollback = (cpu_wait == r4k_wait);

#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
@@ -1616,7 +1644,7 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();

set_except_vector(0, handle_int);
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
@@ -1680,6 +1708,8 @@ void __init trap_init(void)
signal32_init();
#endif

flush_icache_range(ebase, ebase + 0x400);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();

sort_extable(__start___dbe_table, __stop___dbe_table);
}

arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.text.*)
*(.fixup)
*(.gnu.warning)
} :text = 0
arch/mips/kernel/watch.c (new file, 188 lines)
@@ -0,0 +1,188 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 David Daney
*/

#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/watch.h>

/*
* Install the watch registers for the current thread. A maximum of
* four registers are installed although the machine may have more.
*/
void mips_install_watch_registers(void)
{
struct mips3264_watch_reg_state *watches =
&current->thread.watch.mips3264;
switch (current_cpu_data.watch_reg_use_cnt) {
default:
BUG();
case 4:
write_c0_watchlo3(watches->watchlo[3]);
/* Write 1 to the I, R, and W bits to clear them, and
1 to G so all ASIDs are trapped. */
write_c0_watchhi3(0x40000007 | watches->watchhi[3]);
case 3:
write_c0_watchlo2(watches->watchlo[2]);
write_c0_watchhi2(0x40000007 | watches->watchhi[2]);
case 2:
write_c0_watchlo1(watches->watchlo[1]);
write_c0_watchhi1(0x40000007 | watches->watchhi[1]);
case 1:
write_c0_watchlo0(watches->watchlo[0]);
write_c0_watchhi0(0x40000007 | watches->watchhi[0]);
}
}
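An illustrative note (not part of this commit) on the 0x40000007 value written into each WatchHi register above. The field layout is taken from the MIPS32/64 privileged architecture and should be treated as an assumption here; the EX_WATCHHI_* names are hypothetical.

#define EX_WATCHHI_G	(1u << 30)	/* match regardless of ASID */
#define EX_WATCHHI_I	(1u << 2)	/* instruction fetch hit, write 1 to clear */
#define EX_WATCHHI_R	(1u << 1)	/* load hit, write 1 to clear */
#define EX_WATCHHI_W	(1u << 0)	/* store hit, write 1 to clear */
/* EX_WATCHHI_G | EX_WATCHHI_I | EX_WATCHHI_R | EX_WATCHHI_W == 0x40000007 */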

/*
* Read back the watchhi registers so the user space debugger has
* access to the I, R, and W bits. A maximum of four registers are
* read although the machine may have more.
*/
void mips_read_watch_registers(void)
{
struct mips3264_watch_reg_state *watches =
&current->thread.watch.mips3264;
switch (current_cpu_data.watch_reg_use_cnt) {
default:
BUG();
case 4:
watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff);
case 3:
watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff);
case 2:
watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff);
case 1:
watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff);
}
if (current_cpu_data.watch_reg_use_cnt == 1 &&
(watches->watchhi[0] & 7) == 0) {
/* Pathological case of release 1 architecture that
* doesn't set the condition bits. We assume that
* since we got here, the watch condition was met and
* signal that the conditions requested in watchlo
* were met. */
watches->watchhi[0] |= (watches->watchlo[0] & 7);
}
}

/*
* Disable all watch registers. Although only four registers are
* installed, all are cleared to eliminate the possibility of endless
* looping in the watch handler.
*/
void mips_clear_watch_registers(void)
{
switch (current_cpu_data.watch_reg_count) {
default:
BUG();
case 8:
write_c0_watchlo7(0);
case 7:
write_c0_watchlo6(0);
case 6:
write_c0_watchlo5(0);
case 5:
write_c0_watchlo4(0);
case 4:
write_c0_watchlo3(0);
case 3:
write_c0_watchlo2(0);
case 2:
write_c0_watchlo1(0);
case 1:
write_c0_watchlo0(0);
}
}

__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c)
{
unsigned int t;

if ((c->options & MIPS_CPU_WATCH) == 0)
return;
/*
* Check which of the I,R and W bits are supported, then
* disable the register.
*/
write_c0_watchlo0(7);
t = read_c0_watchlo0();
write_c0_watchlo0(0);
c->watch_reg_masks[0] = t & 7;

/* Write the mask bits and read them back to determine which
* can be used. */
c->watch_reg_count = 1;
c->watch_reg_use_cnt = 1;
t = read_c0_watchhi0();
write_c0_watchhi0(t | 0xff8);
t = read_c0_watchhi0();
c->watch_reg_masks[0] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo1(7);
t = read_c0_watchlo1();
write_c0_watchlo1(0);
c->watch_reg_masks[1] = t & 7;

c->watch_reg_count = 2;
c->watch_reg_use_cnt = 2;
t = read_c0_watchhi1();
write_c0_watchhi1(t | 0xff8);
t = read_c0_watchhi1();
c->watch_reg_masks[1] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo2(7);
t = read_c0_watchlo2();
write_c0_watchlo2(0);
c->watch_reg_masks[2] = t & 7;

c->watch_reg_count = 3;
c->watch_reg_use_cnt = 3;
t = read_c0_watchhi2();
write_c0_watchhi2(t | 0xff8);
t = read_c0_watchhi2();
c->watch_reg_masks[2] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

write_c0_watchlo3(7);
t = read_c0_watchlo3();
write_c0_watchlo3(0);
c->watch_reg_masks[3] = t & 7;

c->watch_reg_count = 4;
c->watch_reg_use_cnt = 4;
t = read_c0_watchhi3();
write_c0_watchhi3(t | 0xff8);
t = read_c0_watchhi3();
c->watch_reg_masks[3] |= (t & 0xff8);
if ((t & 0x80000000) == 0)
return;

/* We use at most 4, but probe and report up to 8. */
c->watch_reg_count = 5;
t = read_c0_watchhi4();
if ((t & 0x80000000) == 0)
return;

c->watch_reg_count = 6;
t = read_c0_watchhi5();
if ((t & 0x80000000) == 0)
return;

c->watch_reg_count = 7;
t = read_c0_watchhi6();
if ((t & 0x80000000) == 0)
return;

c->watch_reg_count = 8;
}
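Purely illustrative (not part of this commit): the probe above repeats the same steps for watch registers 0 through 3. Since the coprocessor 0 accessors are per-register macros, one way the pattern could be expressed is a token-pasting helper; PROBE_ONE_WATCH is hypothetical and omits the WatchHi M-bit (0x80000000) check, which the caller would still perform to decide whether another register follows.

/* Hypothetical sketch of the per-register probe sequence above. */
#define PROBE_ONE_WATCH(c, n, idx)					\
do {									\
	unsigned int _t;						\
	write_c0_watchlo##n(7);						\
	_t = read_c0_watchlo##n();					\
	write_c0_watchlo##n(0);						\
	(c)->watch_reg_masks[idx] = _t & 7;				\
	(c)->watch_reg_count = (idx) + 1;				\
	(c)->watch_reg_use_cnt = (idx) + 1;				\
	_t = read_c0_watchhi##n();					\
	write_c0_watchhi##n(_t | 0xff8);				\
	_t = read_c0_watchhi##n();					\
	(c)->watch_reg_masks[idx] |= (_t & 0xff8);			\
} while (0)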