Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (76 commits)
  [ARM] 4002/1: S3C24XX: leave parent IRQs unmasked
  [ARM] 4001/1: S3C24XX: shorten reboot time
  [ARM] 3983/2: remove unused argument to __bug()
  [ARM] 4000/1: Osiris: add third serial port in
  [ARM] 3999/1: RX3715: suspend to RAM support
  [ARM] 3998/1: VR1000: LED platform devices
  [ARM] 3995/1: iop13xx: add iop13xx support
  [ARM] 3968/1: iop13xx: add iop13xx_defconfig
  [ARM] Update mach-types
  [ARM] Allow gcc to optimise arm_add_memory a little more
  [ARM] 3991/1: i.MX/MX1 high resolution time source
  [ARM] 3990/1: i.MX/MX1 more precise PLL decode
  [ARM] 3986/1: H1940: suspend to RAM support
  [ARM] 3985/1: ixp4xx clocksource cleanup
  [ARM] 3984/1: ixp4xx/nslu2: Fix disk LED numbering (take 2)
  [ARM] 3994/1: ixp23xx: fix handling of pci master aborts
  [ARM] 3981/1: sched_clock for PXA2xx
  [ARM] 3980/1: extend the ARM Versatile sched_clock implementation from 32 to 63 bit
  [ARM] 3979/1: extend the SA11x0 sched_clock implementation from 32 to 63 bit period
  [ARM] 3978/1: macro to provide a 63-bit value from a 32-bit hardware counter
  ...
@@ -24,7 +24,9 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
 
-obj-$(CONFIG_IWMMXT)		+= iwmmxt.o iwmmxt-notifier.o
+obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
+obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
@@ -12,7 +12,6 @@
  */
 #include <linux/module.h>
 #include <linux/poll.h>
-#include <linux/timer.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/miscdevice.h>
@@ -26,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/completion.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 
 #include <asm/apm.h>	/* apm_power_info */
 #include <asm/system.h>
@@ -71,7 +71,8 @@ struct apm_user {
 #define SUSPEND_PENDING	1		/* suspend pending read */
 #define SUSPEND_READ	2		/* suspend read, pending ack */
 #define SUSPEND_ACKED	3		/* suspend acked */
-#define SUSPEND_DONE	4		/* suspend completed */
+#define SUSPEND_WAIT	4		/* waiting for suspend */
+#define SUSPEND_DONE	5		/* suspend completed */
 
 	struct apm_queue queue;
 };
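For orientation (a reading aid, not part of the patch): SUSPEND_WAIT is the one new state, for a user that initiated the suspend itself, and SUSPEND_DONE is renumbered to make room. The per-user transitions, as implemented in the hunks below:

```c
/*
 * Reading aid only -- transitions as implemented in this patch:
 *
 *   SUSPEND_NONE    -> SUSPEND_PENDING   queue_suspend_event(): writer notified
 *   SUSPEND_PENDING -> SUSPEND_READ      apm_read(): user consumed the event
 *   SUSPEND_READ    -> SUSPEND_ACKED     apm_ioctl(): user acknowledged
 *   SUSPEND_NONE    -> SUSPEND_WAIT      apm_ioctl(): user initiated the suspend
 *   SUSPEND_ACKED   -> SUSPEND_DONE      apm_suspend(): resume finished
 *   SUSPEND_WAIT    -> SUSPEND_DONE      apm_suspend(): resume finished
 *   SUSPEND_DONE    -> SUSPEND_NONE      apm_ioctl(): state reset on return
 */
```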
@@ -101,6 +102,7 @@ static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
 static DEFINE_SPINLOCK(kapmd_queue_lock);
 static struct apm_queue kapmd_queue;
 
+static DEFINE_MUTEX(state_lock);
+
 static const char driver_version[] = "1.13";	/* no spaces */
@@ -148,40 +150,62 @@ static void queue_add_event(struct apm_queue *q, apm_event_t event)
 	q->events[q->event_head] = event;
 }
 
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
-{
-	if (as->suser && as->writer) {
-		switch (event) {
-		case APM_SYS_SUSPEND:
-		case APM_USER_SUSPEND:
-			/*
-			 * If this user already has a suspend pending,
-			 * don't queue another one.
-			 */
-			if (as->suspend_state != SUSPEND_NONE)
-				return;
-
-			as->suspend_state = SUSPEND_PENDING;
-			suspends_pending++;
-			break;
-		}
-	}
-	queue_add_event(&as->queue, event);
-}
-
-static void queue_event(apm_event_t event, struct apm_user *sender)
+static void queue_event(apm_event_t event)
 {
 	struct apm_user *as;
 
 	down_read(&user_list_lock);
 	list_for_each_entry(as, &apm_user_list, list) {
-		if (as != sender && as->reader)
-			queue_event_one_user(as, event);
+		if (as->reader)
+			queue_add_event(&as->queue, event);
 	}
 	up_read(&user_list_lock);
 	wake_up_interruptible(&apm_waitqueue);
 }
 
+/*
+ * queue_suspend_event - queue an APM suspend event.
+ *
+ * Check that we're in a state where we can suspend.  If not,
+ * return -EBUSY.  Otherwise, queue an event to all "writer"
+ * users.  If there are no "writer" users, return '1' to
+ * indicate that we can immediately suspend.
+ */
+static int queue_suspend_event(apm_event_t event, struct apm_user *sender)
+{
+	struct apm_user *as;
+	int ret = 1;
+
+	mutex_lock(&state_lock);
+	down_read(&user_list_lock);
+
+	/*
+	 * If a thread is still processing, we can't suspend, so reject
+	 * the request.
+	 */
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as != sender && as->reader && as->writer && as->suser &&
+		    as->suspend_state != SUSPEND_NONE) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
+
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as != sender && as->reader && as->writer && as->suser) {
+			as->suspend_state = SUSPEND_PENDING;
+			suspends_pending++;
+			queue_add_event(&as->queue, event);
+			ret = 0;
+		}
+	}
+ out:
+	up_read(&user_list_lock);
+	mutex_unlock(&state_lock);
+	wake_up_interruptible(&apm_waitqueue);
+	return ret;
+}
+
 static void apm_suspend(void)
 {
 	struct apm_user *as;
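A note on the new helper (not from the patch itself): queue_suspend_event() reports a tri-state result, and both of its callers in this commit consume it the same way. A minimal sketch of that calling convention, reusing the names above — compare apm_ioctl() and kapmd() below:

```c
/* Sketch only: mirrors how apm_ioctl() and kapmd() use the return value. */
int err = queue_suspend_event(APM_USER_SUSPEND, as);
if (err < 0) {
	/* -EBUSY: an earlier suspend is still being acknowledged */
} else if (err > 0) {
	apm_suspend();	/* no "writer" users exist: suspend immediately */
} else {
	/* 0: writers were queued the event; wait for their acks */
}
```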
@@ -191,17 +215,22 @@ static void apm_suspend(void)
 	 * Anyone on the APM queues will think we're still suspended.
 	 * Send a message so everyone knows we're now awake again.
 	 */
-	queue_event(APM_NORMAL_RESUME, NULL);
+	queue_event(APM_NORMAL_RESUME);
 
 	/*
 	 * Finally, wake up anyone who is sleeping on the suspend.
 	 */
+	mutex_lock(&state_lock);
 	down_read(&user_list_lock);
 	list_for_each_entry(as, &apm_user_list, list) {
-		as->suspend_result = err;
-		as->suspend_state = SUSPEND_DONE;
+		if (as->suspend_state == SUSPEND_WAIT ||
+		    as->suspend_state == SUSPEND_ACKED) {
+			as->suspend_result = err;
+			as->suspend_state = SUSPEND_DONE;
+		}
 	}
 	up_read(&user_list_lock);
+	mutex_unlock(&state_lock);
 
 	wake_up(&apm_suspend_waitqueue);
 }
@@ -227,8 +256,11 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
 		if (copy_to_user(buf, &event, sizeof(event)))
 			break;
 
-		if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+		mutex_lock(&state_lock);
+		if (as->suspend_state == SUSPEND_PENDING &&
+		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
 			as->suspend_state = SUSPEND_READ;
+		mutex_unlock(&state_lock);
 
 		buf += sizeof(event);
 		i -= sizeof(event);
@@ -270,9 +302,13 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 
 	switch (cmd) {
 	case APM_IOC_SUSPEND:
+		mutex_lock(&state_lock);
+
 		as->suspend_result = -EINTR;
 
 		if (as->suspend_state == SUSPEND_READ) {
+			int pending;
+
 			/*
 			 * If we read a suspend command from /dev/apm_bios,
 			 * then the corresponding APM_IOC_SUSPEND ioctl is
@@ -280,47 +316,73 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 			 */
 			as->suspend_state = SUSPEND_ACKED;
 			suspends_pending--;
+			pending = suspends_pending == 0;
+			mutex_unlock(&state_lock);
+
+			/*
+			 * If there are no further acknowledges required,
+			 * suspend the system.
+			 */
+			if (pending)
+				apm_suspend();
+
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 *
+			 * Note: we need to ensure that the PM subsystem does
+			 * not kick us out of the wait when it suspends the
+			 * threads.
+			 */
+			flags = current->flags;
+			current->flags |= PF_NOFREEZE;
+
+			wait_event(apm_suspend_waitqueue,
+				   as->suspend_state == SUSPEND_DONE);
 		} else {
+			as->suspend_state = SUSPEND_WAIT;
+			mutex_unlock(&state_lock);
+
 			/*
 			 * Otherwise it is a request to suspend the system.
 			 * Queue an event for all readers, and expect an
 			 * acknowledge from all writers who haven't already
 			 * acknowledged.
 			 */
-			queue_event(APM_USER_SUSPEND, as);
-		}
-
-		/*
-		 * If there are no further acknowledges required, suspend
-		 * the system.
-		 */
-		if (suspends_pending == 0)
-			apm_suspend();
+			err = queue_suspend_event(APM_USER_SUSPEND, as);
+			if (err < 0) {
+				/*
+				 * Avoid taking the lock here - this
+				 * should be fine.
+				 */
+				as->suspend_state = SUSPEND_NONE;
+				break;
+			}
+
+			if (err > 0)
+				apm_suspend();
 
-		/*
-		 * Wait for the suspend/resume to complete. If there are
-		 * pending acknowledges, we wait here for them.
-		 *
-		 * Note that we need to ensure that the PM subsystem does
-		 * not kick us out of the wait when it suspends the threads.
-		 */
-		flags = current->flags;
-		current->flags |= PF_NOFREEZE;
-
-		wait_event(apm_suspend_waitqueue,
-			   as->suspend_state == SUSPEND_DONE);
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 *
+			 * Note: we need to ensure that the PM subsystem does
+			 * not kick us out of the wait when it suspends the
+			 * threads.
+			 */
+			flags = current->flags;
+			current->flags |= PF_NOFREEZE;
+
+			/*
+			 * Note: do not allow a thread which is acking the suspend
+			 * to escape until the resume is complete.
+			 */
+			if (as->suspend_state == SUSPEND_ACKED)
+				wait_event(apm_suspend_waitqueue,
+					   as->suspend_state == SUSPEND_DONE);
+			else
+				wait_event_interruptible(apm_suspend_waitqueue,
+					 as->suspend_state == SUSPEND_DONE);
+		}
 
 		current->flags = flags;
 
+		mutex_lock(&state_lock);
 		err = as->suspend_result;
 		as->suspend_state = SUSPEND_NONE;
+		mutex_unlock(&state_lock);
 		break;
 	}
@@ -330,6 +392,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 static int apm_release(struct inode * inode, struct file * filp)
 {
 	struct apm_user *as = filp->private_data;
+	int pending = 0;
+
 	filp->private_data = NULL;
 
 	down_write(&user_list_lock);
@@ -342,11 +406,14 @@ static int apm_release(struct inode * inode, struct file * filp)
 	 * need to balance suspends_pending, which means the
 	 * possibility of sleeping.
 	 */
+	mutex_lock(&state_lock);
 	if (as->suspend_state != SUSPEND_NONE) {
 		suspends_pending -= 1;
-		if (suspends_pending == 0)
-			apm_suspend();
+		pending = suspends_pending == 0;
 	}
+	mutex_unlock(&state_lock);
+	if (pending)
+		apm_suspend();
 
 	kfree(as);
 	return 0;
@@ -470,6 +537,7 @@ static int kapmd(void *arg)
 {
 	do {
 		apm_event_t event;
+		int ret;
 
 		wait_event_interruptible(kapmd_wait,
 			!queue_empty(&kapmd_queue) || kthread_should_stop());
@@ -489,13 +557,20 @@ static int kapmd(void *arg)
 
 		case APM_LOW_BATTERY:
 		case APM_POWER_STATUS_CHANGE:
-			queue_event(event, NULL);
+			queue_event(event);
 			break;
 
 		case APM_USER_SUSPEND:
 		case APM_SYS_SUSPEND:
-			queue_event(event, NULL);
-			if (suspends_pending == 0)
+			ret = queue_suspend_event(event, NULL);
+			if (ret < 0) {
+				/*
+				 * We were busy.  Try again in 50ms.
+				 */
+				queue_add_event(&kapmd_queue, event);
+				msleep(50);
+			}
+			if (ret > 0)
 				apm_suspend();
 			break;
 
@@ -15,6 +15,7 @@
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/procinfo.h>
 
 /*
  * Make sure that the compiler and target are compatible.
@@ -529,7 +529,7 @@ static void ecard_dump_irq_state(void)
 	}
 }
 
-static void ecard_check_lockup(struct irqdesc *desc)
+static void ecard_check_lockup(struct irq_desc *desc)
 {
 	static unsigned long last;
 	static int lockup;
@@ -567,7 +567,7 @@ static void ecard_check_lockup(struct irqdesc *desc)
 }
 
 static void
-ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	ecard_t *ec;
 	int called = 0;
@@ -585,7 +585,7 @@ ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
 		pending = ecard_default_ops.irqpending(ec);
 
 		if (pending) {
-			struct irqdesc *d = irq_desc + ec->irq;
+			struct irq_desc *d = irq_desc + ec->irq;
 			desc_handle_irq(ec->irq, d);
 			called ++;
 		}
@@ -609,7 +609,7 @@ static unsigned char first_set[] =
 };
 
 static void
-ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc)
 {
 	const unsigned int statusmask = 15;
 	unsigned int status;
@@ -1022,7 +1022,7 @@ ecard_probe(int slot, card_type_t type)
 	if (slot < 8) {
 		ec->irq = 32 + slot;
 		set_irq_chip(ec->irq, &ecard_chip);
-		set_irq_handler(ec->irq, do_level_IRQ);
+		set_irq_handler(ec->irq, handle_level_irq);
 		set_irq_flags(ec->irq, IRQF_VALID);
 	}
@@ -589,10 +589,6 @@ ENTRY(__switch_to)
 	strex	r5, r4, [ip]			@ Clear exclusive monitor
 #endif
 #endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	mra	r4, r5, acc0
-	stmia	ip, {r4, r5}
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
@@ -601,11 +597,6 @@ ENTRY(__switch_to)
 #endif
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	add	r4, r2, #TI_CPU_DOMAIN + 40	@ cpu_context_save->extra
-	ldmib	r4, {r4, r5}
-	mar	acc0, r4, r5
-#endif
 	mov	r5, r0
 	add	r4, r2, #TI_CPU_SAVE
@@ -16,7 +16,6 @@
 
 #include <asm/assembler.h>
 #include <asm/mach-types.h>
-#include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
@@ -16,7 +16,6 @@
 
 #include <asm/assembler.h>
 #include <asm/domain.h>
-#include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/memory.h>
@@ -112,7 +112,7 @@ static struct irq_desc bad_irq_desc = {
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct irqdesc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_desc + irq;
 
 	/*
 	 * Some hardware gives randomly wrong interrupts.  Rather
|
||||
|
||||
void set_irq_flags(unsigned int irq, unsigned int iflags)
|
||||
{
|
||||
struct irqdesc *desc;
|
||||
struct irq_desc *desc;
|
||||
unsigned long flags;
|
||||
|
||||
if (irq >= NR_IRQS) {
|
||||
@@ -171,7 +171,7 @@ void __init init_IRQ(void)
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
|
||||
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
|
||||
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
|
||||
{
|
||||
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
|
||||
|
||||
@@ -190,7 +190,7 @@ void migrate_irqs(void)
 	unsigned int i, cpu = smp_processor_id();
 
 	for (i = 0; i < NR_IRQS; i++) {
-		struct irqdesc *desc = irq_desc + i;
+		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
 			unsigned int newcpu = any_online_cpu(desc->affinity);
@@ -1,63 +0,0 @@
-/*
- * linux/arch/arm/kernel/iwmmxt-notifier.c
- *
- * XScale iWMMXt (Concan) context switching and handling
- *
- * Initial code:
- * Copyright (c) 2003, Intel Corporation
- *
- * Full lazy switching support, optimizations and more, by Nicolas Pitre
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/thread_notify.h>
-#include <asm/io.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
-	struct thread_info *thread = t;
-
-	switch (cmd) {
-	case THREAD_NOTIFY_FLUSH:
-		/*
-		 * flush_thread() zeroes thread->fpstate, so no need
-		 * to do anything here.
-		 *
-		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
-		 * initialised state information on the first fault.
-		 */
-
-	case THREAD_NOTIFY_RELEASE:
-		iwmmxt_task_release(thread);
-		break;
-
-	case THREAD_NOTIFY_SWITCH:
-		iwmmxt_task_switch(thread);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block iwmmxt_notifier_block = {
-	.notifier_call	= iwmmxt_do,
-};
-
-static int __init iwmmxt_init(void)
-{
-	thread_register_notifier(&iwmmxt_notifier_block);
-
-	return 0;
-}
-
-late_initcall(iwmmxt_init);
@@ -280,67 +280,6 @@ void show_fpregs(struct user_fp *regs)
 		(unsigned long)regs->fpcr);
 }
 
-/*
- * Task structure and kernel stack allocation.
- */
-struct thread_info_list {
-	unsigned long *head;
-	unsigned int nr;
-};
-
-static DEFINE_PER_CPU(struct thread_info_list, thread_info_list) = { NULL, 0 };
-
-#define EXTRA_TASK_STRUCT	4
-
-struct thread_info *alloc_thread_info(struct task_struct *task)
-{
-	struct thread_info *thread = NULL;
-
-	if (EXTRA_TASK_STRUCT) {
-		struct thread_info_list *th = &get_cpu_var(thread_info_list);
-		unsigned long *p = th->head;
-
-		if (p) {
-			th->head = (unsigned long *)p[0];
-			th->nr -= 1;
-		}
-		put_cpu_var(thread_info_list);
-
-		thread = (struct thread_info *)p;
-	}
-
-	if (!thread)
-		thread = (struct thread_info *)
-			 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	/*
-	 * The stack must be cleared if you want SYSRQ-T to
-	 * give sensible stack usage information
-	 */
-	if (thread)
-		memzero(thread, THREAD_SIZE);
-#endif
-	return thread;
-}
-
-void free_thread_info(struct thread_info *thread)
-{
-	if (EXTRA_TASK_STRUCT) {
-		struct thread_info_list *th = &get_cpu_var(thread_info_list);
-		if (th->nr < EXTRA_TASK_STRUCT) {
-			unsigned long *p = (unsigned long *)thread;
-			p[0] = (unsigned long)th->head;
-			th->head = p;
-			th->nr += 1;
-			put_cpu_var(thread_info_list);
-			return;
-		}
-		put_cpu_var(thread_info_list);
-	}
-	free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
-}
-
 /*
  * Free current thread data structures etc..
  */
@@ -357,9 +357,6 @@ static void __init setup_processor(void)
 #ifndef CONFIG_VFP
 	elf_hwcap &= ~HWCAP_VFP;
 #endif
-#ifndef CONFIG_IWMMXT
-	elf_hwcap &= ~HWCAP_IWMMXT;
-#endif
 
 	cpu_proc_init();
 }
@@ -441,16 +438,19 @@ __early_param("initrd=", early_initrd);
 
 static void __init arm_add_memory(unsigned long start, unsigned long size)
 {
+	struct membank *bank;
+
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
 
-	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
-	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
-	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
-	meminfo.nr_banks += 1;
+	bank = &meminfo.bank[meminfo.nr_banks++];
+
+	bank->start = PAGE_ALIGN(start);
+	bank->size  = size & PAGE_MASK;
+	bank->node  = PHYS_TO_NID(start);
 }
 
 /*
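An aside on the hunk above (not from the patch): per the commit title "Allow gcc to optimise arm_add_memory a little more", the win is presumably that caching the bank pointer lets gcc compute &meminfo.bank[meminfo.nr_banks] once, rather than re-deriving the indexed address for each of the three member stores.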
@@ -13,6 +13,7 @@
 #include <linux/personality.h>
+#include <linux/freezer.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -631,12 +631,9 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 	notify_die("unknown data abort code", regs, &info, instr, 0);
 }
 
-void __attribute__((noreturn)) __bug(const char *file, int line, void *data)
+void __attribute__((noreturn)) __bug(const char *file, int line)
 {
-	printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
-	if (data)
-		printk(" - extra data = %p", data);
-	printk("\n");
+	printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
 	*(int *)0 = 0;
 
 	/* Avoid "noreturn function does return" */
arch/arm/kernel/xscale-cp0.c (new file, 179 lines)
@@ -0,0 +1,179 @@
+/*
+ * linux/arch/arm/kernel/xscale-cp0.c
+ *
+ * XScale DSP and iWMMXt coprocessor context switching and handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+static inline void dsp_save_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (state[0]), "=r" (state[1]));
+}
+
+static inline void dsp_load_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %0, %1, c0\n"
+		: : "r" (state[0]), "r" (state[1]));
+}
+
+static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		thread->cpu_context.extra[0] = 0;
+		thread->cpu_context.extra[1] = 0;
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		dsp_save_state(current_thread_info()->cpu_context.extra);
+		dsp_load_state(thread->cpu_context.extra);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dsp_notifier_block = {
+	.notifier_call	= dsp_do,
+};
+
+
+#ifdef CONFIG_IWMMXT
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_RELEASE:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call	= iwmmxt_do,
+};
+#endif
+
+
+static u32 __init xscale_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		: "=r" (value));
+
+	return value;
+}
+
+static void __init xscale_cp_access_write(u32 value)
+{
+	u32 temp;
+
+	__asm__ __volatile__ (
+		"mcr	p15, 0, %1, c15, c1, 0\n\t"
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		"mov	%0, %0\n\t"
+		"sub	pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+/*
+ * Detect whether we have a MAC coprocessor (40 bit register) or an
+ * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
+ * into a coprocessor register and reading it back, and checking
+ * whether the upper word survived intact.
+ */
+static int __init cpu_has_iwmmxt(void)
+{
+	u32 lo;
+	u32 hi;
+
+	/*
+	 * This sequence is interpreted by the DSP coprocessor as:
+	 *	mar	acc0, %2, %3
+	 *	mra	%0, %1, acc0
+	 *
+	 * And by the iWMMXt coprocessor as:
+	 *	tmcrr	wR0, %2, %3
+	 *	tmrrc	%0, %1, wR0
+	 */
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %2, %3, c0\n"
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (lo), "=r" (hi)
+		: "r" (0), "r" (0x100));
+
+	return !!hi;
+}
+
+
+/*
+ * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
+ * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.  If on the other
+ * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
+ * all the time, and save/restore acc0 on context switch in non-lazy
+ * fashion.
+ */
+static int __init xscale_cp0_init(void)
+{
+	u32 cp_access;
+
+	cp_access = xscale_cp_access_read() & ~3;
+	xscale_cp_access_write(cp_access | 1);
+
+	if (cpu_has_iwmmxt()) {
+#ifndef CONFIG_IWMMXT
+		printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
+			"detected, but kernel support is missing.\n");
+#else
+		printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
+		elf_hwcap |= HWCAP_IWMMXT;
+		thread_register_notifier(&iwmmxt_notifier_block);
+#endif
+	} else {
+		printk(KERN_INFO "XScale DSP coprocessor detected.\n");
+		thread_register_notifier(&dsp_notifier_block);
+		cp_access |= 1;
+	}
+
+	xscale_cp_access_write(cp_access);
+
+	return 0;
+}
+
+late_initcall(xscale_cp0_init);
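A closing aside on the detection trick in cpu_has_iwmmxt() above (not part of the patch): both coprocessors decode the same mcrr/mrrc encodings, so only the register width tells them apart. The arithmetic behind "whether the upper word survived intact", as a stand-alone user-space sketch:

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative only: models what the mcrr/mrrc pair in cpu_has_iwmmxt()
 * observes, without touching any coprocessor registers. */
int main(void)
{
	uint64_t written = (uint64_t)0x100 << 32;        /* 00000100:00000000 - only bit 40 set */
	uint64_t mac     = written & ((1ULL << 40) - 1); /* 40-bit acc0 keeps bits 0..39 only */
	uint64_t wr0     = written;                      /* 64-bit wR0 keeps everything */

	assert((uint32_t)(mac >> 32) == 0);     /* upper word lost: DSP/MAC unit */
	assert((uint32_t)(wr0 >> 32) == 0x100); /* upper word intact: iWMMXt */
	return 0;
}
```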