uml: style fixes pass 3
Formatting changes in the files which have been changed in the course of folding foo_skas functions into their callers. These include: copyright updates header file trimming style fixes adding severity to printks These changes should be entirely non-functional. Signed-off-by: Jeff Dike <jdike@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
77bf440031
commit
ba180fd437
@ -1,24 +1,19 @@
|
||||
/*
|
||||
* Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/slab.h"
|
||||
#include "linux/stddef.h"
|
||||
#include "linux/fs.h"
|
||||
#include "linux/smp_lock.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/fs.h"
|
||||
#include "asm/ptrace.h"
|
||||
#include "asm/pgtable.h"
|
||||
#include "asm/tlbflush.h"
|
||||
#include "linux/sched.h"
|
||||
#include "asm/current.h"
|
||||
#include "asm/processor.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "kern_util.h"
|
||||
#include "as-layout.h"
|
||||
#include "mem_user.h"
|
||||
#include "kern.h"
|
||||
#include "irq_user.h"
|
||||
#include "tlb.h"
|
||||
#include "skas.h"
|
||||
#include "os.h"
|
||||
#include "skas/skas.h"
|
||||
|
||||
void flush_thread(void)
|
||||
{
|
||||
@ -29,8 +24,8 @@ void flush_thread(void)
|
||||
arch_flush_thread(¤t->thread.arch);
|
||||
|
||||
ret = unmap(¤t->mm->context.skas.id, 0, end, 1, &data);
|
||||
if(ret){
|
||||
printk("flush_thread - clearing address space failed, "
|
||||
if (ret) {
|
||||
printk(KERN_ERR "flush_thread - clearing address space failed, "
|
||||
"err = %d\n", ret);
|
||||
force_sig(SIGKILL, current);
|
||||
}
|
||||
@ -52,7 +47,7 @@ extern void log_exec(char **argv, void *tty);
|
||||
static long execve1(char *file, char __user * __user *argv,
|
||||
char __user *__user *env)
|
||||
{
|
||||
long error;
|
||||
long error;
|
||||
#ifdef CONFIG_TTY_LOG
|
||||
struct tty_struct *tty;
|
||||
|
||||
@ -62,16 +57,16 @@ static long execve1(char *file, char __user * __user *argv,
|
||||
log_exec(argv, tty);
|
||||
mutex_unlock(&tty_mutex);
|
||||
#endif
|
||||
error = do_execve(file, argv, env, ¤t->thread.regs);
|
||||
if (error == 0){
|
||||
error = do_execve(file, argv, env, ¤t->thread.regs);
|
||||
if (error == 0) {
|
||||
task_lock(current);
|
||||
current->ptrace &= ~PT_DTRACE;
|
||||
current->ptrace &= ~PT_DTRACE;
|
||||
#ifdef SUBARCH_EXECVE1
|
||||
SUBARCH_EXECVE1(¤t->thread.regs.regs);
|
||||
#endif
|
||||
task_unlock(current);
|
||||
}
|
||||
return(error);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
|
||||
@ -79,9 +74,9 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
|
||||
long err;
|
||||
|
||||
err = execve1(file, argv, env);
|
||||
if(!err)
|
||||
if (!err)
|
||||
do_longjmp(current->thread.exec_buf, 1);
|
||||
return(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
long sys_execve(char __user *file, char __user *__user *argv,
|
||||
@ -98,5 +93,5 @@ long sys_execve(char __user *file, char __user *__user *argv,
|
||||
putname(filename);
|
||||
out:
|
||||
unlock_kernel();
|
||||
return(error);
|
||||
return error;
|
||||
}
|
||||
|
@ -1,37 +1,19 @@
|
||||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
* Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
|
||||
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
|
||||
*/
|
||||
|
||||
#include "linux/kernel.h"
|
||||
#include "linux/module.h"
|
||||
#include "linux/smp.h"
|
||||
#include "linux/kernel_stat.h"
|
||||
#include "linux/interrupt.h"
|
||||
#include "linux/random.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/file.h"
|
||||
#include "linux/proc_fs.h"
|
||||
#include "linux/init.h"
|
||||
#include "linux/seq_file.h"
|
||||
#include "linux/profile.h"
|
||||
#include "linux/cpumask.h"
|
||||
#include "linux/hardirq.h"
|
||||
#include "asm/irq.h"
|
||||
#include "asm/hw_irq.h"
|
||||
#include "asm/atomic.h"
|
||||
#include "asm/signal.h"
|
||||
#include "asm/system.h"
|
||||
#include "asm/errno.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "kern_util.h"
|
||||
#include "irq_user.h"
|
||||
#include "irq_kern.h"
|
||||
#include "os.h"
|
||||
#include "sigio.h"
|
||||
#include "misc_constants.h"
|
||||
#include "linux/interrupt.h"
|
||||
#include "linux/kernel_stat.h"
|
||||
#include "linux/module.h"
|
||||
#include "linux/seq_file.h"
|
||||
#include "as-layout.h"
|
||||
#include "kern_util.h"
|
||||
#include "os.h"
|
||||
|
||||
/*
|
||||
* Generic, controller-independent functions:
|
||||
@ -71,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||
seq_putc(p, '\n');
|
||||
skip:
|
||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||
} else if (i == NR_IRQS) {
|
||||
} else if (i == NR_IRQS)
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -102,11 +83,13 @@ void sigio_handler(int sig, struct uml_pt_regs *regs)
|
||||
while (1) {
|
||||
n = os_waiting_for_events(active_fds);
|
||||
if (n <= 0) {
|
||||
if(n == -EINTR) continue;
|
||||
if (n == -EINTR)
|
||||
continue;
|
||||
else break;
|
||||
}
|
||||
|
||||
for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
|
||||
for (irq_fd = active_fds; irq_fd != NULL;
|
||||
irq_fd = irq_fd->next) {
|
||||
if (irq_fd->current_events != 0) {
|
||||
irq_fd->current_events = 0;
|
||||
do_IRQ(irq_fd->irq, regs);
|
||||
@ -138,8 +121,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
|
||||
|
||||
if (type == IRQ_READ)
|
||||
events = UM_POLLIN | UM_POLLPRI;
|
||||
else
|
||||
events = UM_POLLOUT;
|
||||
else events = UM_POLLOUT;
|
||||
*new_fd = ((struct irq_fd) { .next = NULL,
|
||||
.id = dev_id,
|
||||
.fd = fd,
|
||||
@ -153,9 +135,10 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
|
||||
spin_lock_irqsave(&irq_lock, flags);
|
||||
for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
|
||||
if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
|
||||
printk("Registering fd %d twice\n", fd);
|
||||
printk("Irqs : %d, %d\n", irq_fd->irq, irq);
|
||||
printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
|
||||
printk(KERN_ERR "Registering fd %d twice\n", fd);
|
||||
printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
|
||||
printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
|
||||
dev_id);
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
@ -171,7 +154,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
|
||||
if (n == 0)
|
||||
break;
|
||||
|
||||
/* n > 0
|
||||
/*
|
||||
* n > 0
|
||||
* It means we couldn't put new pollfd to current pollfds
|
||||
* and tmp_fds is NULL or too small for new pollfds array.
|
||||
* Needed size is equal to n as minimum.
|
||||
@ -197,7 +181,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
|
||||
|
||||
spin_unlock_irqrestore(&irq_lock, flags);
|
||||
|
||||
/* This calls activate_fd, so it has to be outside the critical
|
||||
/*
|
||||
* This calls activate_fd, so it has to be outside the critical
|
||||
* section.
|
||||
*/
|
||||
maybe_sigio_broken(fd, (type == IRQ_READ));
|
||||
@ -264,13 +249,14 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
|
||||
i++;
|
||||
}
|
||||
if (irq == NULL) {
|
||||
printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
|
||||
printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
|
||||
fd);
|
||||
goto out;
|
||||
}
|
||||
fdi = os_get_pollfd(i);
|
||||
if ((fdi != -1) && (fdi != fd)) {
|
||||
printk("find_irq_by_fd - mismatch between active_fds and "
|
||||
"pollfds, fd %d vs %d, need %d\n", irq->fd,
|
||||
printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
|
||||
"and pollfds, fd %d vs %d, need %d\n", irq->fd,
|
||||
fdi, fd);
|
||||
irq = NULL;
|
||||
goto out;
|
||||
@ -306,7 +292,7 @@ void deactivate_fd(int fd, int irqnum)
|
||||
|
||||
spin_lock_irqsave(&irq_lock, flags);
|
||||
irq = find_irq_by_fd(fd, irqnum, &i);
|
||||
if(irq == NULL){
|
||||
if (irq == NULL) {
|
||||
spin_unlock_irqrestore(&irq_lock, flags);
|
||||
return;
|
||||
}
|
||||
@ -372,8 +358,10 @@ int um_request_irq(unsigned int irq, int fd, int type,
|
||||
EXPORT_SYMBOL(um_request_irq);
|
||||
EXPORT_SYMBOL(reactivate_fd);
|
||||
|
||||
/* hw_interrupt_type must define (startup || enable) &&
|
||||
* (shutdown || disable) && end */
|
||||
/*
|
||||
* hw_interrupt_type must define (startup || enable) &&
|
||||
* (shutdown || disable) && end
|
||||
*/
|
||||
static void dummy(unsigned int irq)
|
||||
{
|
||||
}
|
||||
@ -422,7 +410,8 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
|
||||
|
||||
err = os_pipe(fds, 1, 1);
|
||||
if (err) {
|
||||
printk("init_aio_irq - os_pipe failed, err = %d\n", -err);
|
||||
printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n",
|
||||
-err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -430,7 +419,8 @@ int init_aio_irq(int irq, char *name, irq_handler_t handler)
|
||||
IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
|
||||
(void *) (long) fds[0]);
|
||||
if (err) {
|
||||
printk("init_aio_irq - : um_request_irq failed, err = %d\n",
|
||||
printk(KERN_ERR "init_aio_irq - : um_request_irq failed, "
|
||||
"err = %d\n",
|
||||
err);
|
||||
goto out_close;
|
||||
}
|
||||
@ -501,8 +491,9 @@ unsigned long to_irq_stack(unsigned long *mask_out)
|
||||
int nested;
|
||||
|
||||
mask = xchg(&pending_mask, *mask_out);
|
||||
if(mask != 0){
|
||||
/* If any interrupts come in at this point, we want to
|
||||
if (mask != 0) {
|
||||
/*
|
||||
* If any interrupts come in at this point, we want to
|
||||
* make sure that their bits aren't lost by our
|
||||
* putting our bit in. So, this loop accumulates bits
|
||||
* until xchg returns the same value that we put in.
|
||||
@ -514,13 +505,13 @@ unsigned long to_irq_stack(unsigned long *mask_out)
|
||||
do {
|
||||
old |= mask;
|
||||
mask = xchg(&pending_mask, old);
|
||||
} while(mask != old);
|
||||
} while (mask != old);
|
||||
return 1;
|
||||
}
|
||||
|
||||
ti = current_thread_info();
|
||||
nested = (ti->real_thread != NULL);
|
||||
if(!nested){
|
||||
if (!nested) {
|
||||
struct task_struct *task;
|
||||
struct thread_info *tti;
|
||||
|
||||
|
@ -75,7 +75,7 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
|
||||
err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
|
||||
if (err) {
|
||||
if (err == -ENOMEM)
|
||||
printk("try increasing the host's "
|
||||
printk(KERN_ERR "try increasing the host's "
|
||||
"/proc/sys/vm/max_map_count to <physical "
|
||||
"memory size>/4096\n");
|
||||
panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
|
||||
@ -103,7 +103,8 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* Special kludge - This page will be mapped in to userspace processes
|
||||
/*
|
||||
* Special kludge - This page will be mapped in to userspace processes
|
||||
* from physmem_fd, so it needs to be written out there.
|
||||
*/
|
||||
os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
|
||||
@ -202,8 +203,8 @@ int setup_iomem(void)
|
||||
err = os_map_memory((void *) iomem_start, region->fd, 0,
|
||||
region->size, 1, 1, 0);
|
||||
if (err)
|
||||
printk("Mapping iomem region for driver '%s' failed, "
|
||||
"errno = %d\n", region->driver, -err);
|
||||
printk(KERN_ERR "Mapping iomem region for driver '%s' "
|
||||
"failed, errno = %d\n", region->driver, -err);
|
||||
else {
|
||||
region->virt = iomem_start;
|
||||
region->phys = __pa(region->virt);
|
||||
|
@ -1,51 +1,29 @@
|
||||
/*
|
||||
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Copyright 2003 PathScale, Inc.
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/kernel.h"
|
||||
#include "linux/sched.h"
|
||||
#include "linux/interrupt.h"
|
||||
#include "linux/string.h"
|
||||
#include "linux/stddef.h"
|
||||
#include "linux/err.h"
|
||||
#include "linux/hardirq.h"
|
||||
#include "linux/mm.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/utsname.h"
|
||||
#include "linux/fs.h"
|
||||
#include "linux/utime.h"
|
||||
#include "linux/smp_lock.h"
|
||||
#include "linux/module.h"
|
||||
#include "linux/init.h"
|
||||
#include "linux/capability.h"
|
||||
#include "linux/vmalloc.h"
|
||||
#include "linux/spinlock.h"
|
||||
#include "linux/personality.h"
|
||||
#include "linux/proc_fs.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/random.h"
|
||||
#include "linux/personality.h"
|
||||
#include "asm/unistd.h"
|
||||
#include "asm/mman.h"
|
||||
#include "asm/segment.h"
|
||||
#include "asm/stat.h"
|
||||
#include "linux/sched.h"
|
||||
#include "linux/threads.h"
|
||||
#include "asm/pgtable.h"
|
||||
#include "asm/processor.h"
|
||||
#include "asm/tlbflush.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "asm/user.h"
|
||||
#include "kern_util.h"
|
||||
#include "as-layout.h"
|
||||
#include "kern.h"
|
||||
#include "signal_kern.h"
|
||||
#include "init.h"
|
||||
#include "irq_user.h"
|
||||
#include "mem_user.h"
|
||||
#include "tlb.h"
|
||||
#include "frame_kern.h"
|
||||
#include "sigcontext.h"
|
||||
#include "kern_util.h"
|
||||
#include "os.h"
|
||||
#include "skas.h"
|
||||
#include "tlb.h"
|
||||
|
||||
/* This is a per-cpu array. A processor only modifies its entry and it only
|
||||
/*
|
||||
* This is a per-cpu array. A processor only modifies its entry and it only
|
||||
* cares about its entry, so it's OK if another processor is modifying its
|
||||
* entry.
|
||||
*/
|
||||
@ -54,15 +32,15 @@ struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
|
||||
static inline int external_pid(struct task_struct *task)
|
||||
{
|
||||
/* FIXME: Need to look up userspace_pid by cpu */
|
||||
return(userspace_pid[0]);
|
||||
return userspace_pid[0];
|
||||
}
|
||||
|
||||
int pid_to_processor_id(int pid)
|
||||
{
|
||||
int i;
|
||||
|
||||
for(i = 0; i < ncpus; i++){
|
||||
if(cpu_tasks[i].pid == pid)
|
||||
for(i = 0; i < ncpus; i++) {
|
||||
if (cpu_tasks[i].pid == pid)
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
@ -118,7 +96,7 @@ void *_switch_to(void *prev, void *next, void *last)
|
||||
current->thread.saved_task = NULL;
|
||||
|
||||
/* XXX need to check runqueues[cpu].idle */
|
||||
if(current->pid == 0)
|
||||
if (current->pid == 0)
|
||||
switch_timers(0);
|
||||
|
||||
switch_threads(&from->thread.switch_buf,
|
||||
@ -126,10 +104,10 @@ void *_switch_to(void *prev, void *next, void *last)
|
||||
|
||||
arch_switch_to(current->thread.prev_sched, current);
|
||||
|
||||
if(current->pid == 0)
|
||||
if (current->pid == 0)
|
||||
switch_timers(1);
|
||||
|
||||
if(current->thread.saved_task)
|
||||
if (current->thread.saved_task)
|
||||
show_regs(&(current->thread.regs));
|
||||
next= current->thread.saved_task;
|
||||
prev= current;
|
||||
@ -141,9 +119,9 @@ void *_switch_to(void *prev, void *next, void *last)
|
||||
|
||||
void interrupt_end(void)
|
||||
{
|
||||
if(need_resched())
|
||||
if (need_resched())
|
||||
schedule();
|
||||
if(test_tsk_thread_flag(current, TIF_SIGPENDING))
|
||||
if (test_tsk_thread_flag(current, TIF_SIGPENDING))
|
||||
do_signal();
|
||||
}
|
||||
|
||||
@ -158,7 +136,8 @@ void *get_current(void)
|
||||
|
||||
extern void schedule_tail(struct task_struct *prev);
|
||||
|
||||
/* This is called magically, by its address being stuffed in a jmp_buf
|
||||
/*
|
||||
* This is called magically, by its address being stuffed in a jmp_buf
|
||||
* and being longjmp-d to.
|
||||
*/
|
||||
void new_thread_handler(void)
|
||||
@ -166,18 +145,19 @@ void new_thread_handler(void)
|
||||
int (*fn)(void *), n;
|
||||
void *arg;
|
||||
|
||||
if(current->thread.prev_sched != NULL)
|
||||
if (current->thread.prev_sched != NULL)
|
||||
schedule_tail(current->thread.prev_sched);
|
||||
current->thread.prev_sched = NULL;
|
||||
|
||||
fn = current->thread.request.u.thread.proc;
|
||||
arg = current->thread.request.u.thread.arg;
|
||||
|
||||
/* The return value is 1 if the kernel thread execs a process,
|
||||
/*
|
||||
* The return value is 1 if the kernel thread execs a process,
|
||||
* 0 if it just exits
|
||||
*/
|
||||
n = run_kernel_thread(fn, arg, ¤t->thread.exec_buf);
|
||||
if(n == 1){
|
||||
if (n == 1) {
|
||||
/* Handle any immediate reschedules or signals */
|
||||
interrupt_end();
|
||||
userspace(¤t->thread.regs.regs);
|
||||
@ -189,14 +169,16 @@ void new_thread_handler(void)
|
||||
void fork_handler(void)
|
||||
{
|
||||
force_flush_all();
|
||||
if(current->thread.prev_sched == NULL)
|
||||
if (current->thread.prev_sched == NULL)
|
||||
panic("blech");
|
||||
|
||||
schedule_tail(current->thread.prev_sched);
|
||||
|
||||
/* XXX: if interrupt_end() calls schedule, this call to
|
||||
/*
|
||||
* XXX: if interrupt_end() calls schedule, this call to
|
||||
* arch_switch_to isn't needed. We could want to apply this to
|
||||
* improve performance. -bb */
|
||||
* improve performance. -bb
|
||||
*/
|
||||
arch_switch_to(current->thread.prev_sched, current);
|
||||
|
||||
current->thread.prev_sched = NULL;
|
||||
@ -216,11 +198,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
|
||||
|
||||
p->thread = (struct thread_struct) INIT_THREAD;
|
||||
|
||||
if(current->thread.forking){
|
||||
if (current->thread.forking) {
|
||||
memcpy(&p->thread.regs.regs, ®s->regs,
|
||||
sizeof(p->thread.regs.regs));
|
||||
REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
|
||||
if(sp != 0)
|
||||
if (sp != 0)
|
||||
REGS_SP(p->thread.regs.regs.regs) = sp;
|
||||
|
||||
handler = fork_handler;
|
||||
@ -259,14 +241,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
|
||||
|
||||
void default_idle(void)
|
||||
{
|
||||
while(1){
|
||||
while(1) {
|
||||
/* endless idle loop with no priority at all */
|
||||
|
||||
/*
|
||||
* although we are an idle CPU, we do not want to
|
||||
* get into the scheduler unnecessarily.
|
||||
*/
|
||||
if(need_resched())
|
||||
if (need_resched())
|
||||
schedule();
|
||||
|
||||
idle_sleep(10);
|
||||
@ -288,26 +270,26 @@ void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
|
||||
pte_t *pte;
|
||||
pte_t ptent;
|
||||
|
||||
if(task->mm == NULL)
|
||||
if (task->mm == NULL)
|
||||
return ERR_PTR(-EINVAL);
|
||||
pgd = pgd_offset(task->mm, addr);
|
||||
if(!pgd_present(*pgd))
|
||||
if (!pgd_present(*pgd))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if(!pud_present(*pud))
|
||||
if (!pud_present(*pud))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if(!pmd_present(*pmd))
|
||||
if (!pmd_present(*pmd))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
ptent = *pte;
|
||||
if(!pte_present(ptent))
|
||||
if (!pte_present(ptent))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if(pte_out != NULL)
|
||||
if (pte_out != NULL)
|
||||
*pte_out = ptent;
|
||||
return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
|
||||
}
|
||||
@ -380,7 +362,7 @@ int smp_sigio_handler(void)
|
||||
#ifdef CONFIG_SMP
|
||||
int cpu = current_thread->cpu;
|
||||
IPI_handler(cpu);
|
||||
if(cpu != 0)
|
||||
if (cpu != 0)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
@ -408,7 +390,8 @@ int get_using_sysemu(void)
|
||||
|
||||
static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
|
||||
{
|
||||
if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
|
||||
if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
|
||||
/* No overflow */
|
||||
*eof = 1;
|
||||
|
||||
return strlen(buf);
|
||||
@ -423,7 +406,8 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
|
||||
|
||||
if (tmp[0] >= '0' && tmp[0] <= '2')
|
||||
set_using_sysemu(tmp[0] - '0');
|
||||
return count; /*We use the first char, but pretend to write everything*/
|
||||
/* We use the first char, but pretend to write everything */
|
||||
return count;
|
||||
}
|
||||
|
||||
int __init make_proc_sysemu(void)
|
||||
@ -453,10 +437,10 @@ int singlestepping(void * t)
|
||||
struct task_struct *task = t ? t : current;
|
||||
|
||||
if ( ! (task->ptrace & PT_DTRACE) )
|
||||
return(0);
|
||||
return 0;
|
||||
|
||||
if (task->thread.singlestep_syscall)
|
||||
return(1);
|
||||
return 1;
|
||||
|
||||
return 2;
|
||||
}
|
||||
|
@ -1,35 +1,27 @@
|
||||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
/*
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/sched.h"
|
||||
#include "linux/mm.h"
|
||||
#include "linux/errno.h"
|
||||
#include "linux/smp_lock.h"
|
||||
#include "linux/security.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/audit.h"
|
||||
#ifdef CONFIG_PROC_MM
|
||||
#include "linux/proc_mm.h"
|
||||
#endif
|
||||
#include "asm/ptrace.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/sched.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "kern_util.h"
|
||||
#ifdef CONFIG_PROC_MM
|
||||
#include "proc_mm.h"
|
||||
#endif
|
||||
#include "skas_ptrace.h"
|
||||
#include "sysdep/ptrace.h"
|
||||
#include "os.h"
|
||||
|
||||
static inline void set_singlestepping(struct task_struct *child, int on)
|
||||
{
|
||||
if (on)
|
||||
child->ptrace |= PT_DTRACE;
|
||||
else
|
||||
child->ptrace &= ~PT_DTRACE;
|
||||
child->thread.singlestep_syscall = 0;
|
||||
if (on)
|
||||
child->ptrace |= PT_DTRACE;
|
||||
else
|
||||
child->ptrace &= ~PT_DTRACE;
|
||||
child->thread.singlestep_syscall = 0;
|
||||
|
||||
#ifdef SUBARCH_SET_SINGLESTEPPING
|
||||
SUBARCH_SET_SINGLESTEPPING(child, on);
|
||||
SUBARCH_SET_SINGLESTEPPING(child, on);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -37,8 +29,8 @@ static inline void set_singlestepping(struct task_struct *child, int on)
|
||||
* Called by kernel/ptrace.c when detaching..
|
||||
*/
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
{
|
||||
set_singlestepping(child,0);
|
||||
{
|
||||
set_singlestepping(child,0);
|
||||
}
|
||||
|
||||
extern int peek_user(struct task_struct * child, long addr, long data);
|
||||
@ -50,40 +42,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
unsigned long __user *p = (void __user *)(unsigned long)data;
|
||||
|
||||
switch (request) {
|
||||
/* when I and D space are separate, these will need to be fixed. */
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
/* read word at location addr. */
|
||||
case PTRACE_PEEKTEXT:
|
||||
case PTRACE_PEEKDATA:
|
||||
ret = generic_ptrace_peekdata(child, addr, data);
|
||||
break;
|
||||
|
||||
/* read the word at location addr in the USER area. */
|
||||
case PTRACE_PEEKUSR:
|
||||
ret = peek_user(child, addr, data);
|
||||
break;
|
||||
case PTRACE_PEEKUSR:
|
||||
ret = peek_user(child, addr, data);
|
||||
break;
|
||||
|
||||
/* when I and D space are separate, this will have to be fixed. */
|
||||
case PTRACE_POKETEXT: /* write the word at location addr. */
|
||||
/* write the word at location addr. */
|
||||
case PTRACE_POKETEXT:
|
||||
case PTRACE_POKEDATA:
|
||||
ret = generic_ptrace_pokedata(child, addr, data);
|
||||
break;
|
||||
|
||||
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
|
||||
ret = poke_user(child, addr, data);
|
||||
break;
|
||||
/* write the word at location addr in the USER area */
|
||||
case PTRACE_POKEUSR:
|
||||
ret = poke_user(child, addr, data);
|
||||
break;
|
||||
|
||||
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
|
||||
case PTRACE_CONT: { /* restart after signal. */
|
||||
/* continue and stop at next (return from) syscall */
|
||||
case PTRACE_SYSCALL:
|
||||
/* restart after signal. */
|
||||
case PTRACE_CONT: {
|
||||
ret = -EIO;
|
||||
if (!valid_signal(data))
|
||||
break;
|
||||
|
||||
set_singlestepping(child, 0);
|
||||
if (request == PTRACE_SYSCALL) {
|
||||
set_singlestepping(child, 0);
|
||||
if (request == PTRACE_SYSCALL)
|
||||
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
}
|
||||
else {
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
}
|
||||
else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
child->exit_code = data;
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
@ -91,8 +83,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
|
||||
/*
|
||||
* make the child exit. Best I can do is send it a sigkill.
|
||||
* perhaps it should be put in the status that it wants to
|
||||
* make the child exit. Best I can do is send it a sigkill.
|
||||
* perhaps it should be put in the status that it wants to
|
||||
* exit.
|
||||
*/
|
||||
case PTRACE_KILL: {
|
||||
@ -100,7 +92,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
|
||||
break;
|
||||
|
||||
set_singlestepping(child, 0);
|
||||
set_singlestepping(child, 0);
|
||||
child->exit_code = SIGKILL;
|
||||
wake_up_process(child);
|
||||
break;
|
||||
@ -111,7 +103,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
if (!valid_signal(data))
|
||||
break;
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
set_singlestepping(child, 1);
|
||||
set_singlestepping(child, 1);
|
||||
child->exit_code = data;
|
||||
/* give it a chance to run. */
|
||||
wake_up_process(child);
|
||||
@ -180,13 +172,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
break;
|
||||
|
||||
case PTRACE_FAULTINFO: {
|
||||
/* Take the info from thread->arch->faultinfo,
|
||||
/*
|
||||
* Take the info from thread->arch->faultinfo,
|
||||
* but transfer max. sizeof(struct ptrace_faultinfo).
|
||||
* On i386, ptrace_faultinfo is smaller!
|
||||
*/
|
||||
ret = copy_to_user(p, &child->thread.arch.faultinfo,
|
||||
sizeof(struct ptrace_faultinfo));
|
||||
if(ret)
|
||||
if (ret)
|
||||
break;
|
||||
break;
|
||||
}
|
||||
@ -195,12 +188,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
case PTRACE_LDT: {
|
||||
struct ptrace_ldt ldt;
|
||||
|
||||
if(copy_from_user(&ldt, p, sizeof(ldt))){
|
||||
if (copy_from_user(&ldt, p, sizeof(ldt))) {
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
/* This one is confusing, so just punt and return -EIO for
|
||||
/*
|
||||
* This one is confusing, so just punt and return -EIO for
|
||||
* now
|
||||
*/
|
||||
ret = -EIO;
|
||||
@ -212,7 +206,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
struct mm_struct *old = child->mm;
|
||||
struct mm_struct *new = proc_mm_get_mm(data);
|
||||
|
||||
if(IS_ERR(new)){
|
||||
if (IS_ERR(new)) {
|
||||
ret = PTR_ERR(new);
|
||||
break;
|
||||
}
|
||||
@ -226,10 +220,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
#endif
|
||||
#ifdef PTRACE_ARCH_PRCTL
|
||||
case PTRACE_ARCH_PRCTL:
|
||||
/* XXX Calls ptrace on the host - needs some SMP thinking */
|
||||
ret = arch_prctl(child, data, (void *) addr);
|
||||
break;
|
||||
case PTRACE_ARCH_PRCTL:
|
||||
/* XXX Calls ptrace on the host - needs some SMP thinking */
|
||||
ret = arch_prctl(child, data, (void *) addr);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
@ -255,7 +249,8 @@ void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
|
||||
force_sig_info(SIGTRAP, &info, tsk);
|
||||
}
|
||||
|
||||
/* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
|
||||
/*
|
||||
* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
|
||||
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
|
||||
*/
|
||||
void syscall_trace(struct uml_pt_regs *regs, int entryexit)
|
||||
@ -272,7 +267,7 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
|
||||
UPT_SYSCALL_ARG3(regs),
|
||||
UPT_SYSCALL_ARG4(regs));
|
||||
else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
|
||||
UPT_SYSCALL_RET(regs));
|
||||
UPT_SYSCALL_RET(regs));
|
||||
}
|
||||
|
||||
/* Fake a debug trap */
|
||||
@ -285,15 +280,18 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
|
||||
if (!(current->ptrace & PT_PTRACED))
|
||||
return;
|
||||
|
||||
/* the 0x80 provides a way for the tracing parent to distinguish
|
||||
between a syscall stop and SIGTRAP delivery */
|
||||
/*
|
||||
* the 0x80 provides a way for the tracing parent to distinguish
|
||||
* between a syscall stop and SIGTRAP delivery
|
||||
*/
|
||||
tracesysgood = (current->ptrace & PT_TRACESYSGOOD);
|
||||
ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0));
|
||||
|
||||
if (entryexit) /* force do_signal() --> is_syscall() */
|
||||
set_thread_flag(TIF_SIGPENDING);
|
||||
|
||||
/* this isn't the same as continuing with a signal, but it will do
|
||||
/*
|
||||
* this isn't the same as continuing with a signal, but it will do
|
||||
* for normal use. strace only continues with a signal if the
|
||||
* stopping signal is not SIGTRAP. -brl
|
||||
*/
|
||||
|
@ -1,13 +1,9 @@
|
||||
/*
|
||||
* Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/module.h"
|
||||
#include "linux/sched.h"
|
||||
#include "asm/smp.h"
|
||||
#include "kern_util.h"
|
||||
#include "kern.h"
|
||||
#include "os.h"
|
||||
#include "skas.h"
|
||||
|
||||
@ -37,20 +33,20 @@ static void kill_off_processes(void)
|
||||
|
||||
void uml_cleanup(void)
|
||||
{
|
||||
kmalloc_ok = 0;
|
||||
kmalloc_ok = 0;
|
||||
do_uml_exitcalls();
|
||||
kill_off_processes();
|
||||
}
|
||||
|
||||
void machine_restart(char * __unused)
|
||||
{
|
||||
uml_cleanup();
|
||||
uml_cleanup();
|
||||
reboot_skas();
|
||||
}
|
||||
|
||||
void machine_power_off(void)
|
||||
{
|
||||
uml_cleanup();
|
||||
uml_cleanup();
|
||||
halt_skas();
|
||||
}
|
||||
|
||||
|
@ -1,27 +1,16 @@
|
||||
/*
|
||||
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/stddef.h"
|
||||
#include "linux/sys.h"
|
||||
#include "linux/sched.h"
|
||||
#include "linux/wait.h"
|
||||
#include "linux/kernel.h"
|
||||
#include "linux/smp_lock.h"
|
||||
#include "linux/module.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/tty.h"
|
||||
#include "linux/binfmts.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/sched.h"
|
||||
#include "asm/siginfo.h"
|
||||
#include "asm/signal.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "asm/unistd.h"
|
||||
#include "asm/ucontext.h"
|
||||
#include "kern_util.h"
|
||||
#include "signal_kern.h"
|
||||
#include "kern.h"
|
||||
#include "frame_kern.h"
|
||||
#include "kern_util.h"
|
||||
#include "sigcontext.h"
|
||||
|
||||
EXPORT_SYMBOL(block_signals);
|
||||
@ -45,9 +34,9 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
/* Did we come from a system call? */
|
||||
if(PT_REGS_SYSCALL_NR(regs) >= 0){
|
||||
if (PT_REGS_SYSCALL_NR(regs) >= 0) {
|
||||
/* If so, check system call restarting.. */
|
||||
switch(PT_REGS_SYSCALL_RET(regs)){
|
||||
switch(PT_REGS_SYSCALL_RET(regs)) {
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
case -ERESTARTNOHAND:
|
||||
PT_REGS_SYSCALL_RET(regs) = -EINTR;
|
||||
@ -67,17 +56,17 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
|
||||
}
|
||||
|
||||
sp = PT_REGS_SP(regs);
|
||||
if((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
|
||||
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
|
||||
sp = current->sas_ss_sp + current->sas_ss_size;
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SC_SIGNALS
|
||||
if(!(ka->sa.sa_flags & SA_SIGINFO))
|
||||
if (!(ka->sa.sa_flags & SA_SIGINFO))
|
||||
err = setup_signal_stack_sc(sp, signr, ka, regs, oldset);
|
||||
else
|
||||
#endif
|
||||
err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset);
|
||||
|
||||
if(err){
|
||||
if (err) {
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
current->blocked = *oldset;
|
||||
recalc_sigpending();
|
||||
@ -87,7 +76,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
sigorsets(¤t->blocked, ¤t->blocked,
|
||||
&ka->sa.sa_mask);
|
||||
if(!(ka->sa.sa_flags & SA_NODEFER))
|
||||
if (!(ka->sa.sa_flags & SA_NODEFER))
|
||||
sigaddset(¤t->blocked, signr);
|
||||
recalc_sigpending();
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
@ -108,14 +97,16 @@ static int kern_do_signal(struct pt_regs *regs)
|
||||
else
|
||||
oldset = ¤t->blocked;
|
||||
|
||||
while((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0){
|
||||
while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
|
||||
handled_sig = 1;
|
||||
/* Whee! Actually deliver the signal. */
|
||||
if(!handle_signal(regs, sig, &ka_copy, &info, oldset)){
|
||||
/* a signal was successfully delivered; the saved
|
||||
if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
|
||||
/*
|
||||
* a signal was successfully delivered; the saved
|
||||
* sigmask will have been stored in the signal frame,
|
||||
* and will be restored by sigreturn, so we can simply
|
||||
* clear the TIF_RESTORE_SIGMASK flag */
|
||||
* clear the TIF_RESTORE_SIGMASK flag
|
||||
*/
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
break;
|
||||
@ -123,9 +114,9 @@ static int kern_do_signal(struct pt_regs *regs)
|
||||
}
|
||||
|
||||
/* Did we come from a system call? */
|
||||
if(!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)){
|
||||
if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) {
|
||||
/* Restart the system call - no handlers present */
|
||||
switch(PT_REGS_SYSCALL_RET(regs)){
|
||||
switch(PT_REGS_SYSCALL_RET(regs)) {
|
||||
case -ERESTARTNOHAND:
|
||||
case -ERESTARTSYS:
|
||||
case -ERESTARTNOINTR:
|
||||
@ -136,22 +127,25 @@ static int kern_do_signal(struct pt_regs *regs)
|
||||
PT_REGS_ORIG_SYSCALL(regs) = __NR_restart_syscall;
|
||||
PT_REGS_RESTART_SYSCALL(regs);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* This closes a way to execute a system call on the host. If
|
||||
/*
|
||||
* This closes a way to execute a system call on the host. If
|
||||
* you set a breakpoint on a system call instruction and singlestep
|
||||
* from it, the tracing thread used to PTRACE_SINGLESTEP the process
|
||||
* rather than PTRACE_SYSCALL it, allowing the system call to execute
|
||||
* on the host. The tracing thread will check this flag and
|
||||
* PTRACE_SYSCALL if necessary.
|
||||
*/
|
||||
if(current->ptrace & PT_DTRACE)
|
||||
if (current->ptrace & PT_DTRACE)
|
||||
current->thread.singlestep_syscall =
|
||||
is_syscall(PT_REGS_IP(¤t->thread.regs));
|
||||
|
||||
/* if there's no signal to deliver, we just put the saved sigmask
|
||||
* back */
|
||||
/*
|
||||
* if there's no signal to deliver, we just put the saved sigmask
|
||||
* back
|
||||
*/
|
||||
if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
|
||||
# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
# Licensed under the GPL
|
||||
#
|
||||
|
||||
|
@ -1,20 +1,12 @@
|
||||
/*
|
||||
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
|
||||
/*
|
||||
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/sched.h"
|
||||
#include "linux/list.h"
|
||||
#include "linux/spinlock.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/errno.h"
|
||||
#include "linux/mm.h"
|
||||
#include "asm/current.h"
|
||||
#include "asm/segment.h"
|
||||
#include "asm/mmu.h"
|
||||
#include "linux/sched.h"
|
||||
#include "asm/pgalloc.h"
|
||||
#include "asm/pgtable.h"
|
||||
#include "asm/ldt.h"
|
||||
#include "os.h"
|
||||
#include "skas.h"
|
||||
|
||||
@ -41,10 +33,11 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
|
||||
if (!pte)
|
||||
goto out_pte;
|
||||
|
||||
/* There's an interaction between the skas0 stub pages, stack
|
||||
/*
|
||||
* There's an interaction between the skas0 stub pages, stack
|
||||
* randomization, and the BUG at the end of exit_mmap. exit_mmap
|
||||
* checks that the number of page tables freed is the same as had
|
||||
* been allocated. If the stack is on the last page table page,
|
||||
* checks that the number of page tables freed is the same as had
|
||||
* been allocated. If the stack is on the last page table page,
|
||||
* then the stack pte page will be freed, and if not, it won't. To
|
||||
* avoid having to know where the stack is, or if the process mapped
|
||||
* something at the top of its address space for some other reason,
|
||||
@ -54,36 +47,37 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
|
||||
* destroy_context_skas.
|
||||
*/
|
||||
|
||||
mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
|
||||
mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
|
||||
#ifdef CONFIG_3_LEVEL_PGTABLES
|
||||
mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
|
||||
mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
|
||||
#endif
|
||||
|
||||
*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
|
||||
*pte = pte_mkread(*pte);
|
||||
return(0);
|
||||
return 0;
|
||||
|
||||
out_pmd:
|
||||
pud_free(pud);
|
||||
out_pte:
|
||||
pmd_free(pmd);
|
||||
out:
|
||||
return(-ENOMEM);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int init_new_context(struct task_struct *task, struct mm_struct *mm)
|
||||
{
|
||||
struct mmu_context_skas *from_mm = NULL;
|
||||
struct mmu_context_skas *from_mm = NULL;
|
||||
struct mmu_context_skas *to_mm = &mm->context.skas;
|
||||
unsigned long stack = 0;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if(skas_needs_stub){
|
||||
if (skas_needs_stub) {
|
||||
stack = get_zeroed_page(GFP_KERNEL);
|
||||
if(stack == 0)
|
||||
if (stack == 0)
|
||||
goto out;
|
||||
|
||||
/* This zeros the entry that pgd_alloc didn't, needed since
|
||||
/*
|
||||
* This zeros the entry that pgd_alloc didn't, needed since
|
||||
* we are about to reinitialize it, and want mm.nr_ptes to
|
||||
* be accurate.
|
||||
*/
|
||||
@ -91,39 +85,39 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
|
||||
|
||||
ret = init_stub_pte(mm, CONFIG_STUB_CODE,
|
||||
(unsigned long) &__syscall_stub_start);
|
||||
if(ret)
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
|
||||
if(ret)
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
mm->nr_ptes--;
|
||||
}
|
||||
|
||||
to_mm->id.stack = stack;
|
||||
if(current->mm != NULL && current->mm != &init_mm)
|
||||
if (current->mm != NULL && current->mm != &init_mm)
|
||||
from_mm = ¤t->mm->context.skas;
|
||||
|
||||
if(proc_mm){
|
||||
if (proc_mm) {
|
||||
ret = new_mm(stack);
|
||||
if(ret < 0){
|
||||
printk("init_new_context_skas - new_mm failed, "
|
||||
"errno = %d\n", ret);
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "init_new_context_skas - "
|
||||
"new_mm failed, errno = %d\n", ret);
|
||||
goto out_free;
|
||||
}
|
||||
to_mm->id.u.mm_fd = ret;
|
||||
}
|
||||
else {
|
||||
if(from_mm)
|
||||
if (from_mm)
|
||||
to_mm->id.u.pid = copy_context_skas0(stack,
|
||||
from_mm->id.u.pid);
|
||||
else to_mm->id.u.pid = start_userspace(stack);
|
||||
}
|
||||
|
||||
ret = init_new_ldt(to_mm, from_mm);
|
||||
if(ret < 0){
|
||||
printk("init_new_context_skas - init_ldt"
|
||||
if (ret < 0) {
|
||||
printk(KERN_ERR "init_new_context_skas - init_ldt"
|
||||
" failed, errno = %d\n", ret);
|
||||
goto out_free;
|
||||
}
|
||||
@ -131,7 +125,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
if(to_mm->id.stack != 0)
|
||||
if (to_mm->id.stack != 0)
|
||||
free_page(to_mm->id.stack);
|
||||
out:
|
||||
return ret;
|
||||
@ -141,12 +135,12 @@ void destroy_context(struct mm_struct *mm)
|
||||
{
|
||||
struct mmu_context_skas *mmu = &mm->context.skas;
|
||||
|
||||
if(proc_mm)
|
||||
if (proc_mm)
|
||||
os_close_file(mmu->id.u.mm_fd);
|
||||
else
|
||||
os_kill_ptraced_process(mmu->id.u.pid, 1);
|
||||
|
||||
if(!proc_mm || !ptrace_faultinfo){
|
||||
if (!proc_mm || !ptrace_faultinfo) {
|
||||
free_page(mmu->id.stack);
|
||||
pte_lock_deinit(virt_to_page(mmu->last_page_table));
|
||||
pte_free_kernel((pte_t *) mmu->last_page_table);
|
||||
|
@ -1,36 +1,23 @@
|
||||
/*
|
||||
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/sched.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "linux/proc_fs.h"
|
||||
#include "linux/file.h"
|
||||
#include "linux/errno.h"
|
||||
#include "linux/init.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "asm/atomic.h"
|
||||
#include "kern_util.h"
|
||||
#include "linux/sched.h"
|
||||
#include "as-layout.h"
|
||||
#include "skas.h"
|
||||
#include "os.h"
|
||||
#include "tlb.h"
|
||||
#include "kern.h"
|
||||
#include "registers.h"
|
||||
|
||||
extern void schedule_tail(struct task_struct *prev);
|
||||
#include "skas.h"
|
||||
|
||||
int new_mm(unsigned long stack)
|
||||
{
|
||||
int fd;
|
||||
|
||||
fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
|
||||
if(fd < 0)
|
||||
if (fd < 0)
|
||||
return fd;
|
||||
|
||||
if(skas_needs_stub)
|
||||
if (skas_needs_stub)
|
||||
map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
|
||||
|
||||
return fd;
|
||||
@ -62,7 +49,7 @@ int __init start_uml(void)
|
||||
{
|
||||
stack_protections((unsigned long) &cpu0_irqstack);
|
||||
set_sigstack(cpu0_irqstack, THREAD_SIZE);
|
||||
if(proc_mm)
|
||||
if (proc_mm)
|
||||
userspace_pid[0] = start_userspace(0);
|
||||
|
||||
init_new_thread_signals();
|
||||
@ -75,7 +62,7 @@ int __init start_uml(void)
|
||||
|
||||
unsigned long current_stub_stack(void)
|
||||
{
|
||||
if(current->mm == NULL)
|
||||
if (current->mm == NULL)
|
||||
return 0;
|
||||
|
||||
return current->mm->context.skas.id.stack;
|
||||
|
@ -1,17 +1,13 @@
|
||||
/*
|
||||
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/sys.h"
|
||||
#include "linux/kernel.h"
|
||||
#include "linux/ptrace.h"
|
||||
#include "asm/errno.h"
|
||||
#include "asm/unistd.h"
|
||||
#include "asm/ptrace.h"
|
||||
#include "asm/current.h"
|
||||
#include "sysdep/syscalls.h"
|
||||
#include "kern_util.h"
|
||||
#include "syscall.h"
|
||||
#include "sysdep/ptrace.h"
|
||||
#include "sysdep/syscalls.h"
|
||||
|
||||
void handle_syscall(struct uml_pt_regs *r)
|
||||
{
|
||||
@ -24,7 +20,8 @@ void handle_syscall(struct uml_pt_regs *r)
|
||||
current->thread.nsyscalls++;
|
||||
nsyscalls++;
|
||||
|
||||
/* This should go in the declaration of syscall, but when I do that,
|
||||
/*
|
||||
* This should go in the declaration of syscall, but when I do that,
|
||||
* strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing
|
||||
* children at all, sometimes hanging when bash doesn't see the first
|
||||
* ls exit.
|
||||
@ -33,7 +30,7 @@ void handle_syscall(struct uml_pt_regs *r)
|
||||
* in case it's a compiler bug.
|
||||
*/
|
||||
syscall = UPT_SYSCALL_NR(r);
|
||||
if((syscall >= NR_syscalls) || (syscall < 0))
|
||||
if ((syscall >= NR_syscalls) || (syscall < 0))
|
||||
result = -ENOSYS;
|
||||
else result = EXECUTE_SYSCALL(syscall, regs);
|
||||
|
||||
|
@ -1,25 +1,17 @@
|
||||
/*
|
||||
* Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/sched.h"
|
||||
#include "linux/file.h"
|
||||
#include "linux/smp_lock.h"
|
||||
#include "linux/mm.h"
|
||||
#include "linux/fs.h"
|
||||
#include "linux/mm.h"
|
||||
#include "linux/sched.h"
|
||||
#include "linux/utsname.h"
|
||||
#include "linux/msg.h"
|
||||
#include "linux/shm.h"
|
||||
#include "linux/sys.h"
|
||||
#include "linux/syscalls.h"
|
||||
#include "linux/unistd.h"
|
||||
#include "linux/slab.h"
|
||||
#include "linux/utime.h"
|
||||
#include "asm/current.h"
|
||||
#include "asm/mman.h"
|
||||
#include "asm/uaccess.h"
|
||||
#include "kern_util.h"
|
||||
#include "sysdep/syscalls.h"
|
||||
#include "asm/unistd.h"
|
||||
|
||||
/* Unlocked, I don't care if this is a bit off */
|
||||
int nsyscalls = 0;
|
||||
@ -32,7 +24,7 @@ long sys_fork(void)
|
||||
ret = do_fork(SIGCHLD, UPT_SP(¤t->thread.regs.regs),
|
||||
¤t->thread.regs, 0, NULL, NULL);
|
||||
current->thread.forking = 0;
|
||||
return(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
long sys_vfork(void)
|
||||
@ -44,7 +36,7 @@ long sys_vfork(void)
|
||||
UPT_SP(¤t->thread.regs.regs),
|
||||
¤t->thread.regs, 0, NULL, NULL);
|
||||
current->thread.forking = 0;
|
||||
return(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* common code for old and new mmaps */
|
||||
@ -90,15 +82,15 @@ long old_mmap(unsigned long addr, unsigned long len,
|
||||
*/
|
||||
long sys_pipe(unsigned long __user * fildes)
|
||||
{
|
||||
int fd[2];
|
||||
long error;
|
||||
int fd[2];
|
||||
long error;
|
||||
|
||||
error = do_pipe(fd);
|
||||
if (!error) {
|
||||
error = do_pipe(fd);
|
||||
if (!error) {
|
||||
if (copy_to_user(fildes, fd, sizeof(fd)))
|
||||
error = -EFAULT;
|
||||
}
|
||||
return error;
|
||||
error = -EFAULT;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
@ -122,7 +114,7 @@ long sys_olduname(struct oldold_utsname __user * name)
|
||||
if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
|
||||
return -EFAULT;
|
||||
|
||||
down_read(&uts_sem);
|
||||
down_read(&uts_sem);
|
||||
|
||||
error = __copy_to_user(&name->sysname, &utsname()->sysname,
|
||||
__OLD_UTS_LEN);
|
||||
|
@ -1,28 +1,19 @@
|
||||
/*
|
||||
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/kernel.h"
|
||||
#include "linux/module.h"
|
||||
#include "linux/unistd.h"
|
||||
#include "linux/stddef.h"
|
||||
#include "linux/spinlock.h"
|
||||
#include "linux/time.h"
|
||||
#include "linux/sched.h"
|
||||
#include "linux/interrupt.h"
|
||||
#include "linux/init.h"
|
||||
#include "linux/delay.h"
|
||||
#include "linux/hrtimer.h"
|
||||
#include "linux/jiffies.h"
|
||||
#include "linux/threads.h"
|
||||
#include "asm/irq.h"
|
||||
#include "asm/param.h"
|
||||
#include "asm/current.h"
|
||||
#include "kern_util.h"
|
||||
#include "os.h"
|
||||
|
||||
int hz(void)
|
||||
{
|
||||
return(HZ);
|
||||
return HZ;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -43,7 +34,7 @@ void timer_irq(struct uml_pt_regs *regs)
|
||||
unsigned long long ticks = 0;
|
||||
#ifdef CONFIG_UML_REAL_TIME_CLOCK
|
||||
int c = cpu();
|
||||
if(prev_nsecs[c]){
|
||||
if (prev_nsecs[c]) {
|
||||
/* We've had 1 tick */
|
||||
unsigned long long nsecs = os_nsecs();
|
||||
|
||||
@ -51,7 +42,7 @@ void timer_irq(struct uml_pt_regs *regs)
|
||||
prev_nsecs[c] = nsecs;
|
||||
|
||||
/* Protect against the host clock being set backwards */
|
||||
if(delta[c] < 0)
|
||||
if (delta[c] < 0)
|
||||
delta[c] = 0;
|
||||
|
||||
ticks += (delta[c] * HZ) / BILLION;
|
||||
@ -61,7 +52,7 @@ void timer_irq(struct uml_pt_regs *regs)
|
||||
#else
|
||||
ticks = 1;
|
||||
#endif
|
||||
while(ticks > 0){
|
||||
while (ticks > 0) {
|
||||
do_IRQ(TIMER_IRQ, regs);
|
||||
ticks--;
|
||||
}
|
||||
@ -112,12 +103,12 @@ static void register_timer(void)
|
||||
int err;
|
||||
|
||||
err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL);
|
||||
if(err != 0)
|
||||
if (err != 0)
|
||||
printk(KERN_ERR "register_timer : request_irq failed - "
|
||||
"errno = %d\n", -err);
|
||||
|
||||
err = set_interval(1);
|
||||
if(err != 0)
|
||||
if (err != 0)
|
||||
printk(KERN_ERR "register_timer : set_interval failed - "
|
||||
"errno = %d\n", -err);
|
||||
}
|
||||
@ -144,7 +135,8 @@ void do_gettimeofday(struct timeval *tv)
|
||||
xtime.tv_nsec;
|
||||
#endif
|
||||
tv->tv_sec = nsecs / NSEC_PER_SEC;
|
||||
/* Careful about calculations here - this was originally done as
|
||||
/*
|
||||
* Careful about calculations here - this was originally done as
|
||||
* (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC
|
||||
* which gave bogus (> 1000000) values. Dunno why, suspect gcc
|
||||
* (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion
|
||||
@ -176,7 +168,7 @@ int do_settimeofday(struct timespec *tv)
|
||||
|
||||
void timer_handler(int sig, struct uml_pt_regs *regs)
|
||||
{
|
||||
if(current_thread->cpu == 0)
|
||||
if (current_thread->cpu == 0)
|
||||
timer_irq(regs);
|
||||
local_irq_disable();
|
||||
irq_enter();
|
||||
|
@ -1,19 +1,16 @@
|
||||
/*
|
||||
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
|
||||
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
||||
* Licensed under the GPL
|
||||
*/
|
||||
|
||||
#include "linux/mm.h"
|
||||
#include "asm/page.h"
|
||||
#include "asm/pgalloc.h"
|
||||
#include "asm/pgtable.h"
|
||||
#include "asm/tlbflush.h"
|
||||
#include "as-layout.h"
|
||||
#include "tlb.h"
|
||||
#include "mem.h"
|
||||
#include "mem_user.h"
|
||||
#include "os.h"
|
||||
#include "skas.h"
|
||||
#include "tlb.h"
|
||||
|
||||
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
|
||||
unsigned int prot, struct host_vm_op *ops, int *index,
|
||||
@ -26,18 +23,18 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
|
||||
int fd, ret = 0;
|
||||
|
||||
fd = phys_mapping(phys, &offset);
|
||||
if(*index != -1){
|
||||
if (*index != -1) {
|
||||
last = &ops[*index];
|
||||
if((last->type == MMAP) &&
|
||||
if ((last->type == MMAP) &&
|
||||
(last->u.mmap.addr + last->u.mmap.len == virt) &&
|
||||
(last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
|
||||
(last->u.mmap.offset + last->u.mmap.len == offset)){
|
||||
(last->u.mmap.offset + last->u.mmap.len == offset)) {
|
||||
last->u.mmap.len += len;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if(*index == last_filled){
|
||||
if (*index == last_filled) {
|
||||
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
|
||||
*index = -1;
|
||||
}
|
||||
@ -62,16 +59,16 @@ static int add_munmap(unsigned long addr, unsigned long len,
|
||||
struct host_vm_op *last;
|
||||
int ret = 0;
|
||||
|
||||
if(*index != -1){
|
||||
if (*index != -1) {
|
||||
last = &ops[*index];
|
||||
if((last->type == MUNMAP) &&
|
||||
(last->u.munmap.addr + last->u.mmap.len == addr)){
|
||||
if ((last->type == MUNMAP) &&
|
||||
(last->u.munmap.addr + last->u.mmap.len == addr)) {
|
||||
last->u.munmap.len += len;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if(*index == last_filled){
|
||||
if (*index == last_filled) {
|
||||
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
|
||||
*index = -1;
|
||||
}
|
||||
@ -92,17 +89,17 @@ static int add_mprotect(unsigned long addr, unsigned long len,
|
||||
struct host_vm_op *last;
|
||||
int ret = 0;
|
||||
|
||||
if(*index != -1){
|
||||
if (*index != -1) {
|
||||
last = &ops[*index];
|
||||
if((last->type == MPROTECT) &&
|
||||
if ((last->type == MPROTECT) &&
|
||||
(last->u.mprotect.addr + last->u.mprotect.len == addr) &&
|
||||
(last->u.mprotect.prot == prot)){
|
||||
(last->u.mprotect.prot == prot)) {
|
||||
last->u.mprotect.len += len;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if(*index == last_filled){
|
||||
if (*index == last_filled) {
|
||||
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
|
||||
*index = -1;
|
||||
}
|
||||
@ -141,15 +138,15 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
|
||||
}
|
||||
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
|
||||
(x ? UM_PROT_EXEC : 0));
|
||||
if(force || pte_newpage(*pte)){
|
||||
if(pte_present(*pte))
|
||||
if (force || pte_newpage(*pte)) {
|
||||
if (pte_present(*pte))
|
||||
ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
|
||||
PAGE_SIZE, prot, ops, op_index,
|
||||
last_op, mmu, flush, do_ops);
|
||||
else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
last_op, mmu, flush, do_ops);
}
else if(pte_newprot(*pte))
else if (pte_newprot(*pte))
ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
last_op, mmu, flush, do_ops);
*pte = pte_mkuptodate(*pte);
@ -172,8 +169,8 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if(!pmd_present(*pmd)){
if(force || pmd_newpage(*pmd)){
if (!pmd_present(*pmd)) {
if (force || pmd_newpage(*pmd)) {
ret = add_munmap(addr, next - addr, ops,
op_index, last_op, mmu,
flush, do_ops);
@ -202,8 +199,8 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if(!pud_present(*pud)){
if(force || pud_newpage(*pud)){
if (!pud_present(*pud)) {
if (force || pud_newpage(*pud)) {
ret = add_munmap(addr, next - addr, ops,
op_index, last_op, mmu,
flush, do_ops);
@ -233,8 +230,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end_addr);
if(!pgd_present(*pgd)){
if (force || pgd_newpage(*pgd)){
if (!pgd_present(*pgd)) {
if (force || pgd_newpage(*pgd)) {
ret = add_munmap(addr, next - addr, ops,
&op_index, last_op, mmu,
&flush, do_ops);
@ -246,12 +243,13 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
do_ops);
} while (pgd++, addr = next, ((addr != end_addr) && !ret));

if(!ret)
if (!ret)
ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

/* This is not an else because ret is modified above */
if(ret) {
printk("fix_range_common: failed, killing current process\n");
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
"process\n");
force_sig(SIGKILL, current);
}
}
@ -267,17 +265,17 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
int updated = 0, err;

mm = &init_mm;
for(addr = start; addr < end;){
for (addr = start; addr < end;) {
pgd = pgd_offset(mm, addr);
if(!pgd_present(*pgd)){
if (!pgd_present(*pgd)) {
last = ADD_ROUND(addr, PGDIR_SIZE);
if(last > end)
if (last > end)
last = end;
if(pgd_newpage(*pgd)){
if (pgd_newpage(*pgd)) {
updated = 1;
err = os_unmap_memory((void *) addr,
last - addr);
if(err < 0)
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
@ -286,15 +284,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
}

pud = pud_offset(pgd, addr);
if(!pud_present(*pud)){
if (!pud_present(*pud)) {
last = ADD_ROUND(addr, PUD_SIZE);
if(last > end)
if (last > end)
last = end;
if(pud_newpage(*pud)){
if (pud_newpage(*pud)) {
updated = 1;
err = os_unmap_memory((void *) addr,
last - addr);
if(err < 0)
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
@ -303,15 +301,15 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
}

pmd = pmd_offset(pud, addr);
if(!pmd_present(*pmd)){
if (!pmd_present(*pmd)) {
last = ADD_ROUND(addr, PMD_SIZE);
if(last > end)
if (last > end)
last = end;
if(pmd_newpage(*pmd)){
if (pmd_newpage(*pmd)) {
updated = 1;
err = os_unmap_memory((void *) addr,
last - addr);
if(err < 0)
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
@ -320,25 +318,25 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
}

pte = pte_offset_kernel(pmd, addr);
if(!pte_present(*pte) || pte_newpage(*pte)){
if (!pte_present(*pte) || pte_newpage(*pte)) {
updated = 1;
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
if (err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*pte))
if (pte_present(*pte))
map_memory(addr,
pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, 1, 1, 1);
}
else if(pte_newprot(*pte)){
else if (pte_newprot(*pte)) {
updated = 1;
os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
}
addr += PAGE_SIZE;
}
return(updated);
return updated;
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
@ -354,15 +352,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)

address &= PAGE_MASK;
pgd = pgd_offset(mm, address);
if(!pgd_present(*pgd))
if (!pgd_present(*pgd))
goto kill;

pud = pud_offset(pgd, address);
if(!pud_present(*pud))
if (!pud_present(*pud))
goto kill;

pmd = pmd_offset(pud, address);
if(!pmd_present(*pmd))
if (!pmd_present(*pmd))
goto kill;

pte = pte_offset_kernel(pmd, address);
@ -380,8 +378,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
mm_id = &mm->context.skas.id;
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if(pte_newpage(*pte)){
if(pte_present(*pte)){
if (pte_newpage(*pte)) {
if (pte_present(*pte)) {
unsigned long long offset;
int fd;

@ -391,10 +389,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
}
else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
}
else if(pte_newprot(*pte))
else if (pte_newprot(*pte))
err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

if(err)
if (err)
goto kill;

*pte = pte_mkuptodate(*pte);
@ -402,28 +400,28 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
return;

kill:
printk("Failed to flush page for address 0x%lx\n", address);
printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
force_sig(SIGKILL, current);
}

pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
return(pgd_offset(mm, address));
return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
return(pud_offset(pgd, address));
return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
return(pmd_offset(pud, address));
return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
return(pte_offset_kernel(pmd, address));
return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
@ -432,7 +430,7 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);

return(pte_offset_map(pmd, addr));
return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
@ -452,18 +450,18 @@ void flush_tlb_kernel_vm(void)

void __flush_tlb_one(unsigned long addr)
{
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush)
{
struct host_vm_op *op;
int i, ret = 0;
int i, ret = 0;

for(i = 0; i <= last && !ret; i++){
op = &ops[i];
switch(op->type){
for (i = 0; i <= last && !ret; i++) {
op = &ops[i];
switch(op->type) {
case MMAP:
ret = map(&mmu->skas.id, op->u.mmap.addr,
op->u.mmap.len, op->u.mmap.prot,
@ -480,7 +478,8 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
finished, flush);
break;
default:
printk("Unknown op type %d in do_ops\n", op->type);
printk(KERN_ERR "Unknown op type %d in do_ops\n",
op->type);
break;
}
}
@ -491,32 +490,33 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
if(!proc_mm && (end_addr > CONFIG_STUB_START))
end_addr = CONFIG_STUB_START;
if (!proc_mm && (end_addr > CONFIG_STUB_START))
end_addr = CONFIG_STUB_START;

fix_range_common(mm, start_addr, end_addr, force, do_ops);
fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
if(vma->vm_mm == NULL)
flush_tlb_kernel_range_common(start, end);
else fix_range(vma->vm_mm, start, end, 0);
if (vma->vm_mm == NULL)
flush_tlb_kernel_range_common(start, end);
else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
unsigned long end;

/* Don't bother flushing if this address space is about to be
* destroyed.
*/
if(atomic_read(&mm->mm_users) == 0)
return;
/*
* Don't bother flushing if this address space is about to be
* destroyed.
*/
if (atomic_read(&mm->mm_users) == 0)
return;

end = proc_mm ? task_size : CONFIG_STUB_START;
fix_range(mm, 0, end, 0);
fix_range(mm, 0, end, 0);
}

void force_flush_all(void)
@ -524,7 +524,7 @@ void force_flush_all(void)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = mm->mmap;

while(vma != NULL) {
while (vma != NULL) {
fix_range(mm, vma->vm_start, vma->vm_end, 1);
vma = vma->vm_next;
}

@ -1,39 +1,22 @@
/*
* Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com)
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/notifier.h"
#include "linux/mm.h"
#include "linux/types.h"
#include "linux/tty.h"
#include "linux/init.h"
#include "linux/bootmem.h"
#include "linux/spinlock.h"
#include "linux/utsname.h"
#include "linux/sysrq.h"
#include "linux/seq_file.h"
#include "linux/delay.h"
#include "linux/mm.h"
#include "linux/module.h"
#include "linux/seq_file.h"
#include "linux/string.h"
#include "linux/utsname.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/ptrace.h"
#include "asm/elf.h"
#include "asm/user.h"
#include "asm/processor.h"
#include "asm/setup.h"
#include "ubd_user.h"
#include "asm/current.h"
#include "kern_util.h"
#include "as-layout.h"
#include "arch.h"
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "mem_user.h"
#include "mem.h"
#include "initrd.h"
#include "init.h"
#include "os.h"
#include "skas.h"

@ -48,7 +31,7 @@ static void __init add_arg(char *arg)
printf("add_arg: Too many command line arguments!\n");
exit(1);
}
if(strlen(command_line) > 0)
if (strlen(command_line) > 0)
strcat(command_line, " ");
strcat(command_line, arg);
}
@ -133,7 +116,7 @@ static int have_root __initdata = 0;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;

static char *usage_string =
static char *usage_string =
"User Mode Linux v%s\n"
" available at http://user-mode-linux.sourceforge.net/\n\n";

@ -191,7 +174,7 @@ static int __init uml_ncpus_setup(char *line, int *add)

__uml_setup("ncpus=", uml_ncpus_setup,
"ncpus=<# of desired CPUs>\n"
" This tells an SMP kernel how many virtual processors to start.\n\n"
" This tells an SMP kernel how many virtual processors to start.\n\n"
);
#endif

@ -223,9 +206,8 @@ static int __init uml_checksetup(char *line, int *add)
int n;

n = strlen(p->str);
if(!strncmp(line, p->str, n)){
if (p->setup_func(line + n, add)) return 1;
}
if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
return 1;
p++;
}
return 0;
@ -236,7 +218,7 @@ static void __init uml_postsetup(void)
initcall_t *p;

p = &__uml_postsetup_start;
while(p < &__uml_postsetup_end){
while(p < &__uml_postsetup_end) {
(*p)();
p++;
}
@ -272,16 +254,18 @@ int __init linux_main(int argc, char **argv)
unsigned int i, add;
char * mode;

for (i = 1; i < argc; i++){
if((i == 1) && (argv[i][0] == ' ')) continue;
for (i = 1; i < argc; i++) {
if ((i == 1) && (argv[i][0] == ' '))
continue;
add = 1;
uml_checksetup(argv[i], &add);
if (add)
add_arg(argv[i]);
}
if(have_root == 0)
if (have_root == 0)
add_arg(DEFAULT_COMMAND_LINE);

/* OS sanity checks that need to happen before the kernel runs */
os_early_checks();

can_do_skas();
@ -302,12 +286,14 @@ int __init linux_main(int argc, char **argv)

brk_start = (unsigned long) sbrk(0);

/* Increase physical memory size for exec-shield users
so they actually get what they asked for. This should
add zero for non-exec shield users */
/*
* Increase physical memory size for exec-shield users
* so they actually get what they asked for. This should
* add zero for non-exec shield users
*/

diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
if(diff > 1024 * 1024){
if (diff > 1024 * 1024) {
printf("Adding %ld bytes to physical memory to account for "
"exec-shield gap\n", diff);
physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
@ -324,11 +310,12 @@ int __init linux_main(int argc, char **argv)
iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;

/* Zones have to begin on a 1 << MAX_ORDER page boundary,
/*
* Zones have to begin on a 1 << MAX_ORDER page boundary,
* so this makes sure that's true for highmem
*/
max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
if(physmem_size + iomem_size > max_physmem){
if (physmem_size + iomem_size > max_physmem) {
highmem = physmem_size + iomem_size - max_physmem;
physmem_size -= highmem;
#ifndef CONFIG_HIGHMEM
@ -345,7 +332,7 @@ int __init linux_main(int argc, char **argv)
start_vm = VMALLOC_START;

setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
if(init_maps(physmem_size, iomem_size, highmem)){
if (init_maps(physmem_size, iomem_size, highmem)) {
printf("Failed to allocate mem_map for %Lu bytes of physical "
"memory and %Lu bytes of highmem\n", physmem_size,
highmem);
@ -354,10 +341,11 @@ int __init linux_main(int argc, char **argv)

virtmem_size = physmem_size;
avail = get_kmem_end() - start_vm;
if(physmem_size > avail) virtmem_size = avail;
if (physmem_size > avail)
virtmem_size = avail;
end_vm = start_vm + virtmem_size;

if(virtmem_size < physmem_size)
if (virtmem_size < physmem_size)
printf("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);