sh: Initial vsyscall page support.

This implements initial support for the vsyscall page on SH.
At the moment we leave it configurable, since we still have
nommu to support from the same code base. We hook it up for
the signal trampoline return at present, with more to be added
later once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Author: Paul Mundt <lethal@linux-sh.org>
Date:   2006-09-27 18:33:49 +09:00
Commit: 19f9a34f87
Parent: 8c12b5dc13

19 changed files with 473 additions and 17 deletions
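For context, the consumer this commit wires up lives in the SH signal code (arch/sh/kernel/signal.c): when a handler registers no SA_RESTORER, the signal return address is pointed at the sigreturn trampoline in the vsyscall page instead of at code generated on the user stack. A simplified sketch; the symbol name __kernel_sigreturn follows the commit's vsyscall-sigreturn.S, and the surrounding details are illustrative rather than a verbatim quote of the patch:

	/* inside setup_frame(): choose where the handler returns to */
	if (ka->sa.sa_flags & SA_RESTORER) {
		/* userspace supplied its own restorer */
		regs->pr = (unsigned long)ka->sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
	} else if (likely(current->mm->context.vdso)) {
		/* return through the trampoline in the vsyscall page */
		regs->pr = VDSO_SYM(&__kernel_sigreturn);
#endif
	} else {
		/* legacy path: trampoline generated on the user stack */
		regs->pr = (unsigned long)frame->retcode;
	}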

--- a/include/asm-sh/auxvec.h
+++ b/include/asm-sh/auxvec.h

@@ -1,4 +1,18 @@
 #ifndef __ASM_SH_AUXVEC_H
 #define __ASM_SH_AUXVEC_H
 
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them.
+ */
+
+#ifdef CONFIG_VSYSCALL
+/*
+ * Only define this in the vsyscall case, the entry point to
+ * the vsyscall page gets placed here. The kernel will attempt
+ * to build a gate VMA we don't care about otherwise..
+ */
+#define AT_SYSINFO_EHDR	33
+#endif
+
 #endif /* __ASM_SH_AUXVEC_H */
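On the userspace side, AT_SYSINFO_EHDR is what a libc (here, a future uClibc) would look for in the auxiliary vector to locate the page. A minimal sketch, not part of this commit; the helper name and the envp-walking approach are illustrative:

#include <elf.h>

static unsigned long find_vsyscall_base(char **envp)
{
	Elf32_auxv_t *auxv;

	/* the auxv starts just past the NULL terminator of envp */
	while (*envp)
		envp++;
	envp++;

	for (auxv = (Elf32_auxv_t *)envp; auxv->a_type != AT_NULL; auxv++)
		if (auxv->a_type == AT_SYSINFO_EHDR)
			return auxv->a_un.a_val;

	return 0;	/* no vsyscall page (e.g. CONFIG_VSYSCALL=n) */
}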

--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h

@@ -121,4 +121,24 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)	dump_task_fpu(tsk, elf_fpregs)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int executable_stack);
+
+extern unsigned int vdso_enabled;
+extern void __kernel_vsyscall;
+
+#define VDSO_BASE		((unsigned long)current->mm->context.vdso)
+#define VDSO_SYM(x)		(VDSO_BASE + (unsigned long)(x))
+
+#define ARCH_DLINFO						\
+do {								\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+} while (0)
+#endif /* CONFIG_VSYSCALL */
+
 #endif /* __ASM_SH_ELF_H */
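The ARCH_DLINFO hook is expanded by create_elf_tables() in fs/binfmt_elf.c while the auxiliary vector is laid out, which is how the conditional NEW_AUX_ENT above becomes a real auxv entry. A trimmed sketch of the generic loader side of that era (not SH-specific, simplified):

	/* fs/binfmt_elf.c, create_elf_tables(): each NEW_AUX_ENT()
	 * appends an (id, value) pair to the auxv array */
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
	/* ARCH_DLINFO comes first so the architecture can emit
	 * AT_SYSINFO_EHDR only when vdso_enabled is set */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);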

--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h

@@ -11,7 +11,12 @@ typedef struct {
 #else
 
 /* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t		id;
+	void			*vdso;
+} mm_context_t;
 
 #endif /* CONFIG_MMU */
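The new vdso slot is what arch_setup_additional_pages() fills at exec time, once the vsyscall page has been mapped into the new mm. A sketch of the shape, using the install_special_mapping() helper from later kernels for brevity (the original code predates that helper); syscall_pages is assumed to be the page array set up at boot:

int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/* pick any free page-sized slot in the address space */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* map the shared vsyscall page read/exec into this mm */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYEXEC,
				      syscall_pages);
	if (ret)
		goto up_fail;

	/* record the base so VDSO_BASE/VDSO_SYM and ARCH_DLINFO work */
	mm->context.vdso = (void *)addr;

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}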

--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h

@@ -49,7 +49,7 @@ get_mmu_context(struct mm_struct *mm)
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
-	if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;
 
@@ -68,7 +68,7 @@ get_mmu_context(struct mm_struct *mm)
 		if (!mc)
 			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context = mc;
+	mm->context.id = mc;
 }
 
 /*
@@ -78,7 +78,7 @@ get_mmu_context(struct mm_struct *mm)
 static __inline__ int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
-	mm->context = NO_CONTEXT;
+	mm->context.id = NO_CONTEXT;
 
 	return 0;
 }
@@ -123,7 +123,7 @@ static __inline__ unsigned long get_asid(void)
 static __inline__ void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
-	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
 /* MMU_TTB can be used for optimizing the fault handling.
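These renames are mechanical, but the layout of the id word they preserve is worth spelling out. A worked example, assuming the SH constants of that era (check include/asm-sh/mmu_context.h for the authoritative values):

/*
 * mm->context.id packs an 8-bit hardware ASID together with a
 * 24-bit software "version" (generation counter):
 *
 *	MMU_CONTEXT_ASID_MASK		0x000000ff
 *	MMU_CONTEXT_VERSION_MASK	0xffffff00
 *	MMU_CONTEXT_FIRST_VERSION	0x00000100
 *
 * With mm->context.id == 0x00000342:
 *	activate_context() loads ASID 0x42 into the MMU, and
 *	get_mmu_context() compares version 0x00000300 against
 *	mmu_context_cache to decide if a fresh ASID is needed.
 */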

--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h

@@ -117,5 +117,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+/* vDSO support */
+#ifdef CONFIG_VSYSCALL
+#define __HAVE_ARCH_GATE_AREA
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
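Defining __HAVE_ARCH_GATE_AREA obliges the architecture to supply the gate-VMA helpers itself. Because the SH vsyscall page is an ordinary per-process VMA rather than a kernel-owned gate mapping, stubs suffice; a sketch of what the commit's vsyscall.c plausibly provides, with prototypes per the 2.6.18-era API:

#include <linux/mm.h>
#include <linux/sched.h>

/* No fixmap-style gate VMA: the vDSO is a normal mapping, so the
 * generic code should never treat any address as a gate area. */
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}

int in_gate_area_no_task(unsigned long address)
{
	return 0;
}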

--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h

@@ -276,5 +276,11 @@ static inline void prefetch(void *x)
 #define prefetchw(x)	prefetch(x)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+extern int vsyscall_init(void);
+#else
+#define vsyscall_init()	do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_H */
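Finally, the vsyscall_init() declared here runs once at boot to populate the page; the nommu build gets the empty do-while stub instead. A simplified sketch of its shape (the trampoline symbols come from the commit's assembly sources; names and error handling are abbreviated):

#include <linux/gfp.h>
#include <linux/mm.h>

/* trampoline code assembled in vsyscall-trapa.S / vsyscall-sigreturn.S */
extern char vsyscall_trapa_start, vsyscall_trapa_end;

/* the single shared page later mapped into each process by
 * arch_setup_additional_pages() */
static struct page *syscall_pages[1];

int __init vsyscall_init(void)
{
	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);

	syscall_pages[0] = virt_to_page(syscall_page);

	/* copy the trampoline into the page; it is mapped read/exec
	 * into userspace and never written after this point */
	memcpy(syscall_page, &vsyscall_trapa_start,
	       &vsyscall_trapa_end - &vsyscall_trapa_start);

	return 0;
}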