x86, um: get rid of header symlinks

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Authored by Al Viro on 2008-08-23 17:28:20 -04:00
Committed by H. Peter Anvin
parent 4198426af3
commit aa7bd94249
50 changed files with 5 additions and 19 deletions

@@ -0,0 +1,22 @@
/*
* arch/um/include/sysdep-i386/archsetjmp.h
*/
#ifndef _KLIBC_ARCHSETJMP_H
#define _KLIBC_ARCHSETJMP_H
struct __jmp_buf {
unsigned int __ebx;
unsigned int __esp;
unsigned int __ebp;
unsigned int __esi;
unsigned int __edi;
unsigned int __eip;
};
typedef struct __jmp_buf jmp_buf[1];
#define JB_IP __eip
#define JB_SP __esp
#endif /* _KLIBC_ARCHSETJMP_H */
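
For orientation only, here is a tiny host-side sketch (not part of the patch) of what this layout provides: the save slots sit at fixed offsets, and JB_SP/JB_IP name the saved stack pointer and program counter that UML's host code can patch before a longjmp to redirect execution. The struct is repeated locally so the snippet builds stand-alone.

#include <stdio.h>
#include <stddef.h>

/* same layout as the header above, repeated so this builds stand-alone */
struct __jmp_buf {
        unsigned int __ebx;
        unsigned int __esp;
        unsigned int __ebp;
        unsigned int __esi;
        unsigned int __edi;
        unsigned int __eip;
};

int main(void)
{
        /* JB_SP and JB_IP select these two save slots */
        printf("saved ESP at offset %zu, saved EIP at offset %zu\n",
               offsetof(struct __jmp_buf, __esp),
               offsetof(struct __jmp_buf, __eip));
        return 0;
}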

@@ -0,0 +1,9 @@
#ifndef __SYSDEP_I386_BARRIER_H
#define __SYSDEP_I386_BARRIER_H
/* Copied from include/asm-i386 for use by userspace. i386 has the option
* of using mfence, but I'm just using this, which works everywhere, for now.
*/
#define mb() asm volatile("lock; addl $0,0(%esp)")
#endif

@@ -0,0 +1,211 @@
/*
* Licensed under the GPL
*/
#ifndef __UM_SYSDEP_CHECKSUM_H
#define __UM_SYSDEP_CHECKSUM_H
#include "linux/in6.h"
#include "linux/string.h"
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static __inline__
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here it is even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
static __inline__
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (copy_from_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(dst, len, sum);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
__asm__ __volatile__(
"movl (%1), %0 ;\n"
"subl $4, %2 ;\n"
"jbe 2f ;\n"
"addl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"1: adcl 16(%1), %0 ;\n"
"lea 4(%1), %1 ;\n"
"decl %2 ;\n"
"jne 1b ;\n"
"adcl $0, %0 ;\n"
"movl %0, %2 ;\n"
"shrl $16, %0 ;\n"
"addw %w2, %w0 ;\n"
"adcl $0, %0 ;\n"
"notl %0 ;\n"
"2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
"adcl $0xffff, %0 ;\n"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000)
);
return (__force __sum16)(~(__force u32)sum >> 16);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
"adcl %2, %0 ;\n"
"adcl %3, %0 ;\n"
"adcl $0, %0 ;\n"
: "=r" (sum)
: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__(
"addl 0(%1), %0 ;\n"
"adcl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"adcl 0(%2), %0 ;\n"
"adcl 4(%2), %0 ;\n"
"adcl 8(%2), %0 ;\n"
"adcl 12(%2), %0 ;\n"
"adcl %3, %0 ;\n"
"adcl %4, %0 ;\n"
"adcl $0, %0 ;\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len)) {
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum)-1;
}
return csum_partial(src, len, sum);
}
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/
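
As a reading aid, here is a plain-C sketch of the ones'-complement arithmetic the asm above implements: sum 16-bit words into a 32-bit accumulator, fold the carries back in, and complement. The _c-suffixed helpers are local names for illustration, not the kernel's optimized routines, and the sample bytes are the usual textbook IPv4 header.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t csum_fold_c(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half into the low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carry that may result */
        return (uint16_t)~sum;
}

static uint16_t ip_compute_csum_c(const void *buff, size_t len)
{
        const uint8_t *p = buff;
        uint32_t sum = 0;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];      /* 16-bit words, network byte order */
                p += 2;
                len -= 2;
        }
        if (len)                                        /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        return csum_fold_c(sum);
}

int main(void)
{
        /* 20-byte IPv4 header with its checksum field zeroed; expect 0xb1e6 */
        uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                            0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                            0xac, 0x10, 0x0a, 0x0c };

        printf("checksum = 0x%04x\n", ip_compute_csum_c(iph, sizeof(iph)));
        return 0;
}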

@@ -0,0 +1,29 @@
/*
* Copyright (C) 2004 Fujitsu Siemens Computers GmbH
* Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
* Licensed under the GPL
*/
#ifndef __FAULTINFO_I386_H
#define __FAULTINFO_I386_H
/* this structure contains the full arch-specific faultinfo
* from the traps.
* On i386, ptrace_faultinfo unfortunately doesn't provide
* all the info, since trap_no is missing.
* All common elements are defined at the same position in
* both structures, thus making it easy to copy the
* contents without knowledge about the structure elements.
*/
struct faultinfo {
int error_code; /* in ptrace_faultinfo misleadingly called is_write */
unsigned long cr2; /* in ptrace_faultinfo called addr */
int trap_no; /* missing in ptrace_faultinfo */
};
#define FAULT_WRITE(fi) ((fi).error_code & 2)
#define FAULT_ADDRESS(fi) ((fi).cr2)
#define PTRACE_FULL_FAULTINFO 0
#endif

@@ -0,0 +1,34 @@
#ifndef __ASM_HOST_LDT_I386_H
#define __ASM_HOST_LDT_I386_H
#include <asm/ldt.h>
/*
* macros stolen from include/asm-i386/desc.h
*/
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
#define LDT_entry_b(info) \
(((info)->base_addr & 0xff000000) | \
(((info)->base_addr & 0x00ff0000) >> 16) | \
((info)->limit & 0xf0000) | \
(((info)->read_exec_only ^ 1) << 9) | \
((info)->contents << 10) | \
(((info)->seg_not_present ^ 1) << 15) | \
((info)->seg_32bit << 22) | \
((info)->limit_in_pages << 23) | \
((info)->useable << 20) | \
0x7000)
#define LDT_empty(info) (\
(info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0 )
#endif
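
To see what the two descriptor words look like, here is a stand-alone sketch that repeats the macros and packs a made-up descriptor. The struct is a local stand-in for the kernel's struct user_desc, and the field values are arbitrary.

#include <stdio.h>

struct fake_user_desc {                 /* local stand-in for struct user_desc */
        unsigned int entry_number;
        unsigned int base_addr;
        unsigned int limit;
        unsigned int seg_32bit:1;
        unsigned int contents:2;
        unsigned int read_exec_only:1;
        unsigned int limit_in_pages:1;
        unsigned int seg_not_present:1;
        unsigned int useable:1;
};

#define LDT_entry_a(info) \
        ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info) \
        (((info)->base_addr & 0xff000000) | \
        (((info)->base_addr & 0x00ff0000) >> 16) | \
        ((info)->limit & 0xf0000) | \
        (((info)->read_exec_only ^ 1) << 9) | \
        ((info)->contents << 10) | \
        (((info)->seg_not_present ^ 1) << 15) | \
        ((info)->seg_32bit << 22) | \
        ((info)->limit_in_pages << 23) | \
        ((info)->useable << 20) | \
        0x7000)

int main(void)
{
        struct fake_user_desc d = {
                .base_addr      = 0x12345678,
                .limit          = 0xfffff,      /* page-granular full segment */
                .seg_32bit      = 1,
                .limit_in_pages = 1,
                .useable        = 1,
        };

        printf("descriptor word a = 0x%08x, word b = 0x%08x\n",
               LDT_entry_a(&d), LDT_entry_b(&d));
        return 0;
}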

@@ -0,0 +1,21 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/crypto.h>
#include <asm/mman.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define STR(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
#define BLANK() asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
void foo(void)
{
#include <common-offsets.h>
}
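
The DEFINE()/OFFSET() asms above are never executed; they only force marker lines into the compiler's assembly output, which the UML build then rewrites into a generated header of constants. A minimal stand-alone sketch of the trick (all names here are invented): compile with gcc -S and inspect the .s file.

#include <stddef.h>

struct demo {
        int  a;
        long b;
};

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
        /* emits a line such as "->DEMO_B_OFFSET $8 offsetof(struct demo, b)"
         * into the assembly output; a build script later turns it into
         * "#define DEMO_B_OFFSET 8" in the generated header */
        DEFINE(DEMO_B_OFFSET, offsetof(struct demo, b));
}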

@@ -0,0 +1,171 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_PTRACE_H
#define __SYSDEP_I386_PTRACE_H
#include "uml-config.h"
#include "user_constants.h"
#include "sysdep/faultinfo.h"
#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
#define MAX_REG_OFFSET (UM_FRAME_SIZE)
static inline void update_debugregs(int seq) {}
/* syscall emulation path in ptrace */
#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 31
#endif
void set_using_sysemu(int value);
int get_using_sysemu(void);
extern int sysemu_supported;
#include "skas_ptregs.h"
#define REGS_IP(r) ((r)[HOST_IP])
#define REGS_SP(r) ((r)[HOST_SP])
#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
#define REGS_EAX(r) ((r)[HOST_EAX])
#define REGS_EBX(r) ((r)[HOST_EBX])
#define REGS_ECX(r) ((r)[HOST_ECX])
#define REGS_EDX(r) ((r)[HOST_EDX])
#define REGS_ESI(r) ((r)[HOST_ESI])
#define REGS_EDI(r) ((r)[HOST_EDI])
#define REGS_EBP(r) ((r)[HOST_EBP])
#define REGS_CS(r) ((r)[HOST_CS])
#define REGS_SS(r) ((r)[HOST_SS])
#define REGS_DS(r) ((r)[HOST_DS])
#define REGS_ES(r) ((r)[HOST_ES])
#define REGS_FS(r) ((r)[HOST_FS])
#define REGS_GS(r) ((r)[HOST_GS])
#define REGS_SET_SYSCALL_RETURN(r, res) REGS_EAX(r) = (res)
#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
#ifndef PTRACE_SYSEMU_SINGLESTEP
#define PTRACE_SYSEMU_SINGLESTEP 32
#endif
struct uml_pt_regs {
unsigned long gp[MAX_REG_NR];
struct faultinfo faultinfo;
long syscall;
int is_user;
};
#define EMPTY_UML_PT_REGS { }
#define UPT_IP(r) REGS_IP((r)->gp)
#define UPT_SP(r) REGS_SP((r)->gp)
#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
#define UPT_EAX(r) REGS_EAX((r)->gp)
#define UPT_EBX(r) REGS_EBX((r)->gp)
#define UPT_ECX(r) REGS_ECX((r)->gp)
#define UPT_EDX(r) REGS_EDX((r)->gp)
#define UPT_ESI(r) REGS_ESI((r)->gp)
#define UPT_EDI(r) REGS_EDI((r)->gp)
#define UPT_EBP(r) REGS_EBP((r)->gp)
#define UPT_ORIG_EAX(r) ((r)->syscall)
#define UPT_CS(r) REGS_CS((r)->gp)
#define UPT_SS(r) REGS_SS((r)->gp)
#define UPT_DS(r) REGS_DS((r)->gp)
#define UPT_ES(r) REGS_ES((r)->gp)
#define UPT_FS(r) REGS_FS((r)->gp)
#define UPT_GS(r) REGS_GS((r)->gp)
#define UPT_SYSCALL_ARG1(r) UPT_EBX(r)
#define UPT_SYSCALL_ARG2(r) UPT_ECX(r)
#define UPT_SYSCALL_ARG3(r) UPT_EDX(r)
#define UPT_SYSCALL_ARG4(r) UPT_ESI(r)
#define UPT_SYSCALL_ARG5(r) UPT_EDI(r)
#define UPT_SYSCALL_ARG6(r) UPT_EBP(r)
extern int user_context(unsigned long sp);
#define UPT_IS_USER(r) ((r)->is_user)
struct syscall_args {
unsigned long args[6];
};
#define SYSCALL_ARGS(r) ((struct syscall_args) \
{ .args = { UPT_SYSCALL_ARG1(r), \
UPT_SYSCALL_ARG2(r), \
UPT_SYSCALL_ARG3(r), \
UPT_SYSCALL_ARG4(r), \
UPT_SYSCALL_ARG5(r), \
UPT_SYSCALL_ARG6(r) } } )
#define UPT_REG(regs, reg) \
({ unsigned long val; \
switch(reg){ \
case EIP: val = UPT_IP(regs); break; \
case UESP: val = UPT_SP(regs); break; \
case EAX: val = UPT_EAX(regs); break; \
case EBX: val = UPT_EBX(regs); break; \
case ECX: val = UPT_ECX(regs); break; \
case EDX: val = UPT_EDX(regs); break; \
case ESI: val = UPT_ESI(regs); break; \
case EDI: val = UPT_EDI(regs); break; \
case EBP: val = UPT_EBP(regs); break; \
case ORIG_EAX: val = UPT_ORIG_EAX(regs); break; \
case CS: val = UPT_CS(regs); break; \
case SS: val = UPT_SS(regs); break; \
case DS: val = UPT_DS(regs); break; \
case ES: val = UPT_ES(regs); break; \
case FS: val = UPT_FS(regs); break; \
case GS: val = UPT_GS(regs); break; \
case EFL: val = UPT_EFLAGS(regs); break; \
default : \
panic("Bad register in UPT_REG : %d\n", reg); \
val = -1; \
} \
val; \
})
#define UPT_SET(regs, reg, val) \
do { \
switch(reg){ \
case EIP: UPT_IP(regs) = val; break; \
case UESP: UPT_SP(regs) = val; break; \
case EAX: UPT_EAX(regs) = val; break; \
case EBX: UPT_EBX(regs) = val; break; \
case ECX: UPT_ECX(regs) = val; break; \
case EDX: UPT_EDX(regs) = val; break; \
case ESI: UPT_ESI(regs) = val; break; \
case EDI: UPT_EDI(regs) = val; break; \
case EBP: UPT_EBP(regs) = val; break; \
case ORIG_EAX: UPT_ORIG_EAX(regs) = val; break; \
case CS: UPT_CS(regs) = val; break; \
case SS: UPT_SS(regs) = val; break; \
case DS: UPT_DS(regs) = val; break; \
case ES: UPT_ES(regs) = val; break; \
case FS: UPT_FS(regs) = val; break; \
case GS: UPT_GS(regs) = val; break; \
case EFL: UPT_EFLAGS(regs) = val; break; \
default : \
panic("Bad register in UPT_SET : %d\n", reg); \
break; \
} \
} while (0)
#define UPT_SET_SYSCALL_RETURN(r, res) \
REGS_SET_SYSCALL_RETURN((r)->regs, (res))
#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
#define UPT_ORIG_SYSCALL(r) UPT_EAX(r)
#define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r)
#define UPT_SYSCALL_RET(r) UPT_EAX(r)
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
extern void arch_init_registers(int pid);
#endif
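
The pattern behind all the REGS_*()/UPT_*() macros is simply a flat array of saved registers indexed by names from user_constants.h. A toy sketch with invented HOST_* indices (the real values are generated at build time):

#include <stdio.h>

enum { HOST_EAX, HOST_EBX, HOST_IP, NREGS };    /* hypothetical indices */

#define REGS_EAX(r)     ((r)[HOST_EAX])
#define REGS_IP(r)      ((r)[HOST_IP])

int main(void)
{
        unsigned long gp[NREGS] = { 0 };

        REGS_EAX(gp) = 0;               /* e.g. set a syscall return value */
        REGS_IP(gp)  = 0x8048000;       /* e.g. point the child at some code */
        printf("eax=%#lx eip=%#lx\n", REGS_EAX(gp), REGS_IP(gp));
        return 0;
}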

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_PTRACE_USER_H__
#define __SYSDEP_I386_PTRACE_USER_H__
#include <sys/ptrace.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include "user_constants.h"
#define PT_OFFSET(r) ((r) * sizeof(long))
#define PT_SYSCALL_NR(regs) ((regs)[ORIG_EAX])
#define PT_SYSCALL_NR_OFFSET PT_OFFSET(ORIG_EAX)
#define PT_SYSCALL_ARG1_OFFSET PT_OFFSET(EBX)
#define PT_SYSCALL_ARG2_OFFSET PT_OFFSET(ECX)
#define PT_SYSCALL_ARG3_OFFSET PT_OFFSET(EDX)
#define PT_SYSCALL_ARG4_OFFSET PT_OFFSET(ESI)
#define PT_SYSCALL_ARG5_OFFSET PT_OFFSET(EDI)
#define PT_SYSCALL_ARG6_OFFSET PT_OFFSET(EBP)
#define PT_SYSCALL_RET_OFFSET PT_OFFSET(EAX)
#define REGS_SYSCALL_NR EAX /* This is used before a system call */
#define REGS_SYSCALL_ARG1 EBX
#define REGS_SYSCALL_ARG2 ECX
#define REGS_SYSCALL_ARG3 EDX
#define REGS_SYSCALL_ARG4 ESI
#define REGS_SYSCALL_ARG5 EDI
#define REGS_SYSCALL_ARG6 EBP
#define REGS_IP_INDEX EIP
#define REGS_SP_INDEX UESP
#define PT_IP_OFFSET PT_OFFSET(EIP)
#define PT_IP(regs) ((regs)[EIP])
#define PT_SP_OFFSET PT_OFFSET(UESP)
#define PT_SP(regs) ((regs)[UESP])
#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
#ifndef FRAME_SIZE
#define FRAME_SIZE (17)
#endif
#endif
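
These offsets are meant for ptrace(PTRACE_PEEKUSER, ...) on the host. A small sketch of the idea, assuming a 32-bit x86 build (ORIG_EAX and the other register indices come from <sys/reg.h>; build with -m32 on a 64-bit machine):

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/reg.h>            /* ORIG_EAX and friends on 32-bit x86 */
#include <sys/types.h>
#include <sys/wait.h>

#define PT_OFFSET(r) ((r) * sizeof(long))

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl("/bin/true", "true", (char *)NULL);
                _exit(1);
        }

        waitpid(child, NULL, 0);                        /* child stops at exec */
        ptrace(PTRACE_SYSCALL, child, NULL, NULL);      /* run to next syscall entry */
        waitpid(child, NULL, 0);

        long nr = ptrace(PTRACE_PEEKUSER, child,
                         (void *)PT_OFFSET(ORIG_EAX), NULL);
        printf("child stopped at syscall %ld\n", nr);

        kill(child, SIGKILL);
        return 0;
}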

@@ -0,0 +1,44 @@
#ifndef __SYSDEP_I386_SC_H
#define __SYSDEP_I386_SC_H
#include <user_constants.h>
#define SC_OFFSET(sc, field) \
*((unsigned long *) &(((char *) (sc))[HOST_##field]))
#define SC_FP_OFFSET(sc, field) \
*((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
#define SC_FP_OFFSET_PTR(sc, field, type) \
((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field]))
#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX)
#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX)
#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX)
#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX)
#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI)
#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI)
#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP)
#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE)
#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW)
#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW)
#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG)
#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF)
#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL)
#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF)
#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL)
#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate)
#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void)
#endif

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __SYS_SIGCONTEXT_I386_H
#define __SYS_SIGCONTEXT_I386_H
#include "sysdep/sc.h"
#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
#define GET_FAULTINFO_FROM_SC(fi, sc) \
{ \
(fi).cr2 = SC_CR2(sc); \
(fi).error_code = SC_ERR(sc); \
(fi).trap_no = SC_TRAPNO(sc); \
}
/* Trap 14 is a page fault */
#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
#endif
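
Here is a host-side sketch of the kind of information GET_FAULTINFO_FROM_SC() collects: a SIGSEGV handler can see the faulting address (what the kernel calls cr2). Only the portable siginfo route is shown; UML itself digs the cr2/err/trapno words straight out of the raw sigcontext.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void segv(int sig, siginfo_t *info, void *ctx)
{
        (void)sig;
        (void)ctx;
        /* si_addr is the same datum the macro above stores as (fi).cr2 */
        printf("faulting address: %p\n", info->si_addr);        /* demo only; printf is not async-signal-safe */
        _exit(0);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = segv;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *)0x42 = 1;      /* take a write fault on purpose */
        return 1;
}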

@@ -0,0 +1,22 @@
/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_I386_SKAS_PTRACE_H
#define __SYSDEP_I386_SKAS_PTRACE_H
struct ptrace_faultinfo {
int is_write;
unsigned long addr;
};
struct ptrace_ldt {
int func;
void *ptr;
unsigned long bytecount;
};
#define PTRACE_LDT 54
#endif

@@ -0,0 +1,102 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H
#include <sys/mman.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "stub-data.h"
#include "kern_constants.h"
#include "uml-config.h"
extern void stub_segv_handler(int sig);
extern void stub_clone_handler(void);
#define STUB_SYSCALL_RET EAX
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
static inline long stub_syscall0(long syscall)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
return ret;
}
static inline long stub_syscall1(long syscall, long arg1)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
return ret;
}
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2));
return ret;
}
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3));
return ret;
}
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
long arg4)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3), "S" (arg4));
return ret;
}
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
long arg4, long arg5)
{
long ret;
__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
return ret;
}
static inline void trap_myself(void)
{
__asm("int3");
}
static inline void remap_stack(int fd, unsigned long offset)
{
__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
"movl %7, %%ebx ; movl %%eax, (%%ebx)"
: : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
"c" (UM_KERN_PAGE_SIZE),
"d" (PROT_READ | PROT_WRITE),
"S" (MAP_FIXED | MAP_SHARED), "D" (fd),
"a" (offset),
"i" (&((struct stub_data *) STUB_DATA)->err)
: "memory");
}
#endif
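
A stand-alone sketch of the calling convention the stub helpers use: eax carries the syscall number, ebx/ecx/edx the first three arguments, and int $0x80 traps into the kernel. Build it 32-bit (-m32); the syscall number comes from <sys/syscall.h>.

#include <sys/syscall.h>        /* SYS_write (the 32-bit number when built with -m32) */

static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
        long ret;

        __asm__ volatile ("int $0x80" : "=a" (ret)
                          : "0" (syscall), "b" (arg1), "c" (arg2), "d" (arg3));
        return ret;
}

int main(void)
{
        static const char msg[] = "hello from int $0x80\n";

        stub_syscall3(SYS_write, 1, (long)msg, sizeof(msg) - 1);
        return 0;
}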

@@ -0,0 +1,26 @@
/*
* Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "asm/unistd.h"
#include "sysdep/ptrace.h"
typedef long syscall_handler_t(struct pt_regs);
/* Not declared on x86, incompatible declarations on x86_64, so these have
* to go here rather than in sys_call_table.c
*/
extern syscall_handler_t sys_rt_sigaction;
extern syscall_handler_t old_mmap_i386;
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
((long (*)(struct syscall_args)) \
(*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
extern long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);

@@ -0,0 +1,132 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_
#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void default_idle(void);
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use a three-way alternative() for this if there was one.)
*/
static inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif
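
A userspace sketch of the ordering problem the smp_*() macros address: the producer must publish its data before the flag, and the consumer must read the flag before the data. The barriers below are spelled with the same x86 instructions the header picks; this is an illustration, not the kernel macros, and it is x86-only (build with -pthread).

#include <pthread.h>
#include <stdio.h>

#define wmb()   __asm__ volatile("sfence" ::: "memory")
#define rmb()   __asm__ volatile("lfence" ::: "memory")

static int payload;
static volatile int ready;

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;           /* write the data ... */
        wmb();                  /* ... make it visible ... */
        ready = 1;              /* ... before publishing the flag */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        while (!ready)
                ;               /* spin until the flag is published */
        rmb();                  /* order the flag read before the data read */
        printf("payload = %d\n", payload);
        pthread_join(t, NULL);
        return 0;
}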

@@ -0,0 +1,32 @@
#ifndef _SYSDEP_TLS_H
#define _SYSDEP_TLS_H
# ifndef __KERNEL__
/* Change the name to avoid conflicts with the original struct from <asm/ldt.h>,
* which may be named user_desc (but in 2.4, and in headers matching its API,
* it was named modify_ldt_ldt_s). */
typedef struct um_dup_user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
} user_desc_t;
# else /* __KERNEL__ */
# include <ldt.h>
typedef struct user_desc user_desc_t;
# endif /* __KERNEL__ */
#define GDT_ENTRY_TLS_MIN_I386 6
#define GDT_ENTRY_TLS_MIN_X86_64 12
#endif /* _SYSDEP_TLS_H */

@@ -0,0 +1,14 @@
/*
* Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __VM_FLAGS_I386_H
#define __VM_FLAGS_I386_H
#define VM_DATA_DEFAULT_FLAGS \
(VM_READ | VM_WRITE | \
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif