Merge branch 'ptebits' into devel
Conflicts: arch/arm/Kconfig
@@ -18,7 +18,15 @@
#include <linux/compiler.h>
#include <asm/types.h>

static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
#ifdef __ARMEB__
# define __BIG_ENDIAN
#else
# define __LITTLE_ENDIAN
#endif

#define __SWAB_64_THRU_32__

static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__u32 t;

@@ -40,19 +48,8 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)

return x;
}
#define __arch_swab32 __arch_swab32

#define __arch__swab32(x) ___arch__swab32(x)

#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif

#ifdef __ARMEB__
#include <linux/byteorder/big_endian.h>
#else
#include <linux/byteorder/little_endian.h>
#endif
#include <linux/byteorder.h>

#endif
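A note on the byteorder hunks above: they move ARM from the old ___arch__swab32()/__arch__swab32() pair and the direct <linux/byteorder/{big,little}_endian.h> includes to the newer scheme, where the header defines __BIG_ENDIAN or __LITTLE_ENDIAN itself, provides a single __arch_swab32() helper, and includes <linux/byteorder.h>. The sketch below is only a portable illustration of what a 32-bit byte swap does; it is not the ARM-optimised body elided from this hunk, and the function name is made up.

	#include <stdint.h>
	#include <stdio.h>

	/* Portable byte swap; the kernel's __arch_swab32() produces the same
	 * result using ARM-specific instruction sequences. */
	static inline uint32_t swab32_example(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00) |
		       ((x << 8) & 0x00ff0000) | (x << 24);
	}

	int main(void)
	{
		printf("%08x\n", swab32_example(0x12345678)); /* prints 78563412 */
		return 0;
	}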
@@ -3,7 +3,6 @@

#include <asm/hwcap.h>

#ifndef __ASSEMBLY__
/*
 * ELF register definitions..
 */
@@ -17,12 +16,34 @@ typedef unsigned long elf_freg_t[3];
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_fp elf_fpregset_t;
#endif

#define EM_ARM 40
#define EF_ARM_APCS26 0x08
#define EF_ARM_SOFT_FLOAT 0x200
#define EF_ARM_EABI_MASK 0xFF000000

#define EF_ARM_EABI_MASK 0xff000000
#define EF_ARM_EABI_UNKNOWN 0x00000000
#define EF_ARM_EABI_VER1 0x01000000
#define EF_ARM_EABI_VER2 0x02000000
#define EF_ARM_EABI_VER3 0x03000000
#define EF_ARM_EABI_VER4 0x04000000
#define EF_ARM_EABI_VER5 0x05000000

#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
#define EF_ARM_PIC 0x00000020 /* ABI 0 */
#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
#define EF_ARM_HASENTRY 0x00000002 /* All */
#define EF_ARM_RELEXEC 0x00000001 /* All */

#define R_ARM_NONE 0
#define R_ARM_PC24 1
@@ -41,7 +62,6 @@ typedef struct user_fp elf_fpregset_t;
#endif
#define ELF_ARCH EM_ARM

#ifndef __ASSEMBLY__
/*
 * This yields a string that ld.so will use to load implementation
 * specific libraries for optimization. This is more specific in
@@ -59,25 +79,17 @@ typedef struct user_fp elf_fpregset_t;
#define ELF_PLATFORM (elf_platform)

extern char elf_platform[];
#endif

struct elf32_hdr;

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch

/*
 * 32-bit code is always OK. Some cpus can do 26-bit, some can't.
 */
#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))

#define ELF_THUMB_OK(x) \
((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
((x)->e_entry & 3) == 0)

#define ELF_26BIT_OK(x) \
((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
((x)->e_flags & EF_ARM_APCS26) == 0)
extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
@@ -94,23 +106,7 @@ extern char elf_platform[];
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0

/*
 * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
 * and CP1, we only enable access to the iWMMXt coprocessor if the
 * binary is EABI or softfloat (and thus, guaranteed not to use
 * FPA instructions.)
 */
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if ((ex).e_flags & EF_ARM_APCS26) { \
set_personality(PER_LINUX); \
} else { \
set_personality(PER_LINUX_32BIT); \
if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
set_thread_flag(TIF_USING_IWMMXT); \
else \
clear_thread_flag(TIF_USING_IWMMXT); \
} \
} while (0)
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex))

#endif
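In the ELF hunks above, the inline elf_check_arch() and SET_PERSONALITY() macros give way to out-of-line helpers (elf_check_arch(), elf_set_personality(), arm_elf_read_implies_exec()) declared against struct elf32_hdr, and the EF_ARM_* flag list is expanded. The snippet below is a stand-alone illustration of how e_flags carries the EABI version via EF_ARM_EABI_MASK; the two constants are copied from the hunk, while the helper function and its name are hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	#define EF_ARM_EABI_MASK	0xff000000
	#define EF_ARM_EABI_VER5	0x05000000

	/* Hypothetical helper: extract the EABI version number from e_flags. */
	static int eabi_version(uint32_t e_flags)
	{
		return (e_flags & EF_ARM_EABI_MASK) >> 24;
	}

	int main(void)
	{
		printf("EABI version %d\n", eabi_version(EF_ARM_EABI_VER5)); /* 5 */
		return 0;
	}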
@@ -60,7 +60,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#define MT_DEVICE 0
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_IXP2000 3
#define MT_DEVICE_WC 3
/*
 * types 4 onwards can be found in asm/mach/map.h and are undefined
 * for ioremap
@@ -215,11 +215,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC)
#define iounmap(cookie) __iounmap(cookie)
#else
#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap(cookie) __arch_iounmap(cookie)
#endif
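The I/O hunk above retires the IXP2000-specific MT_DEVICE_IXP2000 in favour of a generic write-combining type, MT_DEVICE_WC (same value, 3), and exposes it as ioremap_wc(). A rough sketch of how a driver might use it follows; the physical address, size, and function names are all made up for illustration.

	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_FB_PHYS	0x40000000UL	/* hypothetical framebuffer base */
	#define EXAMPLE_FB_SIZE	0x00100000UL	/* hypothetical 1 MiB aperture */

	static void __iomem *example_fb_base;

	static int example_fb_map(void)
	{
		/* Ask for a write-combining mapping rather than strongly-ordered device memory. */
		example_fb_base = ioremap_wc(EXAMPLE_FB_PHYS, EXAMPLE_FB_SIZE);
		if (!example_fb_base)
			return -ENOMEM;
		return 0;
	}

	static void example_fb_unmap(void)
	{
		iounmap(example_fb_base);
	}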
@@ -26,9 +26,6 @@ struct map_desc {
#define MT_MEMORY 8
#define MT_ROM 9

#define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED
#define MT_IXP2000_DEVICE MT_DEVICE_IXP2000

#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
#else
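The mach/map.h hunk drops the IXP2000 alias while keeping MT_NONSHARED_DEVICE. For context, the usual consumer of these MT_* types is a static I/O table handed to iotable_init(); the sketch below shows that pattern, with hypothetical names, virtual/physical addresses, and length.

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/mach/map.h>
	#include <asm/memory.h>

	/* Hypothetical machine I/O table using the MT_* mapping types. */
	static struct map_desc example_io_desc[] __initdata = {
		{
			.virtual	= 0xfe000000,			/* hypothetical VA */
			.pfn		= __phys_to_pfn(0x10000000),	/* hypothetical PA */
			.length		= 0x00100000,			/* 1 MiB */
			.type		= MT_DEVICE,
		},
	};

	static void __init example_map_io(void)
	{
		iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
	}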
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
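The page.h hunk makes VM_EXEC on the default data mapping conditional on the READ_IMPLIES_EXEC personality bit rather than always granted. A plain-C restatement of the new expression, with the kernel symbols reduced to illustrative stand-in constants and a hypothetical helper function:

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's VM_* flag values. */
	#define VM_READ		0x0001
	#define VM_WRITE	0x0002
	#define VM_EXEC		0x0004
	#define VM_MAYREAD	0x0010
	#define VM_MAYWRITE	0x0020
	#define VM_MAYEXEC	0x0040

	static unsigned long vm_data_default_flags(int read_implies_exec)
	{
		return (read_implies_exec ? VM_EXEC : 0) |
		       VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	}

	int main(void)
	{
		printf("READ_IMPLIES_EXEC set:   %#lx\n", vm_data_default_flags(1));
		printf("READ_IMPLIES_EXEC clear: %#lx\n", vm_data_default_flags(0));
		return 0;
	}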
@@ -164,14 +164,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */
#define L_PTE_DIRTY (1 << 6)
#define L_PTE_WRITE (1 << 7)
#define L_PTE_USER (1 << 8)
#define L_PTE_EXEC (1 << 9)
#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 */
#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */
#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */
#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */
#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */
#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */
#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */
#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */
#define L_PTE_MT_MASK (0x0f << 2)

#ifndef __ASSEMBLY__

/*
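This hunk is the core of the 'ptebits' rework: the Linux-level PTE bits are renumbered, and the per-bit L_PTE_CACHEABLE/L_PTE_BUFFERABLE pair becomes a 4-bit memory-type field selected through L_PTE_MT_MASK. A small stand-alone illustration of composing and re-targeting that field follows; the constants are copied from the hunk, everything else (variable names, the program itself) is just for demonstration.

	#include <stdint.h>
	#include <stdio.h>

	#define L_PTE_PRESENT		(1 << 0)
	#define L_PTE_YOUNG		(1 << 1)
	#define L_PTE_MT_UNCACHED	(0x00 << 2)
	#define L_PTE_MT_WRITEBACK	(0x03 << 2)
	#define L_PTE_MT_MASK		(0x0f << 2)

	int main(void)
	{
		uint32_t pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_MT_WRITEBACK;

		printf("memory type %#x\n", (pte & L_PTE_MT_MASK) >> 2);	/* 0x3 */

		/* Re-target the mapping to uncached, as pgprot_noncached() now does. */
		pte = (pte & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED;
		printf("memory type %#x\n", (pte & L_PTE_MT_MASK) >> 2);	/* 0 */
		return 0;
	}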
@@ -180,23 +196,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE pgprot_user
#define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
L_PTE_WRITE)
#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_KERNEL pgprot_kernel
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_NONE pgprot_user
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL pgprot_kernel
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */
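The protection hunk above splits each user protection into a non-executable and an _EXEC variant, built with the small _MOD_PROT() helper on top of the runtime pgprot_user/pgprot_kernel bases. The sketch below mimics that layering with a simplified stand-in for pgprot_t; the base value is arbitrary and only the L_PTE_USER/L_PTE_EXEC constants come from the hunk.

	#include <stdint.h>
	#include <stdio.h>

	#define L_PTE_USER	(1 << 8)
	#define L_PTE_EXEC	(1 << 9)

	typedef struct { uint32_t pgprot; } pgprot_t;	/* simplified stand-in */
	#define pgprot_val(p)	((p).pgprot)
	#define __pgprot(v)	((pgprot_t){ (v) })
	#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

	int main(void)
	{
		pgprot_t pgprot_user = __pgprot(0x0cf);	/* arbitrary example base */
		pgprot_t ro      = _MOD_PROT(pgprot_user, L_PTE_USER);
		pgprot_t ro_exec = _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC);

		printf("READONLY      %#x\n", pgprot_val(ro));		/* 0x1cf */
		printf("READONLY_EXEC %#x\n", pgprot_val(ro_exec));	/* 0x3cf */
		return 0;
	}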
@@ -212,19 +235,19 @@ extern pgprot_t pgprot_kernel;
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY
#define __P101 __PAGE_READONLY
#define __P110 __PAGE_COPY
#define __P111 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC

#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY
#define __S101 __PAGE_READONLY
#define __S110 __PAGE_SHARED
#define __S111 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
@@ -286,8 +309,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pgprot_noncached(prot) \
__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
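With the memory-type field in place, pgprot_noncached() and pgprot_writecombine() can no longer just clear the C/B bits; they mask out L_PTE_MT_MASK and substitute a whole type. A sketch of a typical caller follows: a hypothetical driver mmap() handler asking for a write-combining user mapping (the function name and the assumption that vm_pgoff already holds the right pfn are illustrative only).

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical mmap() handler requesting a write-combining mapping;
	 * with this series, pgprot_writecombine() swaps the whole memory-type
	 * field instead of only clearing the cacheable bit. */
	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start, vma->vm_page_prot);
	}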