Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (236 commits)
  [ARM] 5300/1: fixup spitz reset during boot
  [ARM] 5295/1: make ZONE_DMA optional
  [ARM] 5239/1: Palm Zire 72 power management support
  [ARM] 5298/1: Drop desc_handle_irq()
  [ARM] 5297/1: [KS8695] Fix two compile-time warnings
  [ARM] 5296/1: [KS8695] Replace macro's with trailing underscores.
  [ARM] pxa: allow multi-machine PCMCIA builds
  [ARM] pxa: add preliminary CPUFREQ support for PXA3xx
  [ARM] pxa: add missing ACCR bit definitions to pxa3xx-regs.h
  [ARM] pxa: rename cpu-pxa.c to cpufreq-pxa2xx.c
  [ARM] pxa/zylonite: add support for USB OHCI
  [ARM] ohci-pxa27x: use ioremap() and offset for register access
  [ARM] ohci-pxa27x: introduce pxa27x_clear_otgph()
  [ARM] ohci-pxa27x: use platform_get_{irq,resource} for the resource
  [ARM] ohci-pxa27x: move OHCI controller specific registers into the driver
  [ARM] ohci-pxa27x: introduce flags to avoid direct access to OHCI registers
  [ARM] pxa: move I2S register and bit definitions into pxa2xx-i2s.c
  [ARM] pxa: simplify DMA register definitions
  [ARM] pxa: make additional DCSR bits valid for PXA3xx
  [ARM] pxa: move i2c register and bit definitions into i2c-pxa.c
  ...

Fixed up conflicts in arch/arm/mach-versatile/core.c,
sound/soc/pxa/pxa2xx-ac97.c and sound/soc/pxa/pxa2xx-i2s.c manually.
arch/arm/include/asm/bug.h
@@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn));
#else

/* this just causes an oops */
#define BUG() (*(int *)0 = 0)
#define BUG() do { *(int *)0 = 0; } while (1)

#endif
arch/arm/include/asm/cacheflush.h
@@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	dmac_inv_range(start, start + size);
}

#define __cacheid_present(val) (val != read_cpuid(CPUID_ID))
#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29))

#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIVT caches only
 */
#define cache_is_vivt() 1
#define cache_is_vipt() 0
#define cache_is_vipt_nonaliasing() 0
#define cache_is_vipt_aliasing() 0
#define icache_is_vivt_asid_tagged() 0

#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
/*
 * VIPT caches only
 */
#define cache_is_vivt() 0
#define cache_is_vipt() 1
#define cache_is_vipt_nonaliasing() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_vipt_nonaliasing(__val); \
})

#define cache_is_vipt_aliasing() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_vipt_aliasing(__val); \
})

#define icache_is_vivt_asid_tagged() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_vivt_asid_tagged_instr(__val); \
})

#else
/*
 * VIVT or VIPT caches.  Note that this is unreliable since ARM926
 * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
 * There's no way to tell from the CacheType register what type (!)
 * the cache is.
 */
#define cache_is_vivt() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
})

#define cache_is_vipt() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_present(__val) && __cacheid_vipt(__val); \
})

#define cache_is_vipt_nonaliasing() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_present(__val) && \
	 __cacheid_vipt_nonaliasing(__val); \
})

#define cache_is_vipt_aliasing() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_present(__val) && \
	 __cacheid_vipt_aliasing(__val); \
})

#define icache_is_vivt_asid_tagged() \
({ \
	unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
	__cacheid_present(__val) && \
	 __cacheid_vivt_asid_tagged_instr(__val); \
})

#endif

#endif
arch/arm/include/asm/cachetype.h (new file, 52 lines)
@@ -0,0 +1,52 @@
#ifndef __ASM_ARM_CACHETYPE_H
#define __ASM_ARM_CACHETYPE_H

#define CACHEID_VIVT			(1 << 0)
#define CACHEID_VIPT_NONALIASING	(1 << 1)
#define CACHEID_VIPT_ALIASING		(1 << 2)
#define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED		(1 << 3)

extern unsigned int cacheid;

#define cache_is_vivt()			cacheid_is(CACHEID_VIVT)
#define cache_is_vipt()			cacheid_is(CACHEID_VIPT)
#define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
#define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)

/*
 * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
 * Mask out support which will never be present on newer CPUs.
 * - v6+ is never VIVT
 * - v7+ VIPT never aliases
 */
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN	(~CACHEID_VIVT)
#else
#define __CACHEID_ARCH_MIN	(~0)
#endif

/*
 * Mask out support which isn't configured
 */
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS	(CACHEID_VIVT)
#define __CACHEID_NEVER		(~CACHEID_VIVT)
#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
#define __CACHEID_ALWAYS	(0)
#define __CACHEID_NEVER		(CACHEID_VIVT)
#else
#define __CACHEID_ALWAYS	(0)
#define __CACHEID_NEVER		(0)
#endif

static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
{
	return (__CACHEID_ALWAYS & mask) |
		(~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
}

#endif
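The three masks exist so that cacheid_is() can collapse to a compile-time
constant whenever Kconfig and __LINUX_ARM_ARCH__ already pin the answer down.
An illustrative caller (hypothetical, not part of this merge), assuming a
build with only CONFIG_CPU_CACHE_VIPT set:

	/*
	 * Here __CACHEID_ALWAYS == 0 and __CACHEID_NEVER == CACHEID_VIVT,
	 * so cacheid_is(CACHEID_VIVT) reduces to
	 * (0 & mask) | (~CACHEID_VIVT & ... & CACHEID_VIVT & cacheid) == 0,
	 * and the branch below is discarded at compile time without the
	 * old run-time read of the cache type register.
	 */
	if (cache_is_vivt())
		flush_cache_all();	/* statically dead on such a build */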
arch/arm/include/asm/cputype.h (new file, 64 lines)
@@ -0,0 +1,64 @@
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H

#include <linux/stringify.h>

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
	({ \
		unsigned int __val; \
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) \
		    : \
		    : "cc"); \
		__val; \
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or read_cpuid() directly.
 */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CPUID_CACHETYPE);
}

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#endif
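read_cpuid_id() returns the raw main ID register (MIDR) word; the decode below
is an illustrative sketch using the architecturally defined field layout, not
code from this merge:

	unsigned int midr = read_cpuid_id();
	unsigned int implementer = (midr >> 24) & 0xff;	/* 0x41 'A' = ARM, 0x69 'i' = Intel */
	unsigned int part = (midr >> 4) & 0xfff;	/* e.g. 0xb36 on ARM1136 */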
arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle);
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
@@ -174,282 +170,16 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size);
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
#endif

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);

		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{

	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif


/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
#else
extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#endif

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}


/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
@@ -475,7 +205,8 @@ extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enu
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
@@ -506,7 +237,184 @@ extern void dmabounce_unregister_dev(struct device *);
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1)
#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1)


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);

	return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);


#endif /* __KERNEL__ */
#endif
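Taken together, the streaming calls pair up as map/sync/unmap.  A minimal
driver fragment (hypothetical device, buffer 'buf' of 'len' bytes, direction
DMA_TO_DEVICE) might look like:

	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... hand 'dma' to the device and start the transfer ... */

	/* CPU wants the buffer back mid-stream: */
	dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
	/* ... examine or update buf ... */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);

	/* transfer finished: */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);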
arch/arm/include/asm/elf.h
@@ -3,7 +3,6 @@

#include <asm/hwcap.h>

#ifndef __ASSEMBLY__
/*
 * ELF register definitions..
 */
@@ -17,12 +16,34 @@ typedef unsigned long elf_freg_t[3];
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_fp elf_fpregset_t;
#endif

#define EM_ARM	40
#define EF_ARM_APCS26 0x08
#define EF_ARM_SOFT_FLOAT 0x200
#define EF_ARM_EABI_MASK 0xFF000000

#define EF_ARM_EABI_MASK	0xff000000
#define EF_ARM_EABI_UNKNOWN	0x00000000
#define EF_ARM_EABI_VER1	0x01000000
#define EF_ARM_EABI_VER2	0x02000000
#define EF_ARM_EABI_VER3	0x03000000
#define EF_ARM_EABI_VER4	0x04000000
#define EF_ARM_EABI_VER5	0x05000000

#define EF_ARM_BE8		0x00800000	/* ABI 4,5 */
#define EF_ARM_LE8		0x00400000	/* ABI 4,5 */
#define EF_ARM_MAVERICK_FLOAT	0x00000800	/* ABI 0 */
#define EF_ARM_VFP_FLOAT	0x00000400	/* ABI 0 */
#define EF_ARM_SOFT_FLOAT	0x00000200	/* ABI 0 */
#define EF_ARM_OLD_ABI		0x00000100	/* ABI 0 */
#define EF_ARM_NEW_ABI		0x00000080	/* ABI 0 */
#define EF_ARM_ALIGN8		0x00000040	/* ABI 0 */
#define EF_ARM_PIC		0x00000020	/* ABI 0 */
#define EF_ARM_MAPSYMSFIRST	0x00000010	/* ABI 2 */
#define EF_ARM_APCS_FLOAT	0x00000010	/* ABI 0, floats in fp regs */
#define EF_ARM_DYNSYMSUSESEGIDX	0x00000008	/* ABI 2 */
#define EF_ARM_APCS_26		0x00000008	/* ABI 0 */
#define EF_ARM_SYMSARESORTED	0x00000004	/* ABI 1,2 */
#define EF_ARM_INTERWORK	0x00000004	/* ABI 0 */
#define EF_ARM_HASENTRY		0x00000002	/* All */
#define EF_ARM_RELEXEC		0x00000001	/* All */

#define R_ARM_NONE	0
#define R_ARM_PC24	1
@@ -41,7 +62,6 @@ typedef struct user_fp elf_fpregset_t;
#endif
#define ELF_ARCH	EM_ARM

#ifndef __ASSEMBLY__
/*
 * This yields a string that ld.so will use to load implementation
 * specific libraries for optimization.  This is more specific in
@@ -59,25 +79,17 @@ typedef struct user_fp elf_fpregset_t;
#define ELF_PLATFORM	(elf_platform)

extern char elf_platform[];
#endif

struct elf32_hdr;

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch

/*
 * 32-bit code is always OK.  Some cpus can do 26-bit, some can't.
 */
#define ELF_PROC_OK(x)	(ELF_THUMB_OK(x) && ELF_26BIT_OK(x))

#define ELF_THUMB_OK(x) \
	((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
	 ((x)->e_entry & 3) == 0)

#define ELF_26BIT_OK(x) \
	((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
	  ((x)->e_flags & EF_ARM_APCS26) == 0)
extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE	4096
@@ -94,23 +106,7 @@ extern char elf_platform[];
   have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr)	(_r)->ARM_r0 = 0

/*
 * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
 * and CP1, we only enable access to the iWMMXt coprocessor if the
 * binary is EABI or softfloat (and thus, guaranteed not to use
 * FPA instructions.)
 */
#define SET_PERSONALITY(ex, ibcs2) \
	do { \
		if ((ex).e_flags & EF_ARM_APCS26) { \
			set_personality(PER_LINUX); \
		} else { \
			set_personality(PER_LINUX_32BIT); \
			if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
				set_thread_flag(TIF_USING_IWMMXT); \
			else \
				clear_thread_flag(TIF_USING_IWMMXT); \
		} \
	} while (0)
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex))

#endif
arch/arm/include/asm/futex.h
@@ -1,6 +1,124 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H

#ifdef __KERNEL__

#ifdef CONFIG_SMP

#include <asm-generic/futex.h>

#endif
#else /* !SMP, we can work around lack of atomic ops by disabling preemption */

#include <linux/futex.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
	__asm__ __volatile__( \
	"1: ldrt %1, [%2]\n" \
	" " insn "\n" \
	"2: strt %0, [%2]\n" \
	" mov %0, #0\n" \
	"3:\n" \
	" .section __ex_table,\"a\"\n" \
	" .align 3\n" \
	" .long 1b, 4f, 2b, 4f\n" \
	" .previous\n" \
	" .section .fixup,\"ax\"\n" \
	"4: mov %0, %4\n" \
	" b 3b\n" \
	" .previous" \
	: "=&r" (ret), "=&r" (oldval) \
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
	: "cc", "memory")

static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	pagefault_disable();	/* implies preempt_disable() */

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();	/* subsumes preempt_enable() */

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}

static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
	int val;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	pagefault_disable();	/* implies preempt_disable() */

	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1: ldrt %0, [%3]\n"
	" teq %0, %1\n"
	"2: streqt %2, [%3]\n"
	"3:\n"
	" .section __ex_table,\"a\"\n"
	" .align 3\n"
	" .long 1b, 4f, 2b, 4f\n"
	" .previous\n"
	" .section .fixup,\"ax\"\n"
	"4: mov %0, %4\n"
	" b 3b\n"
	" .previous"
	: "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");

	pagefault_enable();	/* subsumes preempt_enable() */

	return val;
}

#endif /* !SMP */

#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
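Why the !SMP trick is sound (annotation, not part of the patch): on a
uniprocessor the only agents that can race with the ldrt/strt pair are other
threads, and those can only run after a preemption.  pagefault_disable()
implies preempt_disable(), so nothing can be interleaved between the load at
label 1 and the store at label 2, which makes the read-modify-write
effectively atomic without ldrex/strex.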
arch/arm/include/asm/io.h
@@ -60,10 +60,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#define MT_DEVICE		0
#define MT_DEVICE_NONSHARED	1
#define MT_DEVICE_CACHED	2
#define MT_DEVICE_IXP2000	3
#define MT_DEVICE_WC		4
#define MT_DEVICE_WC		3
/*
 * types 5 onwards can be found in asm/mach/map.h and are undefined
 * types 4 onwards can be found in asm/mach/map.h and are undefined
 * for ioremap
 */
arch/arm/include/asm/irq.h
@@ -22,6 +22,10 @@
#ifndef __ASSEMBLY__
struct irqaction;
extern void migrate_irqs(void);

extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);

#endif

#endif
arch/arm/include/asm/kprobes.h
@@ -61,7 +61,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);

int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data);
arch/arm/include/asm/mach/map.h
@@ -18,16 +18,13 @@ struct map_desc {
	unsigned int type;
};

/* types 0-4 are defined in asm/io.h */
#define MT_CACHECLEAN	5
#define MT_MINICLEAN	6
#define MT_LOW_VECTORS	7
#define MT_HIGH_VECTORS	8
#define MT_MEMORY	9
#define MT_ROM		10

#define MT_NONSHARED_DEVICE	MT_DEVICE_NONSHARED
#define MT_IXP2000_DEVICE	MT_DEVICE_IXP2000
/* types 0-3 are defined in asm/io.h */
#define MT_CACHECLEAN	4
#define MT_MINICLEAN	5
#define MT_LOW_VECTORS	6
#define MT_HIGH_VECTORS	7
#define MT_MEMORY	8
#define MT_ROM		9

#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -18,8 +18,7 @@ struct pxa2xx_udc_mach_info {
	/* Boards following the design guidelines in the developer's manual,
	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
	 * VBUS IRQ and omit the methods above.  Store the GPIO number
	 * here; for GPIO 0, also mask in one of the pxa_gpio_mode() bits.
	 * Note that sometimes the signals go through inverters...
	 * here.  Note that sometimes the signals go through inverters...
	 */
	bool	gpio_vbus_inverted;
	u16	gpio_vbus;	/* high == vbus present */
arch/arm/include/asm/mc146818rtc.h
@@ -4,8 +4,8 @@
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H

#include <linux/io.h>
#include <mach/irqs.h>
#include <asm/io.h>

#ifndef RTC_PORT
#define RTC_PORT(x)	(0x70 + (x))
arch/arm/include/asm/memory.h
@@ -13,43 +13,33 @@
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <mach/memory.h>
#include <asm/sizes.h>

/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#ifndef __ASSEMBLY__
#define UL(x) (x##UL)
#else
#define UL(x) (x)
#endif

#include <linux/compiler.h>
#include <mach/memory.h>
#include <asm/sizes.h>
#define UL(x) _AC(x, UL)

#ifdef CONFIG_MMU

#ifndef TASK_SIZE
/*
 * PAGE_OFFSET - the virtual address of the start of the kernel image
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		UL(0xbf000000)
#define TASK_UNMAPPED_BASE	UL(0x40000000)
#endif
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		UL(0x04000000)

/*
 * Page offset: 3GB
 */
#ifndef PAGE_OFFSET
#define PAGE_OFFSET		UL(0xc0000000)
#endif

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
@@ -147,16 +137,10 @@

#ifndef arch_adjust_zones
#define arch_adjust_zones(node,size,holes) do { } while (0)
#elif !defined(CONFIG_ZONE_DMA)
#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
#endif

/*
 * Amount of memory reserved for the vmalloc() area, and minimum
 * address for vmalloc mappings.
 */
extern unsigned long vmalloc_reserve;

#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
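UL() now delegates to _AC() from <linux/const.h>, which performs the same
__ASSEMBLY__ switch generically.  For reference (quoting the generic header,
abbreviated):

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X		/* UL(0xc0000000) -> 0xc0000000 */
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)	/* UL(0xc0000000) -> (0xc0000000UL) */
	#endif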
arch/arm/include/asm/mmu_context.h
@@ -15,6 +15,7 @@

#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
arch/arm/include/asm/page.h
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
arch/arm/include/asm/pgtable.h
@@ -164,14 +164,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
#define L_PTE_BUFFERABLE	(1 << 2)	/* obsolete, matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* obsolete, matches PTE */
#define L_PTE_DIRTY		(1 << 6)
#define L_PTE_WRITE		(1 << 7)
#define L_PTE_USER		(1 << 8)
#define L_PTE_EXEC		(1 << 9)
#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 */
#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
#define L_PTE_MT_MASK		(0x0f << 2)

#ifndef __ASSEMBLY__

/*
@@ -180,23 +196,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE	pgprot_user
#define PAGE_COPY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
				 L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel
#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define __PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define __PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define __PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */

@@ -212,19 +235,19 @@ extern pgprot_t pgprot_kernel;
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY
#define __P101	__PAGE_READONLY
#define __P110	__PAGE_COPY
#define __P111	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY
#define __S101	__PAGE_READONLY
#define __S110	__PAGE_SHARED
#define __S111	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
@@ -286,8 +309,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pgprot_noncached(prot) \
	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
@@ -319,11 +344,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page.  We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
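A worked decode of the XXCB encoding (annotation, not part of the patch): the
memory type occupies PTE bits [5:2].  L_PTE_MT_WRITEBACK is 0x03 << 2, i.e.
XXCB = 0011 (C=1, B=1), exactly the pre-ARMv6 "cacheable, bufferable" PTE
bits, while a value such as L_PTE_MT_DEV_WC (0x09 << 2, XXCB = 1001) has no
pre-ARMv6 equivalent and only takes effect through the v6+ remapping of
these bits.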
arch/arm/include/asm/ptrace.h
@@ -54,7 +54,6 @@
#define PSR_C_BIT	0x20000000
#define PSR_Z_BIT	0x40000000
#define PSR_N_BIT	0x80000000
#define PCMASK		0

/*
 * Groups of PSR bits
@@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
	return 0;
}

#define pc_pointer(v) \
	((v) & ~PCMASK)

#define instruction_pointer(regs) \
	(pc_pointer((regs)->ARM_pc))
#define instruction_pointer(regs)	(regs)->ARM_pc

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
arch/arm/include/asm/setup.h
@@ -209,6 +209,17 @@ struct meminfo {
	struct membank bank[NR_BANKS];
};

#define for_each_nodebank(iter,mi,no) \
	for (iter = 0; iter < mi->nr_banks; iter++) \
		if (mi->bank[iter].node == no)

#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
#define bank_phys_start(bank)	(bank)->start
#define bank_phys_end(bank)	((bank)->start + (bank)->size)
#define bank_phys_size(bank)	(bank)->size

/*
 * Early command line parameters.
 */
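The bank accessors read naturally together with for_each_nodebank(); a
hypothetical counting loop (not from this merge), where 'mi' is a
struct meminfo * and 'node' a node number:

	unsigned long pages = 0;
	int i;

	for_each_nodebank(i, mi, node)
		pages += bank_pfn_size(&mi->bank[i]);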
arch/arm/include/asm/sparsemem.h
@@ -3,8 +3,22 @@

#include <asm/memory.h>

#define MAX_PHYSADDR_BITS	32
#define MAX_PHYSMEM_BITS	32
#define SECTION_SIZE_BITS	NODE_MEM_SIZE_BITS
/*
 * Two definitions are required for sparsemem:
 *
 * MAX_PHYSMEM_BITS: The number of physical address bits required
 *   to address the last byte of memory.
 *
 * SECTION_SIZE_BITS: The number of physical address bits to cover
 *   the maximum amount of memory in a section.
 *
 * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
 * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
 *
 * Define these in your mach/memory.h.
 */
#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS)
#error Sparsemem is not supported on this platform
#endif

#endif
arch/arm/include/asm/system.h
@@ -43,11 +43,6 @@
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -61,36 +56,8 @@
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
	({ \
		unsigned int __val; \
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
		    : "=r" (__val) \
		    : \
		    : "cc"); \
		__val; \
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
@@ -131,31 +98,6 @@ extern void cpu_init(void);
void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
arch/arm/include/asm/thread_info.h
@@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void)
}

#define thread_saved_pc(tsk) \
	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_fp(tsk) \
	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
arch/arm/include/asm/uaccess.h
@@ -225,7 +225,7 @@ do { \

#define __get_user_asm_byte(x,addr,err) \
	__asm__ __volatile__( \
	"1: ldrbt %1,[%2],#0\n" \
	"1: ldrbt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@@ -261,7 +261,7 @@ do { \

#define __get_user_asm_word(x,addr,err) \
	__asm__ __volatile__( \
	"1: ldrt %1,[%2],#0\n" \
	"1: ldrt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@@ -306,7 +306,7 @@ do { \

#define __put_user_asm_byte(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strbt %1,[%2],#0\n" \
	"1: strbt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@@ -339,7 +339,7 @@ do { \

#define __put_user_asm_word(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strt %1,[%2],#0\n" \
	"1: strt %1,[%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@@ -365,7 +365,7 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1: strt " __reg_oper1 ", [%1], #4\n" \
	"2: strt " __reg_oper0 ", [%1], #0\n" \
	"2: strt " __reg_oper0 ", [%1]\n" \
	"3:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
arch/arm/include/asm/vga.h
@@ -1,8 +1,8 @@
#ifndef ASMARM_VGA_H
#define ASMARM_VGA_H

#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/io.h>

#define VGA_MAP_MEM(x,s)	(PCIMEM_BASE + (x))