Merge branch 'master' into for-linus

Rafael J. Wysocki
2009-09-14 20:26:05 +02:00
2413 changed files with 206081 additions and 110996 deletions


@@ -40,7 +40,11 @@
#define ASI_M_UNA01 0x01 /* Same here... */
#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
#ifndef CONFIG_SPARC_LEON
#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
#else
#define ASI_M_MMUREGS 0x19
#endif /* CONFIG_SPARC_LEON */
#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */


@@ -3,6 +3,7 @@
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
struct dma_ops {
void *(*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction);
void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction);
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
int nhwentries,
enum dma_data_direction direction);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
void (*sync_single_for_device)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
int nelems,
enum dma_data_direction direction);
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
};
extern const struct dma_ops *dma_ops;
extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (dev->bus == &pci_bus_type)
return &pci32_dma_ops;
#endif
return dma_ops;
}
#include <asm-generic/dma-mapping-common.h>
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
struct dma_map_ops *ops = get_dma_ops(dev);
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr & ~PAGE_MASK, size,
direction);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_page(dev, dma_addr, size, direction);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_page(dev, page, offset, size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_page(dev, dma_address, size, direction);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
return dma_ops->map_sg(dev, sg, nents, direction);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
dma_ops->unmap_sg(dev, sg, nents, direction);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction)
{
if (dma_ops->sync_single_for_device)
dma_ops->sync_single_for_device(dev, dma_handle, size,
direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
if (dma_ops->sync_sg_for_device)
dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
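
A minimal driver-side sketch (not part of the commit) of the reworked API above, assuming a hypothetical device `dev` and buffer `buf`/`len`: allocation now dispatches through get_dma_ops(), and mapping failures are reported as DMA_ERROR_CODE via dma_mapping_error().

static int example_setup_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t chandle, shandle;
        void *cpu;

        /* internally calls get_dma_ops(dev)->alloc_coherent() */
        cpu = dma_alloc_coherent(dev, PAGE_SIZE, &chandle, GFP_KERNEL);
        if (!cpu)
                return -ENOMEM;

        shandle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, shandle)) {  /* shandle == DMA_ERROR_CODE */
                dma_free_coherent(dev, PAGE_SIZE, cpu, chandle);
                return -EIO;
        }
        /* ... hand shandle to the device and wait for completion ... */
        dma_unmap_single(dev, shandle, len, DMA_TO_DEVICE);
        dma_free_coherent(dev, PAGE_SIZE, cpu, chandle);
        return 0;
}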


@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
return retval;
}
void __trigger_all_cpu_backtrace(void);
#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];


@@ -0,0 +1,362 @@
/*
* Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research
* Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart
* Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
* Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#ifndef LEON_H_INCLUDE
#define LEON_H_INCLUDE
#ifdef CONFIG_SPARC_LEON
#define ASI_LEON_NOCACHE 0x01
#define ASI_LEON_DCACHE_MISS 0x1
#define ASI_LEON_CACHEREGS 0x02
#define ASI_LEON_IFLUSH 0x10
#define ASI_LEON_DFLUSH 0x11
#define ASI_LEON_MMUFLUSH 0x18
#define ASI_LEON_MMUREGS 0x19
#define ASI_LEON_BYPASS 0x1c
#define ASI_LEON_FLUSH_PAGE 0x10
/* mmu register access, ASI_LEON_MMUREGS */
#define LEON_CNR_CTRL 0x000
#define LEON_CNR_CTXP 0x100
#define LEON_CNR_CTX 0x200
#define LEON_CNR_F 0x300
#define LEON_CNR_FADDR 0x400
#define LEON_CNR_CTX_NCTX 256 /* number of MMU ctx */
#define LEON_CNR_CTRL_TLBDIS 0x80000000
#define LEON_MMUTLB_ENT_MAX 64
/*
* diagnostic access from mmutlb.vhd:
* 0: pte address
* 4: pte
* 8: additional flags
*/
#define LEON_DIAGF_LVL 0x3
#define LEON_DIAGF_WR 0x8
#define LEON_DIAGF_WR_SHIFT 3
#define LEON_DIAGF_HIT 0x10
#define LEON_DIAGF_HIT_SHIFT 4
#define LEON_DIAGF_CTX 0x1fe0
#define LEON_DIAGF_CTX_SHIFT 5
#define LEON_DIAGF_VALID 0x2000
#define LEON_DIAGF_VALID_SHIFT 13
/*
* Interrupt Sources
*
* The interrupt source numbers directly map to the trap type and to
* the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask,
* and the Interrupt Pending Registers.
*/
#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR 1
#define LEON_INTERRUPT_UART_1_RX_TX 2
#define LEON_INTERRUPT_UART_0_RX_TX 3
#define LEON_INTERRUPT_EXTERNAL_0 4
#define LEON_INTERRUPT_EXTERNAL_1 5
#define LEON_INTERRUPT_EXTERNAL_2 6
#define LEON_INTERRUPT_EXTERNAL_3 7
#define LEON_INTERRUPT_TIMER1 8
#define LEON_INTERRUPT_TIMER2 9
#define LEON_INTERRUPT_EMPTY1 10
#define LEON_INTERRUPT_EMPTY2 11
#define LEON_INTERRUPT_OPEN_ETH 12
#define LEON_INTERRUPT_EMPTY4 13
#define LEON_INTERRUPT_EMPTY5 14
#define LEON_INTERRUPT_EMPTY6 15
/* irq masks */
#define LEON_HARD_INT(x) (1 << (x)) /* irq 0-15 */
#define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */
#define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */
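
A small sketch of how these masks compose: LEON_HARD_INT() turns an interrupt source number into its register bit, so timer 1 (source 8) tests bit 8 of a pending-register value, well inside LEON_IRQMASK_R.

static inline int example_timer1_pending(unsigned long ipend)
{
        /* LEON_HARD_INT(8) == 0x100 */
        return (ipend & LEON_HARD_INT(LEON_INTERRUPT_TIMER1)) != 0;
}
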
/* leon uart register definitions */
#define LEON_OFF_UDATA 0x0
#define LEON_OFF_USTAT 0x4
#define LEON_OFF_UCTRL 0x8
#define LEON_OFF_USCAL 0xc
#define LEON_UCTRL_RE 0x01
#define LEON_UCTRL_TE 0x02
#define LEON_UCTRL_RI 0x04
#define LEON_UCTRL_TI 0x08
#define LEON_UCTRL_PS 0x10
#define LEON_UCTRL_PE 0x20
#define LEON_UCTRL_FL 0x40
#define LEON_UCTRL_LB 0x80
#define LEON_USTAT_DR 0x01
#define LEON_USTAT_TS 0x02
#define LEON_USTAT_TH 0x04
#define LEON_USTAT_BR 0x08
#define LEON_USTAT_OV 0x10
#define LEON_USTAT_PE 0x20
#define LEON_USTAT_FE 0x40
#define LEON_MCFG2_SRAMDIS 0x00002000
#define LEON_MCFG2_SDRAMEN 0x00004000
#define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */
#define LEON_MCFG2_SRAMBANKSZ_SHIFT 9
#define LEON_MCFG2_SDRAMBANKSZ 0x03800000 /* [25-23] */
#define LEON_MCFG2_SDRAMBANKSZ_SHIFT 23
#define LEON_TCNT0_MASK 0x7fffff
#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
/* no break yet */
#define ASI_LEON3_SYSCTRL 0x02
#define ASI_LEON3_SYSCTRL_ICFG 0x08
#define ASI_LEON3_SYSCTRL_DCFG 0x0c
#define ASI_LEON3_SYSCTRL_CFG_SNOOPING (1 << 27)
#define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << (((c) >> 20) & 0xf))
#ifndef __ASSEMBLY__
/* do a virtual address read without cache */
static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
{
unsigned long retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
return retval;
}
/* do a physical address bypass write, i.e. for 0x80000000 */
static inline void leon_store_reg(unsigned long paddr, unsigned long value)
{
__asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr),
"i"(ASI_LEON_BYPASS) : "memory");
}
/* do a physical address bypass load, i.e. for 0x80000000 */
static inline unsigned long leon_load_reg(unsigned long paddr)
{
unsigned long retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS));
return retval;
}
extern inline void leon_srmmu_disabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
"i"(ASI_LEON_MMUREGS));
retval |= LEON_CNR_CTRL_TLBDIS;
__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
"i"(ASI_LEON_MMUREGS) : "memory");
}
extern inline void leon_srmmu_enabletlb(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
"i"(ASI_LEON_MMUREGS));
retval = retval & ~LEON_CNR_CTRL_TLBDIS;
__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
"i"(ASI_LEON_MMUREGS) : "memory");
}
/* macro access for leon_load_reg() and leon_store_reg() */
#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
/* macro access for leon_readnobuffer_reg() */
#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
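
A minimal sketch of a read-modify-write through the bypass accessors above. The 0x80000004 address (the LEON2 memory configuration register 2) is an assumption for illustration only; on LEON3 register locations come from AMBA Plug & Play instead.

static inline void example_enable_sdram(void)
{
        /* hypothetical register address, see note above */
        unsigned long mcfg2 = LEON_BYPASS_LOAD_PA(0x80000004);

        mcfg2 |= LEON_MCFG2_SDRAMEN;
        LEON_BYPASS_STORE_PA(0x80000004, mcfg2);
}
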
extern void sparc_leon_eirq_register(int eirq);
extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
extern unsigned long last_valid_pfn;
extern inline unsigned long sparc_leon3_get_dcachecfg(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r"(retval) :
"r"(ASI_LEON3_SYSCTRL_DCFG),
"i"(ASI_LEON3_SYSCTRL));
return retval;
}
/* enable snooping */
extern inline void sparc_leon3_enable_snooping(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x800000, %%l2\n\t"
"or %%l2, %%l1, %%l2\n\t"
"sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
}
extern inline void sparc_leon3_disable_cache(void)
{
__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
"set 0x00000f, %%l2\n\t"
"andn %%l2, %%l1, %%l2\n\t"
"sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
}
#endif /*!__ASSEMBLY__*/
#ifdef CONFIG_SMP
# define LEON3_IRQ_RESCHEDULE 13
# define LEON3_IRQ_TICKER (leon_percpu_timer_dev[0].irq)
# define LEON3_IRQ_CROSS_CALL 15
#endif
#if defined(PAGE_SIZE_LEON_8K)
#define LEON_PAGE_SIZE_LEON 1
#elif defined(PAGE_SIZE_LEON_16K)
#define LEON_PAGE_SIZE_LEON 2
#else
#define LEON_PAGE_SIZE_LEON 0
#endif
#if LEON_PAGE_SIZE_LEON == 0
/* [ 8, 6, 6 ] + 12 */
#define LEON_PGD_SH 24
#define LEON_PGD_M 0xff
#define LEON_PMD_SH 18
#define LEON_PMD_SH_V (LEON_PGD_SH-2)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 12
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 1
/* [ 7, 6, 6 ] + 13 */
#define LEON_PGD_SH 25
#define LEON_PGD_M 0x7f
#define LEON_PMD_SH 19
#define LEON_PMD_SH_V (LEON_PGD_SH-1)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 13
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 2
/* [ 6, 6, 6 ] + 14 */
#define LEON_PGD_SH 26
#define LEON_PGD_M 0x3f
#define LEON_PMD_SH 20
#define LEON_PMD_SH_V (LEON_PGD_SH-0)
#define LEON_PMD_M 0x3f
#define LEON_PTE_SH 14
#define LEON_PTE_M 0x3f
#elif LEON_PAGE_SIZE_LEON == 3
/* [ 4, 7, 6 ] + 15 */
#define LEON_PGD_SH 28
#define LEON_PGD_M 0x0f
#define LEON_PMD_SH 21
#define LEON_PMD_SH_V (LEON_PGD_SH-0)
#define LEON_PMD_M 0x7f
#define LEON_PTE_SH 15
#define LEON_PTE_M 0x3f
#else
#error cannot determine LEON_PAGE_SIZE_LEON
#endif
#define PAGE_MIN_SHIFT (12)
#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
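
As a sketch of the table-walk arithmetic: under the default [8, 6, 6] + 12 layout (LEON_PAGE_SIZE_LEON == 0), each SH/M pair extracts one table index from a virtual address.

static inline unsigned long example_pgd_index(unsigned long vaddr)
{
        /* bits [31:24] select one of LEON_PGD_M + 1 == 256 PGD entries;
         * the PMD and PTE indices follow the same pattern with
         * LEON_PMD_SH/LEON_PMD_M and LEON_PTE_SH/LEON_PTE_M */
        return (vaddr >> LEON_PGD_SH) & LEON_PGD_M;
}
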
#define LEON3_XCCR_SETS_MASK 0x07000000UL
#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
#define LEON2_CCR_DSETS_MASK 0x03000000UL
#define LEON2_CFG_SSIZE_MASK 0x00007000UL
#ifndef __ASSEMBLY__
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
struct vm_area_struct;
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
extern void leon_flush_cache_all(void);
extern void leon_flush_tlb_all(void);
extern int leon_flush_during_switch;
extern int leon_flush_needed(void);
extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
/* struct that holds LEON3 cache configuration registers */
struct leon3_cacheregs {
unsigned long ccr; /* 0x00 - Cache Control Register */
unsigned long iccr; /* 0x08 - Instruction Cache Configuration Register */
unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
};
/* struct that holds LEON2 cache configuration register
* & configuration register
*/
struct leon2_cacheregs {
unsigned long ccr, cfg;
};
#ifdef __KERNEL__
#include <linux/interrupt.h>
struct device_node;
extern int sparc_leon_eirq_get(int eirq, int cpu);
extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id);
extern void sparc_leon_eirq_register(int eirq);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_init_timers(irq_handler_t counter_fn);
extern void leon_clear_clock_irq(void);
extern void leon_load_profile_irq(int cpu, unsigned int limit);
extern void leon_trans_init(struct device_node *dp);
extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
extern void leon_init_IRQ(void);
extern void leon_init(void);
extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void init_leon(void);
extern void poke_leonsparc(void);
extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
extern int leon_flush_needed(void);
extern void leon_switch_mm(void);
extern int srmmu_swprobe_trace;
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* macros used in leon_mm.c */
#define PFN(x) ((x) >> PAGE_SHIFT)
#define _pfn_valid(pfn) (((pfn) < last_valid_pfn) && ((pfn) >= PFN(phys_base)))
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
#else /* defined(CONFIG_SPARC_LEON) */
/* nop definitions for !LEON case */
#define leon_init() do {} while (0)
#define leon_switch_mm() do {} while (0)
#define leon_init_IRQ() do {} while (0)
#define init_leon() do {} while (0)
#endif /* !defined(CONFIG_SPARC_LEON) */
#endif


@@ -0,0 +1,263 @@
/*
 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com), Gaisler Research
 * Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com), Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
*/
#ifndef LEON_AMBA_H_INCLUDE
#define LEON_AMBA_H_INCLUDE
#ifndef __ASSEMBLY__
struct amba_prom_registers {
unsigned int phys_addr; /* The physical address of this register */
unsigned int reg_size; /* How many bytes does this register take up? */
};
#endif
/*
* The following defines the bits in the LEON UART Status Registers.
*/
#define LEON_REG_UART_STATUS_DR 0x00000001 /* Data Ready */
#define LEON_REG_UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */
#define LEON_REG_UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */
#define LEON_REG_UART_STATUS_BR 0x00000008 /* Break Error */
#define LEON_REG_UART_STATUS_OE 0x00000010 /* RX Overrun Error */
#define LEON_REG_UART_STATUS_PE 0x00000020 /* RX Parity Error */
#define LEON_REG_UART_STATUS_FE 0x00000040 /* RX Framing Error */
#define LEON_REG_UART_STATUS_ERR 0x00000078 /* Error Mask */
/*
* The following defines the bits in the LEON UART Ctrl Registers.
*/
#define LEON_REG_UART_CTRL_RE 0x00000001 /* Receiver enable */
#define LEON_REG_UART_CTRL_TE 0x00000002 /* Transmitter enable */
#define LEON_REG_UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */
#define LEON_REG_UART_CTRL_TI 0x00000008 /* Transmitter irq */
#define LEON_REG_UART_CTRL_PS 0x00000010 /* Parity select */
#define LEON_REG_UART_CTRL_PE 0x00000020 /* Parity enable */
#define LEON_REG_UART_CTRL_FL 0x00000040 /* Flow control enable */
#define LEON_REG_UART_CTRL_LB 0x00000080 /* Loop Back enable */
#define LEON3_GPTIMER_EN 1
#define LEON3_GPTIMER_RL 2
#define LEON3_GPTIMER_LD 4
#define LEON3_GPTIMER_IRQEN 8
#define LEON3_GPTIMER_SEPIRQ 8
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
/* 0 = hold scalar and counter */
#define LEON23_REG_TIMER_CONTROL_RL 0x00000002 /* 1 = reload at 0 */
/* 0 = stop at 0 */
#define LEON23_REG_TIMER_CONTROL_LD 0x00000004 /* 1 = load counter */
/* 0 = no function */
#define LEON23_REG_TIMER_CONTROL_IQ 0x00000008 /* 1 = irq enable */
/* 0 = no function */
/*
* The following defines the bits in the LEON PS/2 Status Registers.
*/
#define LEON_REG_PS2_STATUS_DR 0x00000001 /* Data Ready */
#define LEON_REG_PS2_STATUS_PE 0x00000002 /* Parity error */
#define LEON_REG_PS2_STATUS_FE 0x00000004 /* Framing error */
#define LEON_REG_PS2_STATUS_KI 0x00000008 /* Keyboard inhibit */
#define LEON_REG_PS2_STATUS_RF 0x00000010 /* RX buffer full */
#define LEON_REG_PS2_STATUS_TF 0x00000020 /* TX buffer full */
/*
* The following defines the bits in the LEON PS/2 Ctrl Registers.
*/
#define LEON_REG_PS2_CTRL_RE 0x00000001 /* Receiver enable */
#define LEON_REG_PS2_CTRL_TE 0x00000002 /* Transmitter enable */
#define LEON_REG_PS2_CTRL_RI 0x00000004 /* Keyboard receive irq */
#define LEON_REG_PS2_CTRL_TI 0x00000008 /* Keyboard transmit irq */
#define LEON3_IRQMPSTATUS_CPUNR 28
#define LEON3_IRQMPSTATUS_BROADCAST 27
#define GPTIMER_CONFIG_IRQNT(a) (((a) >> 3) & 0x1f)
#define GPTIMER_CONFIG_ISSEP(a) ((a) & (1 << 8))
#define GPTIMER_CONFIG_NTIMERS(a) ((a) & (0x7))
#define LEON3_GPTIMER_CTRL_PENDING 0x10
#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
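
A decoding sketch for the helpers above: a hypothetical config word 0x143 reads as 3 timers, separate interrupts, first IRQ line 8; assuming the GRLIB convention that separate-IRQ timers use consecutive lines, timer i then maps to IRQ 8 + i.

static inline int example_gptimer_irq(u32 config, int i)
{
        int first = GPTIMER_CONFIG_IRQNT(config);       /* bits 7:3 */

        /* all timers share one line unless the separate-IRQ bit is set */
        return GPTIMER_CONFIG_ISSEP(config) ? first + i : first;
}
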
#ifdef CONFIG_SPARC_LEON
#ifndef __ASSEMBLY__
struct leon3_irqctrl_regs_map {
u32 ilevel;
u32 ipend;
u32 iforce;
u32 iclear;
u32 mpstatus;
u32 mpbroadcast;
u32 notused02;
u32 notused03;
u32 notused10;
u32 notused11;
u32 notused12;
u32 notused13;
u32 notused20;
u32 notused21;
u32 notused22;
u32 notused23;
u32 mask[16];
u32 force[16];
/* Extended IRQ registers */
u32 intid[16]; /* 0xc0 */
};
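
A sketch of the extended-IRQ path, assuming the leon3_irqctrl_regs pointer declared further down and the LEON3_BYPASS_LOAD_PA() accessor from leon.h: reading intid[cpu] identifies (and, on IRQMP, acknowledges) the extended interrupt source.

static inline int example_get_extended_irq(int cpu)
{
        /* the low 5 bits hold the extended interrupt number */
        return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}
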
struct leon3_apbuart_regs_map {
u32 data;
u32 status;
u32 ctrl;
u32 scaler;
};
struct leon3_gptimerelem_regs_map {
u32 val;
u32 rld;
u32 ctrl;
u32 unused;
};
struct leon3_gptimer_regs_map {
u32 scalar;
u32 scalar_reload;
u32 config;
u32 unused;
struct leon3_gptimerelem_regs_map e[8];
};
/*
* Types and structure used for AMBA Plug & Play bus scanning
*/
#define AMBA_MAXAPB_DEVS 64
#define AMBA_MAXAPB_DEVS_PERBUS 16
struct amba_device_table {
int devnr; /* number of devices on AHB or APB bus */
unsigned int *addr[16]; /* addresses to the devices configuration tables */
unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */
};
struct amba_apbslv_device_table {
int devnr; /* number of devices on AHB or APB bus */
unsigned int *addr[AMBA_MAXAPB_DEVS]; /* addresses to the devices configuration tables */
unsigned int apbmst[AMBA_MAXAPB_DEVS]; /* apb master if an entry is an apb slave */
unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* apb master idx if an entry is an apb slave */
unsigned int allocbits[4]; /* 0=unallocated, 1=allocated driver */
};
struct amba_confarea_type {
struct amba_confarea_type *next;/* next bus in chain */
struct amba_device_table ahbmst;
struct amba_device_table ahbslv;
struct amba_apbslv_device_table apbslv;
unsigned int apbmst;
};
/* collect apb slaves */
struct amba_apb_device {
unsigned int start, irq, bus_id;
struct amba_confarea_type *bus;
};
/* collect ahb slaves */
struct amba_ahb_device {
unsigned int start[4], irq, bus_id;
struct amba_confarea_type *bus;
};
struct device_node;
void _amba_init(struct device_node *dp, struct device_node ***nextp);
extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
extern struct amba_apb_device leon_percpu_timer_dev[16];
extern int leondebug_irq_disable;
extern int leon_debug_irqout;
extern unsigned long leon3_gptimer_irq;
extern unsigned int sparc_leon_eirq;
#endif /* __ASSEMBLY__ */
#define LEON3_IO_AREA 0xfff00000
#define LEON3_CONF_AREA 0xff000
#define LEON3_AHB_SLAVE_CONF_AREA (1 << 11)
#define LEON3_AHB_CONF_WORDS 8
#define LEON3_APB_CONF_WORDS 2
#define LEON3_AHB_MASTERS 16
#define LEON3_AHB_SLAVES 16
#define LEON3_APB_SLAVES 16
#define LEON3_APBUARTS 8
/* Vendor codes */
#define VENDOR_GAISLER 1
#define VENDOR_PENDER 2
#define VENDOR_ESA 4
#define VENDOR_OPENCORES 8
/* Gaisler Research device id's */
#define GAISLER_LEON3 0x003
#define GAISLER_LEON3DSU 0x004
#define GAISLER_ETHAHB 0x005
#define GAISLER_APBMST 0x006
#define GAISLER_AHBUART 0x007
#define GAISLER_SRCTRL 0x008
#define GAISLER_SDCTRL 0x009
#define GAISLER_APBUART 0x00C
#define GAISLER_IRQMP 0x00D
#define GAISLER_AHBRAM 0x00E
#define GAISLER_GPTIMER 0x011
#define GAISLER_PCITRG 0x012
#define GAISLER_PCISBRG 0x013
#define GAISLER_PCIFBRG 0x014
#define GAISLER_PCITRACE 0x015
#define GAISLER_PCIDMA 0x016
#define GAISLER_AHBTRACE 0x017
#define GAISLER_ETHDSU 0x018
#define GAISLER_PIOPORT 0x01A
#define GAISLER_GRGPIO 0x01A
#define GAISLER_AHBJTAG 0x01c
#define GAISLER_ETHMAC 0x01D
#define GAISLER_AHB2AHB 0x020
#define GAISLER_USBDC 0x021
#define GAISLER_ATACTRL 0x024
#define GAISLER_DDRSPA 0x025
#define GAISLER_USBEHC 0x026
#define GAISLER_USBUHC 0x027
#define GAISLER_I2CMST 0x028
#define GAISLER_SPICTRL 0x02D
#define GAISLER_DDR2SPA 0x02E
#define GAISLER_SPIMCTRL 0x045
#define GAISLER_LEON4 0x048
#define GAISLER_LEON4DSU 0x049
#define GAISLER_AHBSTAT 0x052
#define GAISLER_FTMCTRL 0x054
#define GAISLER_KBD 0x060
#define GAISLER_VGA 0x061
#define GAISLER_SVGA 0x063
#define GAISLER_GRSYSMON 0x066
#define GAISLER_GRACECTRL 0x067
#define GAISLER_L2TIME 0xffd /* internal device: leon2 timer */
#define GAISLER_L2C 0xffe /* internal device: leon2compat */
#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
#define amba_vendor(x) (((x) >> 24) & 0xff)
#define amba_device(x) (((x) >> 12) & 0xfff)
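
A decoding sketch for the macros above: a made-up identification word 0x0100c000 yields vendor 1 (VENDOR_GAISLER) and device 0x00c (GAISLER_APBUART).

static inline int example_is_apbuart(unsigned int id)
{
        return amba_vendor(id) == VENDOR_GAISLER &&
               amba_device(id) == GAISLER_APBUART;
}
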
#endif /* !defined(CONFIG_SPARC_LEON) */
#endif


@@ -15,7 +15,7 @@ struct Sun_Machine_Models {
/* Current number of machines we know about that have an IDPROM
* machtype entry including one entry for the 0x80 OBP machines.
*/
#define NUM_SUN_MACHINES 15
#define NUM_SUN_MACHINES 16
/* The machine type in the idprom area looks like this:
*
@@ -30,6 +30,7 @@ struct Sun_Machine_Models {
#define SM_ARCH_MASK 0xf0
#define SM_SUN4 0x20
#define M_LEON 0x30
#define SM_SUN4C 0x50
#define SM_SUN4M 0x70
#define SM_SUN4M_OBP 0x80
@@ -41,6 +42,9 @@ struct Sun_Machine_Models {
#define SM_4_330 0x03 /* Sun 4/300 series */
#define SM_4_470 0x04 /* Sun 4/400 series */
/* Leon machines */
#define M_LEON3_SOC 0x02 /* Leon3 SoC */
/* Sun4c machines Full Name - PROM NAME */
#define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */
#define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */


@@ -5,6 +5,9 @@ extern int __init nmi_init(void);
extern void perfctr_irq(int irq, struct pt_regs *regs);
extern void nmi_adjust_hz(unsigned int new_hz);
extern int nmi_usable;
extern atomic_t nmi_active;
extern void start_nmi_watchdog(void *unused);
extern void stop_nmi_watchdog(void *unused);
#endif /* __NMI_H */


@@ -5,4 +5,7 @@
#else
#include <asm/pci_32.h>
#endif
#include <asm-generic/pci-dma-compat.h>
#endif


@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
*/
#define PCI_DMA_BUS_IS_PHYS (0)
#include <asm/scatterlist.h>
struct pci_dev;
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be a valid struct pci_dev pointer for PCI devices.
*/
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
* size must be the same as what was passed into pci_alloc_consistent,
* and likewise dma_addr must be the same as what *dma_addrp was set to.
*
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
/* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
*/
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
/* Unmap a single streaming mode DMA translation. The dma_addr and size
* must match what was provided for in a previous pci_map_single call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
/*
* Same as above, only with pages instead of mapped addresses.
*/
extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction);
extern void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction);
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
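
A usage sketch of the contract described above (hypothetical hwdev/sg/nents, flat non-chained list assumed): program only the count pairs the mapping actually produced, but unmap with the original nents.

static int example_map_and_program(struct pci_dev *hwdev,
                                   struct scatterlist *sg, int nents)
{
        int i, count = pci_map_sg(hwdev, sg, nents, PCI_DMA_TODEVICE);

        if (count == 0)
                return -EIO;
        for (i = 0; i < count; i++) {
                /* hand sg_dma_address(&sg[i]) and sg_dma_len(&sg[i])
                 * to the device; count may be smaller than nents */
        }
        pci_unmap_sg(hwdev, sg, nents, PCI_DMA_TODEVICE);
        return 0;
}
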
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
/* Make physical memory consistent for a single
* streaming mode DMA translation after a transfer.
*
* If you perform a pci_map_single() but wish to interrogate the
* buffer using the cpu, yet do not wish to tear down the PCI dma
* mapping, you must call this function before doing so. At the
* next point you give the PCI dma address back to the card, you
* must first perform a pci_dma_sync_for_device, and then the device
* again owns the buffer.
*/
extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
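
A sketch of the ownership dance described above (names hypothetical): sync for the CPU before reading a device-written buffer, then sync for the device before handing the buffer back.

static void example_poll_buffer(struct pci_dev *hwdev, void *buf,
                                dma_addr_t handle, size_t size)
{
        pci_dma_sync_single_for_cpu(hwdev, handle, size, PCI_DMA_FROMDEVICE);
        /* ... the CPU may now read buf safely ... */
        pci_dma_sync_single_for_device(hwdev, handle, size, PCI_DMA_FROMDEVICE);
        /* ... the device owns the buffer again ... */
}
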
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
*
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
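
The 24-bit case from the comment above, as a sketch for a hypothetical bus-mastering device: check the mask, then actually install it with pci_set_dma_mask().

static int example_check_24bit(struct pci_dev *pdev)
{
        if (!pci_dma_supported(pdev, 0x00ffffff))
                return -EIO;
        return pci_set_dma_mask(pdev, 0x00ffffff);
}
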
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
dma_addr_t dma_addr)
{
return (dma_addr == PCI_DMA_ERROR_CODE);
}
struct device_node;
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);


@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
*/
#define PCI_DMA_BUS_IS_PHYS (0)
static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
}
static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
}
static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
size_t size, int direction)
{
return dma_map_single(&pdev->dev, ptr, size,
(enum dma_data_direction) direction);
}
static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
size_t size, int direction)
{
dma_unmap_single(&pdev->dev, dma_addr, size,
(enum dma_data_direction) direction);
}
#define pci_map_page(dev, page, off, size, dir) \
pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) \
pci_unmap_single(dev,addr,sz,dir)
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(&pdev->dev, sg, nents,
(enum dma_data_direction) direction);
}
static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(&pdev->dev, sg, nents,
(enum dma_data_direction) direction);
}
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
(enum dma_data_direction) direction);
}
static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
struct scatterlist *sg,
int nents, int direction)
{
dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
(enum dma_data_direction) direction);
}
static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
struct scatterlist *sg,
int nelems, int direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
/* PCI IOMMU mapping bypass support. */
/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
dma_addr_t dma_addr)
{
return dma_mapping_error(&pdev->dev, dma_addr);
}
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,


@@ -0,0 +1,14 @@
#ifndef __ASM_SPARC_PERF_COUNTER_H
#define __ASM_SPARC_PERF_COUNTER_H
extern void set_perf_counter_pending(void);
#define PERF_COUNTER_INDEX_OFFSET 0
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
#else
static inline void init_hw_perf_counters(void) { }
#endif
#endif


@@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page)
}
#ifndef CONFIG_SPARC_LEON
static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
unsigned long retval;
@@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
return retval;
}
#else
#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
#endif
static inline int
srmmu_get_pte (unsigned long addr)


@@ -118,5 +118,8 @@ extern struct device_node *of_console_device;
extern char *of_console_path;
extern char *of_console_options;
extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
extern char *build_full_name(struct device_node *dp);
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */


@@ -29,6 +29,9 @@
#define SO_RCVBUFFORCE 0x100b
#define SO_ERROR 0x1007
#define SO_TYPE 0x1008
#define SO_PROTOCOL 0x1028
#define SO_DOMAIN 0x1029
/* Linux specific, keep the same. */
#define SO_NO_CHECK 0x000b


@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
static inline void __read_lock(raw_rwlock_t *rw)
static inline void arch_read_lock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
#define __raw_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
__read_lock(lock); \
arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
static inline void __read_unlock(raw_rwlock_t *rw)
static inline void arch_read_unlock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
#define __raw_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
__read_unlock(lock); \
arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
static inline int __read_trylock(raw_rwlock_t *rw)
static inline int arch_read_trylock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
res = __read_trylock(lock); \
res = arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})


@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
static void inline __read_lock(raw_rwlock_t *lock)
static void inline arch_read_lock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
: "memory");
}
static int inline __read_trylock(raw_rwlock_t *lock)
static int inline arch_read_trylock(raw_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
return tmp1;
}
static void inline __read_unlock(raw_rwlock_t *lock)
static void inline arch_read_unlock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
: "memory");
}
static void inline __write_lock(raw_rwlock_t *lock)
static void inline arch_write_lock(raw_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
: "memory");
}
static void inline __write_unlock(raw_rwlock_t *lock)
static void inline arch_write_unlock(raw_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
: "memory");
}
static int inline __write_trylock(raw_rwlock_t *lock)
static int inline arch_write_trylock(raw_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
return result;
}
#define __raw_read_lock(p) __read_lock(p)
#define __raw_read_lock_flags(p, f) __read_lock(p)
#define __raw_read_trylock(p) __read_trylock(p)
#define __raw_read_unlock(p) __read_unlock(p)
#define __raw_write_lock(p) __write_lock(p)
#define __raw_write_lock_flags(p, f) __write_lock(p)
#define __raw_write_unlock(p) __write_unlock(p)
#define __raw_write_trylock(p) __write_trylock(p)
#define __raw_read_lock(p) arch_read_lock(p)
#define __raw_read_lock_flags(p, f) arch_read_lock(p)
#define __raw_read_trylock(p) arch_read_trylock(p)
#define __raw_read_unlock(p) arch_read_unlock(p)
#define __raw_write_lock(p) arch_write_lock(p)
#define __raw_write_lock_flags(p, f) arch_write_lock(p)
#define __raw_write_unlock(p) arch_write_unlock(p)
#define __raw_write_trylock(p) arch_write_trylock(p)
#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)


@@ -32,6 +32,7 @@ enum sparc_cpu {
sun4u = 0x05, /* V8 ploos ploos */
sun_unknown = 0x06,
ap1000 = 0x07, /* almost a sun4m */
sparc_leon = 0x08, /* Leon SoC */
};
/* Really, userland should not be looking at any of this... */


@@ -29,6 +29,10 @@ enum sparc_cpu {
/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0
extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern const char *sparc_pmu_type;
extern char reboot_command[];
/* These are here in an effort to more fully work around Spitfire Errata


@@ -8,9 +8,8 @@
* need to be careful to avoid a name clashes.
*/
#if defined(__sparc__) && defined(__arch64__)
#if defined(__sparc__)
/*** SPARC 64 bit ***/
#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
@@ -26,33 +25,21 @@ typedef unsigned short umode_t;
/* Dma addresses come in generic and 64-bit flavours. */
typedef u32 dma_addr_t;
#if defined(__arch64__)
/*** SPARC 64 bit ***/
typedef u64 dma64_addr_t;
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#else
/*** SPARC 32 bit ***/
#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef u32 dma_addr_t;
typedef u32 dma64_addr_t;
#endif /* defined(__arch64__) */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* defined(__sparc__) && defined(__arch64__) */
#endif /* defined(__sparc__) */
#endif /* defined(_SPARC_TYPES_H) */


@@ -7,8 +7,8 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>


@@ -395,8 +395,9 @@
#define __NR_preadv 324
#define __NR_pwritev 325
#define __NR_rt_tgsigqueueinfo 326
#define __NR_perf_counter_open 327
#define NR_SYSCALLS 327
#define NR_SYSCALLS 328
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,