Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts: arch/sparc/Kconfig
@@ -26,6 +26,8 @@ config SPARC
	select RTC_CLASS
	select RTC_DRV_M48T59
	select HAVE_PERF_COUNTERS
	select HAVE_DMA_ATTRS
	select HAVE_DMA_API_DEBUG

config SPARC32
	def_bool !64BIT
@@ -3,6 +3,7 @@

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

struct dma_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
			   size_t size,
			   enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nhwentries,
			 enum dma_data_direction direction);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction direction);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
				int nelems,
				enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
};
extern const struct dma_ops *dma_ops;
extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	return dma_ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
	struct dma_map_ops *ops = get_dma_ops(dev);

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
				 direction);
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_addr, size, direction);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, page, offset, size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction direction)
{
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(dev, dma_handle, size,
						direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction direction)
{
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
}


static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
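
Taken together, the wrappers above route every mapping through get_dma_ops() and the dma-debug hooks. A driver-side sketch of the resulting call path (illustrative only; the example_* names are made up, not part of this commit):

/* Hypothetical driver usage of the API defined above. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* On sparc32 PCI devices this dispatches to pci32_dma_ops via
	 * get_dma_ops(); everything else goes through *dma_ops. */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))	/* DMA_ERROR_CODE test */
		return -ENOMEM;

	/* ... hand "handle" to the hardware, wait for completion ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}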
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
	return retval;
}

void __trigger_all_cpu_backtrace(void);
#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
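
The define-to-itself line above is the usual kernel idiom for advertising an arch override: generic code can test for the macro at preprocessing time and fall back otherwise. Schematically (a simplified sketch, not any one kernel file):

#ifdef arch_trigger_all_cpu_backtrace
/* the architecture supplied its own implementation; use it */
#else
static inline void arch_trigger_all_cpu_backtrace(void)
{
	/* no arch support: do nothing */
}
#endif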
@@ -5,4 +5,7 @@
#else
#include <asm/pci_32.h>
#endif

#include <asm-generic/pci-dma-compat.h>

#endif
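
That single include replaces the hand-rolled pci_* DMA wrappers sparc used to carry (deleted below). Roughly, the compat header defines each legacy call in terms of the generic DMA API, along these lines (a paraphrased sketch, not the verbatim header):

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				  size, dma_handle, GFP_ATOMIC);
}

static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size,
			      (enum dma_data_direction) direction);
}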
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#include <asm/scatterlist.h>

struct pci_dev;

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))

/*
 * Same as above, only with pages instead of mapped addresses.
 */
extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			       unsigned long offset, size_t size, int direction);
extern void pci_unmap_page(struct pci_dev *hwdev,
			   dma_addr_t dma_address, size_t size, int direction);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the device
 * again owns the buffer.
 */
extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	return 1;
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif

#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)

static inline int pci_dma_mapping_error(struct pci_dev *pdev,
					dma_addr_t dma_addr)
{
	return (dma_addr == PCI_DMA_ERROR_CODE);
}

struct device_node;
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
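
The DECLARE_PCI_UNMAP_ADDR stanza above exists so drivers can stash DMA handles in their own structures at zero cost on platforms where unmapping is a nop. A hedged sketch of typical driver usage (the example_* names are illustrative; the companion DECLARE_PCI_UNMAP_LEN/pci_unmap_addr/pci_unmap_len macros come from the same family, not shown in this hunk):

struct example_rx_desc {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void example_unmap_rx(struct pci_dev *pdev, struct example_rx_desc *d)
{
	/* Reads back the handle and length stashed at map time. */
	pci_unmap_single(pdev,
			 pci_unmap_addr(d, mapping),
			 pci_unmap_len(d, len),
			 PCI_DMA_FROMDEVICE);
}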
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
					 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
}

static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
				       void *vaddr, dma_addr_t dma_handle)
{
	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
}

static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
					size_t size, int direction)
{
	return dma_map_single(&pdev->dev, ptr, size,
			      (enum dma_data_direction) direction);
}

static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
				    size_t size, int direction)
{
	dma_unmap_single(&pdev->dev, dma_addr, size,
			 (enum dma_data_direction) direction);
}

#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) \
	pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))

static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
			     int nents, int direction)
{
	return dma_map_sg(&pdev->dev, sg, nents,
			  (enum dma_data_direction) direction);
}

static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
				int nents, int direction)
{
	dma_unmap_sg(&pdev->dev, sg, nents,
		     (enum dma_data_direction) direction);
}

static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction)
{
	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
				(enum dma_data_direction) direction);
}

static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}

static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
					   struct scatterlist *sg,
					   int nents, int direction)
{
	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
			    (enum dma_data_direction) direction);
}

static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
#define PCI64_ADDR_BASE		0xfffc000000000000UL

static inline int pci_dma_mapping_error(struct pci_dev *pdev,
					dma_addr_t dma_addr)
{
	return dma_mapping_error(&pdev->dev, dma_addr);
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
static inline void __read_lock(raw_rwlock_t *rw)
static inline void arch_read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_lock(lock); \
	arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __read_unlock(raw_rwlock_t *rw)
static inline void arch_read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_unlock(lock); \
	arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
	return (val == 0);
}

static inline int __read_trylock(raw_rwlock_t *rw)
static inline int arch_read_trylock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __read_trylock(lock); \
	res = arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

static void inline __read_lock(raw_rwlock_t *lock)
static void inline arch_read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
	: "memory");
}

static int inline __read_trylock(raw_rwlock_t *lock)
static int inline arch_read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
	return tmp1;
}

static void inline __read_unlock(raw_rwlock_t *lock)
static void inline arch_read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
	: "memory");
}

static void inline __write_lock(raw_rwlock_t *lock)
static void inline arch_write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
	: "memory");
}

static void inline __write_unlock(raw_rwlock_t *lock)
static void inline arch_write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
	: "memory");
}

static int inline __write_trylock(raw_rwlock_t *lock)
static int inline arch_write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_lock_flags(p, f) __read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_lock_flags(p, f) __write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)
#define __raw_read_lock(p)	arch_read_lock(p)
#define __raw_read_lock_flags(p, f) arch_read_lock(p)
#define __raw_read_trylock(p)	arch_read_trylock(p)
#define __raw_read_unlock(p)	arch_read_unlock(p)
#define __raw_write_lock(p)	arch_write_lock(p)
#define __raw_write_lock_flags(p, f) arch_write_lock(p)
#define __raw_write_unlock(p)	arch_write_unlock(p)
#define __raw_write_trylock(p)	arch_write_trylock(p)

#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)
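
The two can_lock tests above encode the rwlock word layout these routines assume: the top bit marks a writer, everything below it counts readers. The "~16,000,000 cpus" remark in the 32-bit header is that reader-count field being 24 bits wide there. A standalone toy model of the layout (illustrative C, not kernel code):

#include <assert.h>

#define WRITER_BIT	0x80000000UL

static int read_can_lock(unsigned long lock)  { return !(lock & WRITER_BIT); }
static int write_can_lock(unsigned long lock) { return !lock; }

int main(void)
{
	unsigned long lock = 0;

	lock += 2;			/* two readers hold the lock */
	assert(read_can_lock(lock));	/* more readers may enter   */
	assert(!write_can_lock(lock));	/* a writer may not         */

	lock = WRITER_BIT;		/* a writer holds the lock  */
	assert(!read_can_lock(lock));
	return 0;
}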
@@ -63,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
obj-$(CONFIG_SPARC32)     += devres.o
devres-y                  := ../../../kernel/irq/devres.o

obj-$(CONFIG_SPARC32)     += dma.o
obj-y                     += dma.o

obj-$(CONFIG_SPARC32_PCI) += pcic.o
@@ -1,178 +1,13 @@
/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
 *
 * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 15)

#include "dma.h"

int dma_supported(struct device *dev, u64 mask)
static int __init dma_init(void)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), mask);
#endif
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(dma_set_mask);

static void *dma32_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
#endif
	return sbus_alloc_consistent(dev, size, dma_handle);
}

static void dma32_free_coherent(struct device *dev, size_t size,
				void *cpu_addr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_free_consistent(to_pci_dev(dev), size,
				    cpu_addr, dma_handle);
		return;
	}
#endif
	sbus_free_consistent(dev, size, cpu_addr, dma_handle);
}

static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_map_page(to_pci_dev(dev), page, offset,
				    size, (int)direction);
#endif
	return sbus_map_single(dev, page_address(page) + offset,
			       size, (int)direction);
}

static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
			     size_t size, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_unmap_page(to_pci_dev(dev), dma_address,
			       size, (int)direction);
		return;
	}
#endif
	sbus_unmap_single(dev, dma_address, size, (int)direction);
}

static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
#endif
	return sbus_map_sg(dev, sg, nents, direction);
}

void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
		return;
	}
#endif
	sbus_unmap_sg(dev, sg, nents, (int)direction);
}

static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				      size_t size,
				      enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
					    size, (int)direction);
		return;
	}
#endif
	sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
}

static void dma32_sync_single_for_device(struct device *dev,
					 dma_addr_t dma_handle, size_t size,
					 enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
					       size, (int)direction);
		return;
	}
#endif
	sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
}

static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				  int nelems, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
					nelems, (int)direction);
		return;
	}
#endif
	BUG();
}

static void dma32_sync_sg_for_device(struct device *dev,
				     struct scatterlist *sg, int nelems,
				     enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type) {
		pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
					   nelems, (int)direction);
		return;
	}
#endif
	BUG();
}

static const struct dma_ops dma32_dma_ops = {
	.alloc_coherent		= dma32_alloc_coherent,
	.free_coherent		= dma32_free_coherent,
	.map_page		= dma32_map_page,
	.unmap_page		= dma32_unmap_page,
	.map_sg			= dma32_map_sg,
	.unmap_sg		= dma32_unmap_sg,
	.sync_single_for_cpu	= dma32_sync_single_for_cpu,
	.sync_single_for_device	= dma32_sync_single_for_device,
	.sync_sg_for_cpu	= dma32_sync_sg_for_cpu,
	.sync_sg_for_device	= dma32_sync_sg_for_device,
};

const struct dma_ops *dma_ops = &dma32_dma_ops;
EXPORT_SYMBOL(dma_ops);
fs_initcall(dma_init);
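
After this hunk, dma.c shrinks to a DMA-debug registration stub; the bus dispatch above moves behind per-bus dma_map_ops tables. A reconstruction of the resulting file from the additions shown (a sketch, not a verbatim copy):

/* dma.c: reduced to registering the DMA-API debug facility. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 15)

static int __init dma_init(void)
{
	/* Preallocate tracking slots for the dma-debug core. */
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);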
@ -1,14 +0,0 @@
|
||||
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
|
||||
void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
|
||||
dma_addr_t sbus_map_single(struct device *dev, void *va,
|
||||
size_t len, int direction);
|
||||
void sbus_unmap_single(struct device *dev, dma_addr_t ba,
|
||||
size_t n, int direction);
|
||||
int sbus_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int n, int direction);
|
||||
void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int n, int direction);
|
||||
void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
|
||||
size_t size, int direction);
|
||||
void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
|
||||
size_t size, int direction);
|
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction)
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction)
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static const struct dma_ops sun4u_dma_ops = {
static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
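
The extra struct dma_attrs * parameter threaded through every dma_4u_* signature above comes from the generic ops table this file now implements. For reference, the relevant part of struct dma_map_ops in this era looks roughly like this (paraphrased excerpt, not verbatim):

struct dma_map_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	/* ... the sync_{single,sg}_for_{cpu,device} callbacks keep their
	 * old signatures, without an attrs argument ... */
};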
@@ -49,8 +49,6 @@
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
@@ -247,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -300,7 +299,8 @@ err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;
@@ -318,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
			(long)((res->end-res->start)+1), n);
		return;
	}
@@ -338,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
@@ -351,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);

@@ -367,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();
@@ -396,7 +422,8 @@ arch_initcall(sparc_register_ioport);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
@@ -440,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -450,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;
@@ -482,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)

	free_pages(pgp, get_order(n));
}
EXPORT_SYMBOL(pci_free_consistent);

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
			  int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
EXPORT_SYMBOL(pci_map_single);

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
		      int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
				   (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_unmap_single);

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(pci_map_page);

void pci_unmap_page(struct pci_dev *hwdev,
		    dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
EXPORT_SYMBOL(pci_unmap_page);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
@@ -552,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
	       int direction)
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -567,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
	}
	return nents;
}
EXPORT_SYMBOL(pci_map_sg);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
		  int direction)
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
@@ -589,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
		}
	}
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
@@ -601,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
				   (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
				   (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_device);

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
@@ -627,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
@@ -642,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
@@ -659,9 +639,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_device);

struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */

/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(dma_set_mask);
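
The direction tests in the pci32 unmap and sync routines above all follow one rule: on this write-through cache (the "IIep is write-through, not flushing" remarks earlier), CPU stores are already visible to the device, so only buffers the device may have written need their stale cached copies invalidated. A toy model of that decision (illustrative C, not kernel code):

#include <stdbool.h>

enum dma_dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

/* Mirrors the "if (dir != PCI_DMA_TODEVICE)" tests above: a write-through
 * cache never needs flushing toward the device, only invalidation before
 * the CPU reads data the device wrote. */
static bool needs_cpu_invalidate(enum dma_dir dir)
{
	return dir != TO_DEVICE;
}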

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;
@@ -113,7 +113,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
	pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,

static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction)
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction)
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
static struct dma_map_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_page		= dma_4v_map_page,
	.unmap_page		= dma_4v_unmap_page,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.sync_single_for_cpu	= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4v_sync_sg_for_cpu,
};

static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
	}
}

void __trigger_all_cpu_backtrace(void)
void arch_trigger_all_cpu_backtrace(void)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)

static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
	__trigger_all_cpu_backtrace();
	arch_trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}
@@ -88,7 +88,7 @@ void prom_cmdline(void)
/* Drop into the prom, but completely terminate the program.
 * No chance of continuing.
 */
void prom_halt(void)
void notrace prom_halt(void)
{
#ifdef CONFIG_SUN_LDOMS
	if (ldom_domaining_enabled)
@@ -14,14 +14,14 @@
 */

#include <linux/kernel.h>
#include <linux/compiler.h>

#include <asm/openprom.h>
#include <asm/oplib.h>

static char ppbuf[1024];

void
prom_write(const char *buf, unsigned int n)
void notrace prom_write(const char *buf, unsigned int n)
{
	char ch;

@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
	}
}

void
prom_printf(const char *fmt, ...)
void notrace prom_printf(const char *fmt, ...)
{
	va_list args;
	int i;
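
prom_write() and prom_printf() gain notrace for the same reason as prom_halt() and register_one_mondo() above: they can run on the function tracer's own paths, where an instrumented call would recurse. The attribute comes from linux/compiler.h (hence the new include) and expands roughly to:

/* Paraphrased from linux/compiler.h: keeps gcc's function
 * instrumentation hooks out of the marked function. */
#define notrace	__attribute__((no_instrument_function))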