Merge branch 'master' into for-linus
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #endif
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+		return 0;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr + get_dma_direct_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr - get_dma_direct_offset(dev);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
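For orientation: dma_capable() above rejects a buffer when a bus-specific hook objects or when the buffer's end would exceed the device's DMA mask, and phys_to_dma()/dma_to_phys() translate between physical and bus addresses by a per-device offset. A minimal userspace sketch of the same checks, with mock types and a mock constant offset standing in for the kernel's definitions (nothing below is kernel API; the addr_needs_map hook is omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t mock_dma_addr_t;
typedef uint64_t mock_phys_addr_t;

struct mock_device {
	uint64_t *dma_mask;	/* NULL means the device cannot DMA at all */
};

/* Assumed constant bus offset; get_dma_direct_offset() is per-device in
 * the kernel, this is just a stand-in value. */
static const uint64_t mock_dma_offset = 0x80000000ull;

/* Same shape as dma_capable() above, minus the addr_needs_map hook:
 * the whole buffer must end at or below the device's addressing limit. */
static bool mock_dma_capable(const struct mock_device *dev,
			     mock_dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size <= *dev->dma_mask;
}

static mock_dma_addr_t mock_phys_to_dma(mock_phys_addr_t paddr)
{
	return paddr + mock_dma_offset;	/* mirror of phys_to_dma() */
}

int main(void)
{
	uint64_t mask = 0xffffffffull;	/* a 32-bit-capable device */
	struct mock_device dev = { .dma_mask = &mask };
	mock_dma_addr_t bus = mock_phys_to_dma(0x1000);

	printf("bus addr 0x%llx, capable: %d\n",
	       (unsigned long long)bus,
	       (int)mock_dma_capable(&dev, bus, 4096));
	return 0;
}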
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
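The comment in this hunk hinges on write ordering: because the hash code pre-invalidates a previously hashed PTE, storing the two 32-bit halves of a 64-bit PTE in a fixed order with a barrier in between means a concurrent reader sees either the old invalid entry or the complete new one, never a mix. A rough userspace illustration of that discipline (the half layout and the barrier macro are stand-ins, not the kernel's implementation, which uses two stw instructions separated by an eieio):

/* Illustrative only: a 64-bit "PTE" stored as two ordered 32-bit halves. */
#include <stdint.h>

struct mock_pte {
	volatile uint32_t hi;	/* written first */
	volatile uint32_t lo;	/* written second; assume the valid bit lives here */
};

/* Compiler barrier as a stand-in for the hardware store barrier. */
#define store_barrier() __asm__ __volatile__("" ::: "memory")

static void mock_set_pte(struct mock_pte *ptep, uint64_t val)
{
	/* With the old entry pre-invalidated, a reader that races with
	 * this store sees either the invalid old entry or both new
	 * halves, because the halves land in a fixed order. */
	ptep->hi = (uint32_t)(val >> 32);
	store_barrier();
	ptep->lo = (uint32_t)val;
}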
@@ -154,6 +154,7 @@ int qe_get_snum(void);
 void qe_put_snum(u8 snum);
 unsigned int qe_get_num_of_risc(void);
 unsigned int qe_get_num_of_snums(void);
+int qe_alive_during_sleep(void);
 
 /* we actually use cpm_muram implementation, define this for convenience */
 #define qe_muram_init cpm_muram_init
@@ -64,4 +64,7 @@
 #define SO_TIMESTAMPING 37
 #define SCM_TIMESTAMPING SO_TIMESTAMPING
 
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
 #endif /* _ASM_POWERPC_SOCKET_H */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return __spin_trylock(lock) == 0;
+	return arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
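The rename from __spin_trylock() to arch_spin_trylock() leaves the acquisition pattern itself untouched: try the lock once, and on failure spin with lowered SMT thread priority until the lock looks free, then retry. A compilable C11 sketch of that try-then-spin shape, with a plain atomic standing in for the lwarx/stwcx.-based primitive (all names here are mock ones):

#include <stdatomic.h>

static atomic_int mock_lock;	/* 0 = free, non-zero = held */

/* Returns the old value, so 0 means we took the lock; this mirrors the
 * arch_spin_trylock() convention documented above. */
static int mock_trylock(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&mock_lock, &expected, 1)
		? 0 : expected;
}

static void mock_lock_acquire(void)
{
	while (1) {
		if (mock_trylock() == 0)
			break;
		/* On POWER, HMT_low()/HMT_medium() lower and restore the
		 * SMT thread priority around this wait; a portable sketch
		 * just spins until the lock looks free, then retries. */
		do {
			/* relax */
		} while (atomic_load(&mock_lock) != 0);
	}
}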
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__read_trylock(rw) > 0))
+		if (likely(arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__write_trylock(rw) == 0))
+		if (likely(arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 
 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return __read_trylock(rw) > 0;
+	return arch_read_trylock(rw) > 0;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	return __write_trylock(rw) == 0;
+	return arch_write_trylock(rw) == 0;
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
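The renamed rwlock trylocks keep the return conventions documented in the comments above: the read path returns the old lock value plus one (> 0 means success) and the write path returns the old value itself (0 means success). A C11 sketch of those conventions with a signed counter standing in for the powerpc lock word (mock names only; the real code uses lwarx/stwcx. retry loops and a lock token rather than the -1 used here):

#include <stdatomic.h>

static atomic_long mock_rw;	/* 0 = free, > 0 = reader count, -1 = writer */

/* Returns the old value + 1, so > 0 means we got a read lock. */
static long mock_read_trylock(void)
{
	long old = atomic_load(&mock_rw);

	/* Retry on racing updates, like the stwcx. retry loop. */
	while (old >= 0 &&
	       !atomic_compare_exchange_weak(&mock_rw, &old, old + 1))
		;
	return old + 1;	/* old < 0 (writer held) yields a failure value */
}

/* Returns the old value, so 0 means we got the write lock; -1 stands in
 * for the kernel's write-lock token. */
static long mock_write_trylock(void)
{
	long old = atomic_load(&mock_rw);

	while (old == 0 &&
	       !atomic_compare_exchange_weak(&mock_rw, &old, -1))
		;
	return old;	/* non-zero means readers or a writer beat us */
}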