Merge branches 'x86/numa-fixes', 'x86/apic', 'x86/apm', 'x86/bitops', 'x86/build', 'x86/cleanups', 'x86/cpa', 'x86/cpu', 'x86/defconfig', 'x86/gart', 'x86/i8259', 'x86/intel', 'x86/irqstats', 'x86/kconfig', 'x86/ldt', 'x86/mce', 'x86/memtest', 'x86/pat', 'x86/ptemask', 'x86/resumetrace', 'x86/threadinfo', 'x86/timers', 'x86/vdso' and 'x86/xen' into x86/devel
@@ -142,7 +142,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         /*
          * Don't remap the low PCI/ISA area, it's always mapped..
          */
-        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+        if (is_ISA_range(phys_addr, last_addr))
                 return (__force void __iomem *)phys_to_virt(phys_addr);
 
         /*
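
The first hunk replaces the open-coded ISA bounds test with the is_ISA_range() helper. Below is a minimal standalone sketch of the equivalent check, assuming the conventional ISA window (0xa0000 to 0x100000) and the same half-open comparison as the line being removed; the real macro is defined elsewhere in the kernel headers and may differ in spelling.

#include <stdio.h>
#include <stdint.h>

/* Assumed values: the legacy PCI/ISA hole below 1 MiB. */
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000

/* Sketch of the helper the diff switches to; it preserves the
 * semantics of the removed open-coded check. */
#define is_ISA_range(s, e) \
        ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)

int main(void)
{
        uint64_t phys_addr = 0xb8000;               /* VGA text buffer */
        uint64_t last_addr = phys_addr + 0x1000 - 1;

        if (is_ISA_range(phys_addr, last_addr))
                printf("0x%llx..0x%llx: ISA range, not remapped\n",
                       (unsigned long long)phys_addr,
                       (unsigned long long)last_addr);
        return 0;
}
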
@@ -261,7 +261,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
         /*
          * Ideally, this should be:
-         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
          *
          * Till we fix all X drivers to use ioremap_wc(), we will use
          * UC MINUS.
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-        if (pat_wc_enabled)
+        if (pat_enabled)
                 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                         __builtin_return_address(0));
         else
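
The two hunks above only rename the PAT feature flag from pat_wc_enabled to pat_enabled, once in the ioremap_nocache() comment and once in the ioremap_wc() guard. A toy sketch of the cache-type selection those two sites describe, with hypothetical enum values standing in for the kernel's _PAGE_CACHE_* flags:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's _PAGE_CACHE_* memory types. */
enum cache_type { CACHE_WB, CACHE_WC, CACHE_UC_MINUS, CACHE_UC };

static bool pat_enabled = true;         /* assume PAT was detected at boot */

/* ioremap_nocache(): per the comment, UC would be ideal when PAT is on,
 * but UC_MINUS stays in use until the X drivers move to ioremap_wc(). */
static enum cache_type nocache_type(void)
{
        /* ideally: return pat_enabled ? CACHE_UC : CACHE_UC_MINUS; */
        return CACHE_UC_MINUS;
}

/* ioremap_wc(): write-combining only when PAT is available, otherwise
 * fall back to the uncached-minus mapping, as the guard in the diff does. */
static enum cache_type wc_type(void)
{
        return pat_enabled ? CACHE_WC : nocache_type();
}

int main(void)
{
        printf("nocache -> %d, wc -> %d\n", nocache_type(), wc_type());
        return 0;
}
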
@@ -318,8 +318,8 @@ void iounmap(volatile void __iomem *addr)
          * vm_area and by simply returning an address into the kernel mapping
          * of ISA space. So handle that here.
          */
-        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-            addr < phys_to_virt(ISA_END_ADDRESS))
+        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                 return;
 
         addr = (volatile void __iomem *)
@@ -332,7 +332,7 @@ void iounmap(volatile void __iomem *addr)
            cpa takes care of the direct mappings. */
         read_lock(&vmlist_lock);
         for (p = vmlist; p; p = p->next) {
-                if (p->addr == addr)
+                if (p->addr == (void __force *)addr)
                         break;
         }
         read_unlock(&vmlist_lock);
@@ -346,7 +346,7 @@ void iounmap(volatile void __iomem *addr)
         free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
         /* Finally remove it */
-        o = remove_vm_area((void *)addr);
+        o = remove_vm_area((void __force *)addr);
         BUG_ON(p != o || o == NULL);
         kfree(p);
 }
@@ -365,7 +365,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
         if (page_is_ram(start >> PAGE_SHIFT))
                 return __va(phys);
 
-        addr = (void *)ioremap(start, PAGE_SIZE);
+        addr = (void __force *)ioremap(start, PAGE_SIZE);
         if (addr)
                 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
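
The iounmap() and xlate_dev_mem_ptr() hunks above add (void __force *) casts wherever an __iomem pointer is compared with, or handed to, code that expects a plain kernel pointer. These annotations only matter to the sparse checker. The sketch below models how they are conventionally defined (assumed to mirror the kernel's compiler.h) and shows why crossing address spaces needs an explicit __force cast; the helper names are illustrative, not kernel APIs.

/* Sketch of the sparse address-space annotations: under a normal compile
 * they expand to nothing, and only sparse (which defines __CHECKER__)
 * sees them. */
#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
# define __force __attribute__((force))
#else
# define __iomem
# define __force
#endif

static char fake_isa_window[0x60000];   /* illustrative stand-in mapping */

/* Hypothetical helper for illustration only, not the kernel function. */
static void *phys_to_virt_demo(unsigned long phys)
{
        return fake_isa_window + (phys - 0xa0000);
}

static int addr_is_isa(volatile void __iomem *addr)
{
        /* Comparing an __iomem pointer against a plain pointer makes sparse
         * complain unless the cast is marked __force -- exactly the casts
         * the iounmap() hunks add. */
        return (void __force *)addr >= phys_to_virt_demo(0xa0000) &&
               (void __force *)addr <  phys_to_virt_demo(0x100000);
}

int main(void)
{
        volatile void __iomem *p =
                (__force volatile void __iomem *)(fake_isa_window + 0x100);
        return addr_is_isa(p) ? 0 : 1;
}
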
@@ -593,10 +593,11 @@ void __init early_iounmap(void *addr, unsigned long size)
         unsigned long offset;
         unsigned int nrpages;
         enum fixed_addresses idx;
-        unsigned int nesting;
+        int nesting;
 
         nesting = --early_ioremap_nested;
-        WARN_ON(nesting < 0);
+        if (WARN_ON(nesting < 0))
+                return;
 
         if (early_ioremap_debug) {
                 printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
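
The final hunk makes nesting a signed int and uses the fact that WARN_ON() evaluates to the condition it tested, so an underflow both warns and returns early instead of continuing with a bogus nesting index. A standalone sketch of that pattern, with a toy WARN_ON() standing in for the kernel macro:

#include <stdio.h>

/* Toy stand-in for the kernel's WARN_ON(): report the hit and hand the
 * tested condition back to the caller, which is what makes
 * `if (WARN_ON(cond)) return;` work. */
#define WARN_ON(cond) ({                                                \
        int __ret = !!(cond);                                           \
        if (__ret)                                                      \
                fprintf(stderr, "WARNING at %s:%d: %s\n",               \
                        __FILE__, __LINE__, #cond);                     \
        __ret;                                                          \
})

static int early_ioremap_nested;        /* nesting depth, as in the diff */

static void early_iounmap_demo(void)
{
        int nesting = --early_ioremap_nested;

        /* Must be signed: with `unsigned int nesting` the < 0 test could
         * never fire and the underflow would go unnoticed. */
        if (WARN_ON(nesting < 0))
                return;

        printf("unmapped, nesting now %d\n", nesting);
}

int main(void)
{
        early_ioremap_nested = 1;
        early_iounmap_demo();   /* fine */
        early_iounmap_demo();   /* underflow: warns and returns early */
        return 0;
}
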