x86: style cleanup of ioremap code
Fix the coding style before going further.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 91eebf40b3, parent 1aaf74e919; committed by Ingo Molnar
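The cleanups below are mechanical CodingStyle fixes, mostly of two kinds: every printk() gains an explicit KERN_* severity prefix, and over-long lines are wrapped to stay within 80 columns. A minimal sketch of the two patterns (illustrative only; example_map() is a hypothetical declaration, not code from this patch):

	/*
	 * A bare printk() is logged at the default level; an explicit
	 * KERN_* prefix lets klogd/dmesg filter by severity.
	 */
	printk(KERN_ERR "iounmap: bad address %p\n", addr);

	/*
	 * Lines longer than 80 columns are broken, with the continuation
	 * indented to line up under the opening parenthesis:
	 */
	void __iomem *example_map(unsigned long phys_addr, unsigned long size,
				  unsigned long flags);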
@@ -1,6 +1,4 @@
 /*
- * arch/i386/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -21,10 +19,6 @@
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000

-/*
- * Generic mapping function (not visible outside):
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -34,7 +28,8 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
@@ -62,7 +57,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	t_addr = __va(phys_addr);
 	t_end = t_addr + (size - 1);

-	for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+	for (page = virt_to_page(t_addr);
+	     page <= virt_to_page(t_end); page++)
 		if (!PageReserved(page))
 			return NULL;
 }
@@ -114,11 +110,11 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr;
 	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
+
 	if (!p)
 		return p;

@@ -172,7 +168,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;

-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+			(PAGE_MASK & (unsigned long __force)addr);

 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
@@ -187,7 +184,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);

 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
@@ -237,7 +234,7 @@ void __init early_ioremap_init(void)
 	unsigned long *pgd;

 	if (early_ioremap_debug)
-		printk("early_ioremap_init()\n");
+		printk(KERN_DEBUG "early_ioremap_init()\n");

 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -248,15 +245,16 @@ void __init early_ioremap_init(void)
 	 */
 	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		printk("pgd %p != %p\n",
+		printk(KERN_WARNING "pgd %p != %p\n",
 		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
 			fix_to_virt(FIX_BTMAP_BEGIN));
-		printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
 			fix_to_virt(FIX_BTMAP_END));

-		printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
-		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
+			FIX_BTMAP_BEGIN);
 	}
 }
@@ -265,7 +263,7 @@ void __init early_ioremap_clear(void)
 	unsigned long *pgd;

 	if (early_ioremap_debug)
-		printk("early_ioremap_clear()\n");
+		printk(KERN_DEBUG "early_ioremap_clear()\n");

 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
@@ -351,7 +349,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)

 	nesting = early_ioremap_nested;
 	if (early_ioremap_debug) {
-		printk("early_ioremap(%08lx, %08lx) [%d] => ",
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
 			phys_addr, size, nesting);
 		dump_stack();
 	}
@@ -413,7 +411,8 @@ void __init early_iounmap(void *addr, unsigned long size)
 	WARN_ON(nesting < 0);

 	if (early_ioremap_debug) {
-		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+			size, nesting);
 		dump_stack();
 	}

@@ -1,6 +1,4 @@
 /*
- * arch/x86_64/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -33,8 +31,7 @@ EXPORT_SYMBOL(__phys_addr);
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 				unsigned long flags)
 {
 	int err = 0;
@@ -50,20 +47,18 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 		if (!lookup_address(vaddr, &level))
 			return err;
 		/*
-		 * Must use a address here and not struct page because the phys addr
-		 * can be a in hole between nodes and not have an memmap entry.
+		 * Must use a address here and not struct page because
+		 * the phys addr can be a in hole between nodes and
+		 * not have an memmap entry.
 		 */
-		err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
+		err = change_page_attr_addr(vaddr,npages,
+					MAKE_GLOBAL(__PAGE_KERNEL|flags));
 		if (!err)
 			global_flush_tlb();
 	}
 	return err;
 }

-/*
- * Generic mapping function
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -73,7 +68,8 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
 	void *addr;
 	struct vm_struct *area;
@@ -142,7 +138,6 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
@@ -165,7 +160,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;

-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+		(PAGE_MASK & (unsigned long __force)addr);
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
@@ -179,7 +175,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);

 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}