x86: unify kmap_atomic_pfn() and iomap_atomic_prot_pfn()

kmap_atomic_pfn() and iomap_atomic_prot_pfn() are almost identical; the only
difference is the pgprot used for the mapping. Remove the code duplication by
introducing kmap_atomic_prot_pfn() and turning both functions into callers of it.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
LKML-Reference: <20090311143317.GA22244@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit bb6d59ca92 (parent 78b020d035), committed by Ingo Molnar
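After the change, the fixmap-based mapping logic lives in a single helper that takes the pgprot, and the two former copies become thin callers of it. Below is a condensed sketch of the post-patch call structure; every identifier is taken from the diff that follows, but headers and EXPORT_SYMBOL lines are omitted, so treat it as an overview rather than a drop-in file.

/* One helper does the per-CPU fixmap mapping with an arbitrary pgprot. */
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        /* Pick this CPU's fixmap slot for the given km_type and install the pte. */
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

/* The plain pfn variant keeps the default kernel protection ... */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}

/* ... and the iomap variant only adjusts prot for non-PAT systems first. */
void *iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;

        return kmap_atomic_prot_pfn(pfn, type, prot);
}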
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -121,23 +121,28 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
-/* This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
 	pagefault_disable();
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
+	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
 	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
+
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
+}
 EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,6 +18,7 @@
 
 #include <asm/iomap.h>
 #include <asm/pat.h>
+#include <asm/highmem.h>
 #include <linux/module.h>
 
 int is_io_mapping_possible(resource_size_t base, unsigned long size)
@@ -36,11 +37,6 @@ EXPORT_SYMBOL_GPL(is_io_mapping_possible);
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	pagefault_disable();
-
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
 	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
@@ -50,12 +46,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void*) vaddr;
+	return kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
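For context, a hypothetical caller of the pfn-based interface might look like the sketch below. The function name, the pfn argument, and the KM_USER0 slot choice are illustrative only and do not come from this patch; the map/access/unmap pairing follows the kmap_atomic_pfn()/kunmap_atomic() prototypes shown in the header hunk above, and assumes a 32-bit x86 kernel of this era with CONFIG_HIGHMEM.

#include <linux/types.h>	/* size_t */
#include <linux/string.h>	/* memcpy() */
#include <linux/highmem.h>	/* kmap_atomic_pfn(), kunmap_atomic() */

/*
 * Illustrative only: copy up to one page from a page frame that has no
 * struct page (e.g. part of a device aperture). The mapping is per-CPU
 * and atomic, so sleeping between map and unmap is not allowed.
 */
static void copy_from_raw_pfn(void *dst, unsigned long pfn, size_t len)
{
	void *src = kmap_atomic_pfn(pfn, KM_USER0);	/* uses default kmap_prot */

	memcpy(dst, src, len);		/* len must not exceed PAGE_SIZE */
	kunmap_atomic(src, KM_USER0);
}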