Merge branch 'sh/pgtable' of git://github.com/mfleming/linux-2.6
@@ -6,10 +6,13 @@
 
 #define QUICK_PT 1	/* Other page table pages that are zero on free */
 
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
 #ifdef CONFIG_PGTABLE_LEVELS_3
-#include <asm/pgalloc_pmd.h>
-#else
-#include <asm/pgalloc_nopmd.h>
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
 #endif
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -67,7 +70,6 @@ do { \
 
 static inline void check_pgt_cache(void)
 {
-	__check_pgt_cache();
 	quicklist_trim(QUICK_PT, NULL, 25, 16);
 }
 
@@ -1,30 +0,0 @@
-#ifndef __ASM_SH_PGALLOC_NOPMD_H
-#define __ASM_SH_PGALLOC_NOPMD_H
-
-#define QUICK_PGD 0	/* We preserve special mappings over free */
-
-static inline void pgd_ctor(void *x)
-{
-	pgd_t *pgd = x;
-
-	memcpy(pgd + USER_PTRS_PER_PGD,
-	       swapper_pg_dir + USER_PTRS_PER_PGD,
-	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	quicklist_free(QUICK_PGD, NULL, pgd);
-}
-
-static inline void __check_pgt_cache(void)
-{
-	quicklist_trim(QUICK_PGD, NULL, 25, 16);
-}
-
-#endif /* __ASM_SH_PGALLOC_NOPMD_H */
@@ -1,41 +0,0 @@
-#ifndef __ASM_SH_PGALLOC_PMD_H
-#define __ASM_SH_PGALLOC_PMD_H
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd;
-	int i;
-
-	pgd = kzalloc(sizeof(*pgd) * PTRS_PER_PGD, GFP_KERNEL | __GFP_REPEAT);
-
-	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
-		pgd[i] = swapper_pg_dir[i];
-
-	return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	kfree(pgd);
-}
-
-static inline void __check_pgt_cache(void)
-{
-}
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-{
-	set_pud(pud, __pud((unsigned long)pmd));
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	quicklist_free(QUICK_PT, NULL, pmd);
-}
-
-#endif /* __ASM_SH_PGALLOC_PMD_H */
@@ -141,9 +141,9 @@ typedef pte_t *pte_addr_t;
 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
 /*
- * No page table caches to initialise
+ * Initialise the page table caches
  */
-#define pgtable_cache_init()	do { } while (0)
+extern void pgtable_cache_init(void);
 
 struct vm_area_struct;
 
@@ -43,11 +43,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
-{
-	pmd_val(*pmdp) = (unsigned long) ptep;
-}
-
 /*
  * PGD defines. Top level.
  */
@@ -202,12 +197,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
 #define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
 #define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
 
-/*
- * Handling allocation failures during page table setup.
- */
-extern void __handle_bad_pmd_kernel(pmd_t * pmd);
-#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)
-
 /*
  * PTE level access routines.
  *
@@ -17,11 +17,11 @@
 #define USER_PTRS_PER_PGD	2
 
 /* PMD bits */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 
-#define PTRS_PER_PMD	(PAGE_SIZE / sizeof(pmd_t))
+#define PTRS_PER_PMD	((1 << PGDIR_SHIFT) / PMD_SIZE)
 
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
@@ -219,7 +219,7 @@ config PAGE_SIZE_4KB
 
 config PAGE_SIZE_8KB
 	bool "8kB"
-	depends on !MMU || X2TLB && !PGTABLE_LEVELS_3
+	depends on !MMU || X2TLB
 	help
 	  This enables 8kB pages as supported by SH-X2 and later MMUs.
 
@@ -231,7 +231,7 @@ config PAGE_SIZE_16KB
 
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5
+	depends on !MMU || CPU_SH4 || CPU_SH5
 	help
 	  This enables support for 64kB pages, possible on all SH-4
 	  CPUs and later.
@@ -15,7 +15,7 @@ obj-y += $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
 mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault_$(BITS).o \
-			   ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o
+			   ioremap_$(BITS).o kmap.o pgtable.o tlbflush_$(BITS).o
 
 obj-y			+= $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
@@ -109,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
 static void sh4_flush_dcache_page(void *arg)
 {
 	struct page *page = arg;
+	unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
 	struct address_space *mapping = page_mapping(page);
 
@@ -116,16 +117,8 @@ static void sh4_flush_dcache_page(void *arg)
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
-	{
-		unsigned long phys = page_to_phys(page);
-		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
-		int i, n;
-
-		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
-			flush_cache_one(addr, phys);
-	}
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+				(addr & shm_align_mask), page_to_phys(page));
 
 	wmb();
 }
arch/sh/mm/pgtable.c (new file, 57 lines)
@@ -0,0 +1,57 @@
+#include <linux/mm.h>
+
+#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+
+static struct kmem_cache *pgd_cachep;
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+static struct kmem_cache *pmd_cachep;
+#endif
+
+void pgd_ctor(void *x)
+{
+	pgd_t *pgd = x;
+
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       swapper_pg_dir + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+void pgtable_cache_init(void)
+{
+	pgd_cachep = kmem_cache_create("pgd_cache",
+				       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
+				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
+#ifdef CONFIG_PGTABLE_LEVELS_3
+	pmd_cachep = kmem_cache_create("pmd_cache",
+				       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
+				       PAGE_SIZE, SLAB_PANIC, NULL);
+#endif
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(pgd_cachep, pgd);
+}
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	set_pud(pud, __pud((unsigned long)pmd));
+}
+
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(pmd_cachep, pmd);
+}
+#endif /* CONFIG_PGTABLE_LEVELS_3 */