Merge 'akpm' patch series
* Merge akpm patch series: (122 commits)
  drivers/connector/cn_proc.c: remove unused local
  Documentation/SubmitChecklist: add RCU debug config options
  reiserfs: use hweight_long()
  reiserfs: use proper little-endian bitops
  pnpacpi: register disabled resources
  drivers/rtc/rtc-tegra.c: properly initialize spinlock
  drivers/rtc/rtc-twl.c: check return value of twl_rtc_write_u8() in twl_rtc_set_time()
  drivers/rtc: add support for Qualcomm PMIC8xxx RTC
  drivers/rtc/rtc-s3c.c: support clock gating
  drivers/rtc/rtc-mpc5121.c: add support for RTC on MPC5200
  init: skip calibration delay if previously done
  misc/eeprom: add eeprom access driver for digsy_mtc board
  misc/eeprom: add driver for microwire 93xx46 EEPROMs
  checkpatch.pl: update $logFunctions
  checkpatch: make utf-8 test --strict
  checkpatch.pl: add ability to ignore various messages
  checkpatch: add a "prefer __aligned" check
  checkpatch: validate signature styles and To: and Cc: lines
  checkpatch: add __rcu as a sparse modifier
  checkpatch: suggest using min_t or max_t
  ...

Did this as a merge because of (trivial) conflicts in
 - Documentation/feature-removal-schedule.txt
 - arch/xtensa/include/asm/uaccess.h
that were just easier to fix up in the merge than in the patch series.
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -41,6 +41,7 @@ config SPARC64
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
+	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_MEMBLOCK
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_DYNAMIC_FTRACE
@@ -81,10 +82,6 @@ config IOMMU_HELPER
 	bool
 	default y if SPARC64
 
-config QUICKLIST
-	bool
-	default y if SPARC64
-
 config STACKTRACE_SUPPORT
 	bool
 	default y if SPARC64

--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -5,7 +5,6 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/quicklist.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -14,71 +13,114 @@
 
 /* Page table allocation/freeing. */
 
+extern struct kmem_cache *pgtable_cache;
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	quicklist_free(0, NULL, pgd);
+	kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	quicklist_free(0, NULL, pmd);
+	kmem_cache_free(pgtable_cache, pmd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return quicklist_alloc(0, GFP_KERNEL, NULL);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 				      unsigned long address)
 {
 	struct page *page;
-	void *pg;
+	pte_t *pte;
 
-	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
-	if (!pg)
+	pte = pte_alloc_one_kernel(mm, address);
+	if (!pte)
 		return NULL;
-	page = virt_to_page(pg);
+	page = virt_to_page(pte);
 	pgtable_page_ctor(page);
 	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	quicklist_free(0, NULL, pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
 	pgtable_page_dtor(ptepage);
-	quicklist_free_page(0, NULL, ptepage);
+	__free_page(ptepage);
 }
 
 
 #define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 #define pmd_pgtable(pmd)			pmd_page(pmd)
 
-static inline void check_pgt_cache(void)
-{
-	quicklist_trim(0, NULL, 25, 16);
-}
+#define check_pgt_cache()	do { } while (0)
+
+static inline void pgtable_free(void *table, bool is_page)
+{
+	if (is_page)
+		free_page((unsigned long)table);
+	else
+		kmem_cache_free(pgtable_cache, table);
+}
 
-#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
-#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+#ifdef CONFIG_SMP
+
+struct mmu_gather;
+extern void tlb_remove_table(struct mmu_gather *, void *);
+
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
+{
+	unsigned long pgf = (unsigned long)table;
+	if (is_page)
+		pgf |= 0x1UL;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~0x1UL);
+	bool is_page = false;
+
+	if ((unsigned long)_table & 0x1UL)
+		is_page = true;
+	pgtable_free(table, is_page);
+}
+#else /* CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
+{
+	pgtable_free(table, is_page);
+}
+#endif /* !CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
+				  unsigned long address)
+{
+	pgtable_page_dtor(ptepage);
+	pgtable_free_tlb(tlb, page_address(ptepage), true);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr)		     \
+	pgtable_free_tlb(tlb, pmd, false)
 
 #endif /* _SPARC64_PGALLOC_H */
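
A note on the SMP path above: with HAVE_RCU_TABLE_FREE selected in the Kconfig
hunk, pgtable_free_tlb() defers freeing through the generic tlb_remove_table()
machinery and smuggles the table's type in bit 0 of the pointer, which is free
because both page-backed and kmem_cache-backed tables are page aligned. A
minimal userspace sketch of that round trip (tag_table() and untag_table() are
hypothetical stand-ins for pgtable_free_tlb()/__tlb_remove_table(), not kernel
APIs):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Tag a page-table pointer with its type, as pgtable_free_tlb() does. */
static void *tag_table(void *table, bool is_page)
{
	uintptr_t pgf = (uintptr_t)table;

	if (is_page)
		pgf |= 0x1UL;	/* bit 0 is free: tables are page aligned */
	return (void *)pgf;
}

/* Recover pointer and type on the RCU side, as __tlb_remove_table() does. */
static void *untag_table(void *_table, bool *is_page)
{
	*is_page = (uintptr_t)_table & 0x1UL;
	return (void *)((uintptr_t)_table & ~(uintptr_t)0x1UL);
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096); /* stands in for __get_free_page() */
	bool is_page;

	assert(untag_table(tag_table(table, true), &is_page) == table && is_page);
	assert(untag_table(tag_table(table, false), &is_page) == table && !is_page);
	free(table);
	return 0;
}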

--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -95,6 +95,10 @@
 /* PTE bits which are the same in SUN4U and SUN4V format.  */
 #define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
 #define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+#define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
+
+/* Advertise support for _PAGE_SPECIAL */
+#define __HAVE_ARCH_PTE_SPECIAL
 
 /* SUN4U pte bits... */
 #define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
@@ -104,6 +108,7 @@
 #define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
 #define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
 #define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
@@ -133,6 +138,7 @@
 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
 #define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
 #define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
+#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
 #define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
 #define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
 #define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
@@ -302,10 +308,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
 	: "=r" (mask), "=r" (tmp)
 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
-	       _PAGE_SZBITS_4U),
+	       _PAGE_SZBITS_4U | _PAGE_SPECIAL),
 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
-	       _PAGE_SZBITS_4V));
+	       _PAGE_SZBITS_4V | _PAGE_SPECIAL));
 
 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
 }
@@ -502,6 +508,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
+	pte_val(pte) |= _PAGE_SPECIAL;
 	return pte;
 }
 
@@ -607,9 +614,9 @@ static inline unsigned long pte_present(pte_t pte)
 	return val;
 }
 
-static inline int pte_special(pte_t pte)
+static inline unsigned long pte_special(pte_t pte)
 {
-	return 0;
+	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 #define pmd_set(pmdp, ptep)	\
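
A single generic _PAGE_SPECIAL definition works here only because the same TTE
bit, 0x0200000000000000, is software-available in both the SUN4U and SUN4V
layouts (_PAGE_SPECIAL_4U and _PAGE_SPECIAL_4V above), so pte_mkspecial() and
pte_special() never have to check tlb_type. A standalone check, with the
values copied from the hunks above:

#include <stdio.h>

#define _PAGE_SPECIAL    0x0200000000000000ULL
#define _PAGE_SPECIAL_4U 0x0200000000000000ULL	/* SUN4U layout */
#define _PAGE_SPECIAL_4V 0x0200000000000000ULL	/* SUN4V layout */

int main(void)
{
	/* One definition serves both formats only because the bits agree. */
	_Static_assert(_PAGE_SPECIAL == _PAGE_SPECIAL_4U, "4U bit must match");
	_Static_assert(_PAGE_SPECIAL == _PAGE_SPECIAL_4V, "4V bit must match");
	printf("special bit: 0x%016llx\n", (unsigned long long)_PAGE_SPECIAL);
	return 0;
}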

--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -4,7 +4,7 @@
 asflags-y := -ansi
 ccflags-y := -Werror
 
-obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
+obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += loadmmu.o

--- /dev/null
+++ b/arch/sparc/mm/gup.c
@@ -0,0 +1,181 @@
+/*
+ * Lockless get_user_pages_fast for sparc, cribbed from powerpc
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask, result;
+	pte_t *ptep;
+
+	if (tlb_type == hypervisor) {
+		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
+		if (write)
+			result |= _PAGE_WRITE_4V;
+	} else {
+		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
+		if (write)
+			result |= _PAGE_WRITE_4U;
+	}
+	mask = result | _PAGE_SPECIAL;
+
+	ptep = pte_offset_kernel(&pmd, addr);
+	do {
+		struct page *page, *head;
+		pte_t pte = *ptep;
+
+		if ((pte_val(pte) & mask) != result)
+			return 0;
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+		/* The hugepage case is simplified on sparc64 because
+		 * we encode the sub-page pfn offsets into the
+		 * hugepage PTEs.  We could optimize this in the future to
+		 * use page_cache_add_speculative() for the hugepage case.
+		 */
+		page = pte_page(pte);
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
+			return 0;
+		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+			put_page(head);
+			return 0;
+		}
+
+		pages[*nr] = page;
+		(*nr)++;
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(pmd))
+			return 0;
+		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+			return 0;
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+			return 0;
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	pgd_t *pgdp;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch size
+	 * will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables from being freed on sparc.
+	 *
+	 * So long as we atomically load page table pointers versus teardown,
+	 * we can follow the address down to the page and take a ref on
+	 * it.
+	 */
+	local_irq_disable();
+
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+
+	{
+		int ret;
+
+slow:
+		local_irq_enable();
+
+		/* Try to get the remaining pages with get_user_pages */
+		start += nr << PAGE_SHIFT;
+		pages += nr;
+
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm, start,
+			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+		up_read(&mm->mmap_sem);
+
+		/* Have to be a bit careful with return values */
+		if (nr > 0) {
+			if (ret < 0)
+				ret = nr;
+			else
+				ret += nr;
+		}
+
+		return ret;
+	}
+}
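
The block after the fast-path return is the reason for the "careful with
return values" comment: if the irq-off walk pinned nr pages before falling
back, an error from get_user_pages() must not discard them. The same merge
logic in isolation (merge_gup_results() is a hypothetical name used only for
illustration):

#include <assert.h>

/* Combine fast-path pins (nr) with the fallback's result (ret). */
static int merge_gup_results(int nr, int ret)
{
	if (nr > 0) {
		if (ret < 0)
			ret = nr;	/* report the pages already pinned */
		else
			ret += nr;	/* fast-path plus slow-path pages */
	}
	return ret;
}

int main(void)
{
	assert(merge_gup_results(0, -14) == -14); /* nothing pinned: error stands */
	assert(merge_gup_results(3, -14) == 3);   /* keep the 3 pinned pages */
	assert(merge_gup_results(3, 5) == 8);     /* totals simply add */
	return 0;
}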

--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -236,6 +236,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	}
 }
 
+struct kmem_cache *pgtable_cache __read_mostly;
+
 static struct kmem_cache *tsb_caches[8] __read_mostly;
 
 static const char *tsb_cache_names[8] = {
@@ -253,6 +255,15 @@ void __init pgtable_cache_init(void)
 {
 	unsigned long i;
 
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  0,
+					  _clear_page);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
+	}
+
 	for (i = 0; i < 8; i++) {
 		unsigned long size = 8192 << i;
 		const char *name = tsb_cache_names[i];
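
The kmem_cache_create() call above passes object size PAGE_SIZE, alignment
PAGE_SIZE, no flags, and _clear_page as the constructor, so the tables handed
out by pgd_alloc()/pmd_alloc_one() in pgalloc_64.h come back page sized, page
aligned, and zeroed. (In the real slab allocator the constructor runs when an
object's backing memory first enters the cache, not on every allocation.) A
rough userspace analogue of the allocate-with-constructor pattern
(cache_alloc() is a hypothetical simplification, not the slab API):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Constructor run on each object, like _clear_page for pgtable_cache. */
static void clear_page_ctor(void *obj)
{
	memset(obj, 0, PAGE_SIZE);
}

/* Hand out one page-sized, page-aligned object, constructed. */
static void *cache_alloc(void (*ctor)(void *))
{
	void *obj = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (obj && ctor)
		ctor(obj);
	return obj;
}

int main(void)
{
	void *pgd = cache_alloc(clear_page_ctor);	/* ~ pgd_alloc() */

	free(pgd);
	return 0;
}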