sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:

 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs
in per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent d6bf29b44d
commit 90f08e399d
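The hunks below (from arch/sparc/mm/tlb.c) convert the old per-cpu struct mmu_gather into a new per-cpu struct tlb_batch. The structure definition itself is not part of this extract; judging from the fields the code uses (mm, tlb_nr, vaddrs[] and the TLB_BATCH_NR overflow limit), it looks roughly like the sketch below. The real definition lives in the sparc headers (arch/sparc/include/asm/tlbflush_64.h); the batch size shown is illustrative.

/* Sketch of the per-cpu batch this diff operates on; field names come
 * from the code below, the TLB_BATCH_NR value is an assumption.
 */
#define TLB_BATCH_NR	192

struct tlb_batch {
	struct mm_struct *mm;			/* mm the queued vaddrs belong to */
	unsigned long tlb_nr;			/* number of vaddrs queued so far */
	unsigned long vaddrs[TLB_BATCH_NR];	/* vaddrs waiting to be unhashed */
};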
@@ -19,33 +19,34 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
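Per the commit message, the queued vaddrs now live in per-cpu data and are flushed on context switch. The context-switch hook itself is not part of this extract; a minimal, hypothetical sketch of how such a hook would drain the batch (not the literal sparc switch_mm()) is:

/* Hypothetical sketch only: drain the per-cpu tlb_batch before this
 * CPU starts running with a different mm, so queued vaddrs are never
 * flushed against the wrong context.
 */
static inline void example_switch_mm(struct mm_struct *old_mm,
				     struct mm_struct *new_mm)
{
	if (old_mm != new_mm)
		flush_tlb_pending();	/* flushes and resets tb->tlb_nr */

	/* ... install new_mm's context register / TSB as usual ... */
}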
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
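The new int fullmm parameter replaces the old mp->fullmm field: tlb_batch is persistent per-cpu state shared across unmap operations rather than a per-unmap mmu_gather, so the full-mm teardown case must now be passed in by each caller. Note also that get_cpu_var()/put_cpu_var() bracket the function to pin the per-cpu batch, which is why the fullmm early return gained a put_cpu_var() call. A hedged sketch of the kind of caller this signature implies, modeled on sparc's set_pte_at() path (illustrative; the corresponding pgtable_64.h hunk is not shown in this extract):

/* Illustrative caller, assuming the sparc convention that a PTE is
 * only worth unhashing when the old value was valid and the mm is a
 * real user mm.  _PAGE_VALID and the exact test are assumptions here.
 */
static inline void example_set_pte_at(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}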