sparc: mmu_gather rework
Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:
 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs
in per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 90f08e399d
parent d6bf29b44d
committed by Linus Torvalds
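For context, the fields the reworked code touches in the hunks below (tb->mm, tb->tlb_nr, tb->vaddrs[i]) suggest a per-cpu batch structure roughly like the sketch that follows. This is an illustrative reconstruction, not the header text from the patch; the TLB_BATCH_NR constant and the per-cpu declaration name are assumptions.

/* Illustrative sketch only: a per-cpu batch of user vaddrs to unhash,
 * field names follow the usage visible in the diff below.
 * TLB_BATCH_NR is an assumed batch-size constant. */
struct tlb_batch {
	struct mm_struct *mm;			/* mm whose mappings are batched */
	unsigned long tlb_nr;			/* number of vaddrs queued so far */
	unsigned long vaddrs[TLB_BATCH_NR];	/* virtual addresses to unhash */
};

/* One batch per CPU, drained on context switch (per the changelog). */
DECLARE_PER_CPU(struct tlb_batch, tlb_batch);

With the vaddr tracking moved into such a per-cpu structure, the TSB flush helpers no longer need to know anything about the generic mmu_gather, which is what the rename from struct mmu_gather *mp to struct tlb_batch *tb in the diff accomplishes.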
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
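To see where flush_tsb_user(tb) fits after this change, the following is a rough sketch of how the per-cpu batch might be drained, e.g. from the context-switch path the changelog mentions. It is loosely modelled on sparc's flush_tlb_pending() path; the function names, the get_cpu_var()/put_cpu_var() usage and the SMP/UP split are assumptions, not part of the hunks shown above.

/* Rough sketch (not taken from this patch): drain the per-cpu batch,
 * unhashing the batched vaddrs from the TSB before flushing the TLB. */
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);		/* the function changed above */

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}

Because the batch is per-cpu rather than embedded in the generic mmu_gather, it must be drained before the CPU can switch to another mm, which is why the changelog ties the flush to context switch.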