[PATCH] mm: tlb_gather_mmu get_cpu_var
tlb_gather_mmu dates from before kernel preemption was allowed, and uses
smp_processor_id or __get_cpu_var to find its per-cpu mmu_gather. That
works because it's currently only called after getting page_table_lock,
which is not dropped until after the matching tlb_finish_mmu. But don't
rely on that; it will soon change: now disable preemption internally by
proper get_cpu_var in tlb_gather_mmu, put_cpu_var in tlb_finish_mmu.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 15a23ffa2f
parent 7be7a54699
committed by Linus Torvalds
@@ -39,8 +39,7 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	int cpu = smp_processor_id();
-	struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	tlb->freed = 0;
@@ -65,6 +64,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
 }
 
 static inline unsigned int tlb_is_full_mm(struct mmu_gather *tlb)
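
For readers unfamiliar with the per-CPU accessors, here is a minimal sketch
of the pattern the patch switches to. The names hypothetical_counter and
bump_counter are illustrative, not taken from the patch; the point is that
get_cpu_var disables preemption before evaluating to this CPU's copy of
the variable, and put_cpu_var re-enables it, so the task cannot migrate
between the two calls.

#include <linux/percpu.h>

/* Illustrative per-CPU variable; the patch itself uses mmu_gathers. */
static DEFINE_PER_CPU(int, hypothetical_counter);

static void bump_counter(void)
{
	/*
	 * get_cpu_var() calls preempt_disable() and then evaluates to
	 * this CPU's copy of the variable, so taking its address yields
	 * a pointer that stays valid: the task cannot be migrated to
	 * another CPU while preemption is off.
	 */
	int *p = &get_cpu_var(hypothetical_counter);

	(*p)++;		/* safe: still pinned to the same CPU */

	put_cpu_var(hypothetical_counter);	/* re-enables preemption */
}

Under the old smp_processor_id()/per_cpu() scheme nothing in tlb_gather_mmu
itself disabled preemption, so the code was only safe while the caller held
page_table_lock; with get_cpu_var/put_cpu_var the pinning is self-contained,
which is what the hunks above implement for mmu_gathers.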