mm: Convert i_mmap_lock to a mutex
Straightforward conversion of i_mmap_lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
97a894136f
commit
3d48ae45e7
22
mm/mmap.c
22
mm/mmap.c
@@ -194,7 +194,7 @@ error:
|
||||
}
|
||||
|
||||
/*
|
||||
* Requires inode->i_mapping->i_mmap_lock
|
||||
* Requires inode->i_mapping->i_mmap_mutex
|
||||
*/
|
||||
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
|
||||
struct file *file, struct address_space *mapping)
|
||||
@@ -222,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
|
||||
|
||||
if (file) {
|
||||
struct address_space *mapping = file->f_mapping;
|
||||
spin_lock(&mapping->i_mmap_lock);
|
||||
mutex_lock(&mapping->i_mmap_mutex);
|
||||
__remove_shared_vm_struct(vma, file, mapping);
|
||||
spin_unlock(&mapping->i_mmap_lock);
|
||||
mutex_unlock(&mapping->i_mmap_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -446,13 +446,13 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
mapping = vma->vm_file->f_mapping;
|
||||
|
||||
if (mapping)
|
||||
spin_lock(&mapping->i_mmap_lock);
|
||||
mutex_lock(&mapping->i_mmap_mutex);
|
||||
|
||||
__vma_link(mm, vma, prev, rb_link, rb_parent);
|
||||
__vma_link_file(vma);
|
||||
|
||||
if (mapping)
|
||||
spin_unlock(&mapping->i_mmap_lock);
|
||||
mutex_unlock(&mapping->i_mmap_mutex);
|
||||
|
||||
mm->map_count++;
|
||||
validate_mm(mm);
|
||||
@@ -555,7 +555,7 @@ again: remove_next = 1 + (end > next->vm_end);
|
||||
mapping = file->f_mapping;
|
||||
if (!(vma->vm_flags & VM_NONLINEAR))
|
||||
root = &mapping->i_mmap;
|
||||
spin_lock(&mapping->i_mmap_lock);
|
||||
mutex_lock(&mapping->i_mmap_mutex);
|
||||
if (insert) {
|
||||
/*
|
||||
* Put into prio_tree now, so instantiated pages
|
||||
@@ -622,7 +622,7 @@ again: remove_next = 1 + (end > next->vm_end);
|
||||
if (anon_vma)
|
||||
anon_vma_unlock(anon_vma);
|
||||
if (mapping)
|
||||
spin_unlock(&mapping->i_mmap_lock);
|
||||
mutex_unlock(&mapping->i_mmap_mutex);
|
||||
|
||||
if (remove_next) {
|
||||
if (file) {
|
||||
@@ -2290,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)
|
||||
|
||||
/* Insert vm structure into process list sorted by address
|
||||
* and into the inode's i_mmap tree. If vm_file is non-NULL
|
||||
* then i_mmap_lock is taken here.
|
||||
* then i_mmap_mutex is taken here.
|
||||
*/
|
||||
int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
|
||||
{
|
||||
@@ -2532,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
|
||||
*/
|
||||
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
|
||||
BUG();
|
||||
spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
|
||||
mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
|
||||
* vma in this mm is backed by the same anon_vma or address_space.
|
||||
*
|
||||
* We can take all the locks in random order because the VM code
|
||||
* taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
|
||||
* taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
|
||||
* takes more than one of them in a row. Secondly we're protected
|
||||
* against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
|
||||
*
|
||||
@@ -2631,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
|
||||
* AS_MM_ALL_LOCKS can't change to 0 from under us
|
||||
* because we hold the mm_all_locks_mutex.
|
||||
*/
|
||||
spin_unlock(&mapping->i_mmap_lock);
|
||||
mutex_unlock(&mapping->i_mmap_mutex);
|
||||
if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
|
||||
&mapping->flags))
|
||||
BUG();
|
||||
|
Reference in New Issue
Block a user