ksm: hold anon_vma in rmap_item
For full functionality, page_referenced_one() and try_to_unmap_one() need to know the vma: to pass vma down to arch-dependent flushes, or to observe VM_LOCKED or VM_EXEC. But KSM keeps no record of vma: nor can it, since vmas get split and merged without its knowledge. Instead, note page's anon_vma in its rmap_item when adding to stable tree: all the vmas which might map that page are listed by its anon_vma. page_referenced_ksm() and try_to_unmap_ksm() then traverse the anon_vma, first to find the probable vma, that which matches rmap_item's mm; but if that is not enough to locate all instances, traverse again to try the others. This catches those occasions when fork has duplicated a pte of a ksm page, but ksmd has not yet come around to assign it an rmap_item. But each rmap_item in the stable tree which refers to an anon_vma needs to take a reference to it. Andrea's anon_vma design cleverly avoided a reference count (an anon_vma was free when its list of vmas was empty), but KSM now needs to add that. Is a 32-bit count sufficient? I believe so - the anon_vma is only free when both count is 0 and list is empty. Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk> Cc: Izik Eidus <ieidus@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Chris Wright <chrisw@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
5ad6468801
commit
db114b83ab
@ -26,6 +26,9 @@
|
||||
*/
|
||||
struct anon_vma {
|
||||
spinlock_t lock; /* Serialize access to vma list */
|
||||
#ifdef CONFIG_KSM
|
||||
atomic_t ksm_refcount;
|
||||
#endif
|
||||
/*
|
||||
* NOTE: the LSB of the head.next is set by
|
||||
* mm_take_all_locks() _after_ taking the above lock. So the
|
||||
@ -38,6 +41,26 @@ struct anon_vma {
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_KSM
|
||||
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
|
||||
{
|
||||
atomic_set(&anon_vma->ksm_refcount, 0);
|
||||
}
|
||||
|
||||
static inline int ksm_refcount(struct anon_vma *anon_vma)
|
||||
{
|
||||
return atomic_read(&anon_vma->ksm_refcount);
|
||||
}
|
||||
#else
|
||||
/*
 * !CONFIG_KSM stub: the ksm_refcount field does not exist in
 * struct anon_vma, so there is nothing to initialize.
 */
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
}
|
||||
|
||||
/*
 * !CONFIG_KSM stub: with KSM compiled out no rmap_item can hold a
 * reference, so report zero — freeing an anon_vma is never blocked
 * on this count.
 */
static inline int ksm_refcount(struct anon_vma *anon_vma)
{
	return 0;
}
|
||||
#endif /* CONFIG_KSM */
|
||||
|
||||
static inline struct anon_vma *page_anon_vma(struct page *page)
|
||||
{
|
||||
@ -70,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
|
||||
void anon_vma_unlink(struct vm_area_struct *);
|
||||
void anon_vma_link(struct vm_area_struct *);
|
||||
void __anon_vma_link(struct vm_area_struct *);
|
||||
void anon_vma_free(struct anon_vma *);
|
||||
|
||||
/*
|
||||
* rmap interfaces called when adding or removing pte of page
|
||||
|
Reference in New Issue
Block a user