[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with a many-threaded application which concurrently initializes different parts of a large anonymous area.

This patch corrects that, by using a separate spinlock per page table page, to guard the page table entries in that page, instead of using the mm's single page_table_lock. (But even then, page_table_lock is still used to guard page table allocation, and anon_vma allocation.)

In this implementation, the spinlock is tucked inside the struct page of the page table page: with a BUILD_BUG_ON in case it overflows - which it would in the case of 32-bit PA-RISC with spinlock debugging enabled.

Splitting the lock is not quite for free: another cacheline access. Ideally, I suppose we would use split ptlock only for multi-threaded processes on multi-cpu machines; but deciding that dynamically would have its own costs. So for now enable it by config, at some number of cpus - since the Kconfig language doesn't support inequalities, let preprocessor compare that with NR_CPUS. But I don't think it's worth being user-configurable: for good testing of both split and unsplit configs, split now at 4 cpus, and perhaps change that to 8 later.

There is a benefit even for singly threaded processes: kswapd can be attacking one part of the mm while another part is busy faulting.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 4c21e2f244
parent b38c6845b6
Committer: Linus Torvalds
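For orientation before the diff: callers never name the lock directly; they go through pte_offset_map_lock() (modified at the end of this patch), which hands back whichever spinlock pte_lockptr() resolves to. Below is a minimal sketch of such a caller, assuming the pte_unmap_unlock() helper from the same patch series; the function name is hypothetical.

/*
 * Hypothetical walker, for illustration only: visits each pte in
 * [addr, end) under the lock guarding that page table page.  With
 * split ptlock this serializes only against users of the same page
 * table page, not against the whole mm.
 */
static void walk_ptes_sketch(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	do {
		/* inspect or update *pte here, lock held */
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_unmap_unlock(pte - 1, ptl);
}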
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -226,13 +226,18 @@ struct page {
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-	unsigned long private;		/* Mapping-private opaque data:
+	union {
+		unsigned long private;	/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
 					 * swp_entry_t if PageSwapCache
 					 * When page is free, this indicates
 					 * order in the buddy system.
 					 */
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+		spinlock_t ptl;
+#endif
+	} u;
 	struct address_space *mapping;	/* If low bit clear, points to
 					 * inode address_space, or NULL.
 					 * If page mapped as anonymous
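The BUILD_BUG_ON the changelog mentions is not in this hunk; as an illustration of the constraint it enforces (the helper name and the exact comparison are assumptions here), the embedded lock must not widen the union beyond the unsigned long it shares with private, or every struct page in mem_map would grow:

/*
 * Illustrative only: DEBUG_SPINLOCK can inflate spinlock_t (the
 * changelog cites 32-bit PA-RISC), so fail the build rather than
 * silently bloat struct page.  BUILD_BUG_ON must sit inside a
 * function to compile.
 */
static inline void ptl_fits_in_private_sketch(void)
{
	BUILD_BUG_ON(sizeof(spinlock_t) > sizeof(unsigned long));
}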
@@ -260,6 +265,9 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+#define page_private(page)		((page)->u.private)
+#define set_page_private(page, v)	((page)->u.private = (v))
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
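These accessors exist because private has moved inside the union: open-coded page->private references around the tree (several are converted below) would no longer compile against the new layout. A sketch of the swap-cache convention the struct page comment describes, as a hypothetical helper:

/*
 * Illustrative only: for a PageSwapCache page the private slot holds
 * a swp_entry_t's value ("swp_entry_t if PageSwapCache" above), read
 * back through the accessor.  swp_entry_t comes from <linux/swap.h>.
 */
static inline swp_entry_t page_swap_entry_sketch(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	return entry;
}

page_index(), converted further down, leans on the same convention: for swap cache pages the value in the private slot doubles as the page's index.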
@@ -311,17 +319,17 @@ extern void FASTCALL(__page_cache_release(struct page *));
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-static inline int page_count(struct page *p)
+static inline int page_count(struct page *page)
 {
-	if (PageCompound(p))
-		p = (struct page *)p->private;
-	return atomic_read(&(p)->_count) + 1;
+	if (PageCompound(page))
+		page = (struct page *)page_private(page);
+	return atomic_read(&page->_count) + 1;
 }
 
 static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
-		page = (struct page *)page->private;
+		page = (struct page *)page_private(page);
 	atomic_inc(&page->_count);
 }
 
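The pattern shared by page_count() and get_page() above is the compound-page convention: tail pages delegate their reference count to the head page. Pulled out as a hypothetical helper for clarity (it simply mirrors the two call sites):

/*
 * Illustrative only: a compound (e.g. hugetlb) page stores a pointer
 * to its head page in each constituent page's private slot; the
 * single shared _count lives in the head page.
 */
static inline struct page *compound_head_sketch(struct page *page)
{
	if (PageCompound(page))
		page = (struct page *)page_private(page);
	return page;
}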
@@ -587,7 +595,7 @@ static inline int PageAnon(struct page *page)
 static inline pgoff_t page_index(struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
-		return page->private;
+		return page_private(page);
 	return page->index;
 }
 
@@ -779,9 +787,31 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+/*
+ * We tuck a spinlock to guard each pagetable page into its struct page,
+ * at page->private, with BUILD_BUG_ON to make sure that this will not
+ * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+ * When freeing, reset page->mapping so free_pages_check won't complain.
+ */
+#define __pte_lockptr(page)	&((page)->u.ptl)
+#define pte_lock_init(_page)	do {					\
+	spin_lock_init(__pte_lockptr(_page));				\
+} while (0)
+#define pte_lock_deinit(page)	((page)->mapping = NULL)
+#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+#else
+/*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+#define pte_lock_init(page)	do {} while (0)
+#define pte_lock_deinit(page)	do {} while (0)
+#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
 #define pte_offset_map_lock(mm, pmd, address, ptlp)	\
 ({							\
-	spinlock_t *__ptl = &(mm)->page_table_lock;	\
+	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
 	pte_t *__pte = pte_offset_map(pmd, address);	\
 	*(ptlp) = __ptl;				\
 	spin_lock(__ptl);				\
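Finally, the lifecycle these macros imply for a page table page, sketched as a pair of hypothetical helpers (the real call sites are in the allocation and freeing paths, which this excerpt of the patch does not show):

/*
 * Illustrative only: the embedded lock must be initialized before the
 * page is wired into a pmd, and pte_lock_deinit() must run before the
 * page goes back to the buddy allocator - per the comment above, it
 * resets page->mapping to NULL so free_pages_check won't complain.
 * Both are no-ops in the unsplit configuration.
 */
static void pte_page_ctor_sketch(struct page *pte_page)
{
	pte_lock_init(pte_page);
}

static void pte_page_dtor_sketch(struct page *pte_page)
{
	pte_lock_deinit(pte_page);
}

Note the small design choice visible in pte_lockptr(): each variant casts the argument it does not need to (void), so both macro versions consume both arguments and callers compile without unused-variable warnings whichever configuration is selected.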