mm: numa: Introduce last_nid to the page frame

This patch introduces a last_nid field to the page struct. It is used to build a two-stage filter in the next patch, aimed at mitigating a problem whereby pages migrate to the wrong node when referenced by a process that was running off its home node.

Signed-off-by: Mel Gorman <mgorman@suse.de>
This commit is contained in:
@@ -693,6 +693,36 @@ static inline int page_to_nid(const struct page *page)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_NUMA_BALANCING
/*
 * Accessors for page->_last_nid, which records the NUMA node that last
 * accessed the page. Used by automatic NUMA balancing to decide whether a
 * page should migrate (see the commit message: it feeds a two-stage filter
 * that mitigates migration to the wrong node).
 */

/* Atomically record @nid as the last node to touch @page, returning the
 * previous value. xchg provides the atomicity; callers may race. */
static inline int page_xchg_last_nid(struct page *page, int nid)
{
	return xchg(&page->_last_nid, nid);
}

/* Plain (non-atomic) read of the last node that touched @page. */
static inline int page_last_nid(struct page *page)
{
	return page->_last_nid;
}

/* Mark @page as never accessed: -1 is the "no node recorded" sentinel,
 * distinguishable from any valid node id (node ids are >= 0). */
static inline void reset_page_last_nid(struct page *page)
{
	page->_last_nid = -1;
}
#else
/*
 * !CONFIG_NUMA_BALANCING: struct page has no _last_nid field, so fall back
 * to the node the page currently belongs to. The xchg variant cannot report
 * a meaningful "previous" value; returning the current node keeps callers'
 * "did the node change?" comparisons trivially false.
 */
static inline int page_xchg_last_nid(struct page *page, int nid)
{
	return page_to_nid(page);
}

static inline int page_last_nid(struct page *page)
{
	return page_to_nid(page);
}

static inline void reset_page_last_nid(struct page *page)
{
}
#endif
|
||||||
|
|
||||||
static inline struct zone *page_zone(const struct page *page)
|
static inline struct zone *page_zone(const struct page *page)
|
||||||
{
|
{
|
||||||
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
|
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
|
||||||
|
@@ -175,6 +175,10 @@ struct page {
|
|||||||
*/
|
*/
|
||||||
void *shadow;
|
void *shadow;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_NUMA_BALANCING
|
||||||
|
int _last_nid;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* The struct page can be forced to be double word aligned so that atomic ops
|
* The struct page can be forced to be double word aligned so that atomic ops
|
||||||
|
@@ -608,6 +608,7 @@ static inline int free_pages_check(struct page *page)
|
|||||||
bad_page(page);
|
bad_page(page);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
reset_page_last_nid(page);
|
||||||
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
|
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
|
||||||
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
|
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
|
||||||
return 0;
|
return 0;
|
||||||
@@ -3826,6 +3827,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|||||||
mminit_verify_page_links(page, zone, nid, pfn);
|
mminit_verify_page_links(page, zone, nid, pfn);
|
||||||
init_page_count(page);
|
init_page_count(page);
|
||||||
reset_page_mapcount(page);
|
reset_page_mapcount(page);
|
||||||
|
reset_page_last_nid(page);
|
||||||
SetPageReserved(page);
|
SetPageReserved(page);
|
||||||
/*
|
/*
|
||||||
* Mark the block movable so that blocks are reserved for
|
* Mark the block movable so that blocks are reserved for
|
||||||
|
Reference in New Issue
Block a user