[PATCH] memory hotplug locking: zone span seqlock
See the "fixup bad_range()" patch for more information, but this actually creates a the lock to protect things making assumptions about a zone's size staying constant at runtime. Signed-off-by: Dave Hansen <haveblue@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit bdc8cb9845
parent 208d54e551
committed by Linus Torvalds
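The "fixup bad_range()" patch referenced above is the main reader of this lock: a pfn-in-zone check must retry if a hotplug writer resizes the zone mid-read. A minimal sketch of that reader pattern, using the stock seqlock API directly (the function name here is illustrative; the exact helpers in the series may differ):

static int pfn_outside_zone(struct zone *zone, unsigned long pfn)
{
	unsigned seq;
	int ret;

	/* Re-read the span if a writer changed it while we looked. */
	do {
		seq = read_seqbegin(&zone->span_seqlock);
		ret = pfn < zone->zone_start_pfn ||
		      pfn >= zone->zone_start_pfn + zone->spanned_pages;
	} while (read_seqretry(&zone->span_seqlock, seq));

	return ret;
}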
@@ -12,6 +12,7 @@
 #include <linux/threads.h>
 #include <linux/numa.h>
 #include <linux/init.h>
+#include <linux/seqlock.h>
 #include <asm/atomic.h>

 /* Free memory management - zoned buddy allocator. */
@@ -137,6 +138,10 @@ struct zone {
	 * free areas of different sizes
	 */
	spinlock_t		lock;
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* see spanned/present_pages for more description */
+	seqlock_t		span_seqlock;
+#endif
	struct free_area	free_area[MAX_ORDER];
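Since span_seqlock only exists under CONFIG_MEMORY_HOTPLUG, callers presumably go through wrappers that compile to no-ops when the option is off; a sketch of what such wrappers could look like (names are illustrative, not taken from this hunk):

#ifdef CONFIG_MEMORY_HOTPLUG
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
#else
/* No hotplug: the span never changes, so readers never need to retry. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
#endif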
@@ -220,6 +225,16 @@ struct zone {
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

+	/*
+	 * zone_start_pfn, spanned_pages and present_pages are all
+	 * protected by span_seqlock.  It is a seqlock because it has
+	 * to be read outside of zone->lock, and it is done in the main
+	 * allocator path.  But, it is written quite infrequently.
+	 *
+	 * The lock is declared along with zone->lock because it is
+	 * frequently read in proximity to zone->lock.  It's good to
+	 * give them a chance of being in the same cacheline.
+	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */
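The write side matches the "written quite infrequently" note above: only a hotplug resize takes the seqlock for writing. A hypothetical writer (function name and surrounding serialization are assumptions, not part of this diff):

/* Sketch: grow a zone's span while readers keep using it.  Writers
 * are assumed to serialize among themselves elsewhere; the seqlock
 * only makes the update safe against lockless readers. */
static void grow_zone_span(struct zone *zone,
			   unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_end;

	write_seqlock(&zone->span_seqlock);
	old_end = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;
	if (end_pfn < old_end)
		end_pfn = old_end;
	zone->spanned_pages = end_pfn - zone->zone_start_pfn;
	write_sequnlock(&zone->span_seqlock);
}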