Merge branch 'master' into for-next
Fast-forwarded to the current state of Linus' tree, as there are patches to be applied for files that didn't exist on the old branch.
@@ -53,6 +53,7 @@
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -565,7 +566,8 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -614,6 +616,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
+		/* This is the only non-empty list. Free them all. */
+		if (batch_free == MIGRATE_PCPTYPES)
+			batch_free = to_free;
+
 		do {
 			page = list_entry(list->prev, struct page, lru);
 			/* must delete as __free_one_page list manipulates */
@@ -750,7 +756,8 @@ static inline int check_new_page(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -863,9 +870,8 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		list_del(&page->lru);
-		list_add(&page->lru,
-			&zone->free_area[order].free_list[migratetype]);
+		list_move(&page->lru,
+			&zone->free_area[order].free_list[migratetype]);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -936,7 +942,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * If breaking a large block of pages, move all free
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
-			 * agressive about taking ownership of free pages
+			 * aggressive about taking ownership of free pages
 			 */
 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
 					start_migratetype == MIGRATE_RECLAIMABLE ||
@@ -1333,7 +1339,7 @@ again:
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
-	zone_statistics(preferred_zone, zone);
+	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
 
 	VM_BUG_ON(bad_range(zone, page));
@@ -1714,6 +1720,20 @@ try_next_zone:
 	return page;
 }
 
+/*
+ * Large machines with many possible nodes should not always dump per-node
+ * meminfo in irq context.
+ */
+static inline bool should_suppress_show_mem(void)
+{
+	bool ret = false;
+
+#if NODES_SHIFT > 8
+	ret = in_interrupt();
+#endif
+	return ret;
+}
+
 static inline int
 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 				unsigned long pages_reclaimed)
@@ -2085,7 +2105,7 @@ rebalance:
 					sync_migration);
 	if (page)
 		goto got_pg;
-	sync_migration = true;
+	sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2157,11 +2177,25 @@ rebalance:
 
 nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
-		printk(KERN_WARNING "%s: page allocation failure."
-			" order:%d, mode:0x%x\n",
+		unsigned int filter = SHOW_MEM_FILTER_NODES;
+
+		/*
+		 * This documents exceptions given to allocations in certain
+		 * contexts that are allowed to allocate outside current's set
+		 * of allowed nodes.
+		 */
+		if (!(gfp_mask & __GFP_NOMEMALLOC))
+			if (test_thread_flag(TIF_MEMDIE) ||
+			    (current->flags & (PF_MEMALLOC | PF_EXITING)))
+				filter &= ~SHOW_MEM_FILTER_NODES;
+		if (in_interrupt() || !wait)
+			filter &= ~SHOW_MEM_FILTER_NODES;
+
+		pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
 			current->comm, order, gfp_mask);
 		dump_stack();
-		show_mem();
+		if (!should_suppress_show_mem())
+			show_mem(filter);
 	}
 	return page;
 got_pg:
@@ -2411,19 +2445,42 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 }
 #endif
 
+/*
+ * Determine whether the zone's node should be displayed or not, depending on
+ * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
+ */
+static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
+{
+	bool ret = false;
+
+	if (!(flags & SHOW_MEM_FILTER_NODES))
+		goto out;
+
+	get_mems_allowed();
+	ret = !node_isset(zone->zone_pgdat->node_id,
+				cpuset_current_mems_allowed);
+	put_mems_allowed();
+out:
+	return ret;
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
+ * Suppresses nodes that are not allowed by current's cpuset if
+ * SHOW_MEM_FILTER_NODES is passed.
  */
-void show_free_areas(void)
+void __show_free_areas(unsigned int filter)
 {
 	int cpu;
 	struct zone *zone;
 
 	for_each_populated_zone(zone) {
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
@@ -2465,6 +2522,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		int i;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -2532,6 +2591,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s: ", zone->name);
 
@@ -2551,6 +2612,11 @@ void show_free_areas(void)
 	show_swap_cache_info();
 }
 
+void show_free_areas(void)
+{
+	__show_free_areas(0);
+}
+
 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
 {
 	zoneref->zone = zone;
@@ -3110,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data)
  * Called with zonelists_mutex held always
  * unless system_state == SYSTEM_BOOTING.
  */
-void build_all_zonelists(void *data)
+void __ref build_all_zonelists(void *data)
 {
 	set_zonelist_order();
 
@@ -3860,7 +3926,7 @@ static void __init find_usable_zone_for_movable(void)
 
 /*
  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
- * because it is sized independant of architecture. Unlike the other zones,
+ * because it is sized independent of architecture. Unlike the other zones,
  * the starting point for ZONE_MOVABLE is not fixed. It may be different
  * in each node depending on the size of each node and how evenly kernelcore
  * is distributed. This helper function adjusts the zone ranges
@@ -5621,4 +5687,5 @@ void dump_page(struct page *page)
 		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
+	mem_cgroup_print_bad_page(page);
 }