mm: introduce for_each_populated_zone() macro
Impact: cleanup

In almost all cases, for_each_zone() is used together with
populated_zone(), because most callers do not need to visit zones on
memoryless nodes.  The new for_each_populated_zone() macro folds the
populated check into the iterator and simplifies those call sites.

This patch has no functional change.

[akpm@linux-foundation.org: small cleanup]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ee99c71c59
parent a6dc60f897
committed by Linus Torvalds
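For illustration, the conversion this commit applies at every call site
looks like the following sketch (the per-zone work is a stand-in for
each caller's real body, not part of the patch):

	/* Before: each caller open-codes the populated-zone check. */
	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;
		/* ... per-zone work ... */
	}

	/* After: the iterator skips unpopulated zones itself. */
	for_each_populated_zone(zone) {
		/* ... per-zone work ... */
	}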
include/linux/mmzone.h
@@ -806,6 +806,14 @@ extern struct zone *next_zone(struct zone *zone);
 	     zone;					\
 	     zone = next_zone(zone))
 
+#define for_each_populated_zone(zone)		        \
+	for (zone = (first_online_pgdat())->node_zones; \
+	     zone;					\
+	     zone = next_zone(zone))			\
+		if (!populated_zone(zone))		\
+			; /* do nothing */		\
+		else
+
 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
 {
 	return zoneref->zone;
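The inverted test with an empty then-branch in the macro above is a
deliberate idiom: the caller's loop body becomes the else branch, so
the macro's if already owns an else and cannot capture one from
surrounding code, and an unbraced body nests correctly.  For example,
the unbraced use in count_free_highmem_pages() below expands to

	for (zone = (first_online_pgdat())->node_zones; zone; zone = next_zone(zone))
		if (!populated_zone(zone))
			; /* do nothing */
		else if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

which skips empty zones without any dangling-else ambiguity.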
kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 
 	INIT_LIST_HEAD(list);
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		unsigned long zone_start, zone_end;
 		struct mem_extent *ext, *cur, *aux;
 
-		if (!populated_zone(zone))
-			continue;
-
 		zone_start = zone->zone_start_pfn;
 		zone_end = zone->zone_start_pfn + zone->spanned_pages;
 
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
 	struct zone *zone;
 	unsigned int cnt = 0;
 
-	for_each_zone(zone)
-		if (populated_zone(zone) && is_highmem(zone))
+	for_each_populated_zone(zone)
+		if (is_highmem(zone))
 			cnt += zone_page_state(zone, NR_FREE_PAGES);
 
 	return cnt;
kernel/power/swsusp.c
@@ -229,17 +229,16 @@ int swsusp_shrink_memory(void)
 		size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
 		tmp = size;
 		size += highmem_size;
-		for_each_zone (zone)
-			if (populated_zone(zone)) {
-				tmp += snapshot_additional_pages(zone);
-				if (is_highmem(zone)) {
-					highmem_size -=
+		for_each_populated_zone(zone) {
+			tmp += snapshot_additional_pages(zone);
+			if (is_highmem(zone)) {
+				highmem_size -=
 					zone_page_state(zone, NR_FREE_PAGES);
-				} else {
-					tmp -= zone_page_state(zone, NR_FREE_PAGES);
-					tmp += zone->lowmem_reserve[ZONE_NORMAL];
-				}
+			} else {
+				tmp -= zone_page_state(zone, NR_FREE_PAGES);
+				tmp += zone->lowmem_reserve[ZONE_NORMAL];
 			}
+		}
 
 		if (highmem_size < 0)
 			highmem_size = 0;
mm/page_alloc.c
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
 	unsigned long flags;
 	struct zone *zone;
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
 
-		if (!populated_zone(zone))
-			continue;
-
 		pset = zone_pcp(zone, cpu);
 
 		pcp = &pset->pcp;
@@ -1879,10 +1876,7 @@ void show_free_areas(void)
 	int cpu;
 	struct zone *zone;
 
-	for_each_zone(zone) {
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
@@ -1922,12 +1916,9 @@ void show_free_areas(void)
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		int i;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -1967,12 +1958,9 @@ void show_free_areas(void)
 		printk("\n");
 	}
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
-		if (!populated_zone(zone))
-			continue;
-
 		show_node(zone);
 		printk("%s: ", zone->name);
 
@@ -2784,11 +2772,7 @@ static int __cpuinit process_zones(int cpu)
 
 	node_set_state(node, N_CPU);	/* this node has a cpu */
 
-	for_each_zone(zone) {
-
-		if (!populated_zone(zone))
-			continue;
-
+	for_each_populated_zone(zone) {
 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
 					 GFP_KERNEL, node);
 		if (!zone_pcp(zone, cpu))
mm/vmscan.c
@@ -2061,11 +2061,9 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 	struct zone *zone;
 	unsigned long ret = 0;
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		enum lru_list l;
 
-		if (!populated_zone(zone))
-			continue;
 		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
 			continue;
 
mm/vmstat.c
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
 	int cpu;
 	int threshold;
 
-	for_each_zone(zone) {
-
-		if (!zone->present_pages)
-			continue;
-
+	for_each_populated_zone(zone) {
 		threshold = calculate_threshold(zone);
 
 		for_each_online_cpu(cpu)
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
 	int i;
 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
 		struct per_cpu_pageset *p;
 
-		if (!populated_zone(zone))
-			continue;
-
 		p = zone_pcp(zone, cpu);
 
 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)