mm: page_alloc: generalize order handling in __free_pages_bootmem()

__free_pages_bootmem() used to special-case higher-order frees to save
individual page checking with free_pages_bulk().

Nowadays, both zero order and non-zero order frees use __free_pages(), which
checks each individual page anyway, and so there is little point in making
the distinction anymore.  The higher-order loop will work just fine for
zero order pages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Johannes Weiner
2012-01-10 15:08:10 -08:00
committed by Linus Torvalds
parent 43d2b11324
commit c3993076f8

View File

@@ -730,24 +730,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-	if (order == 0) {
-		__ClearPageReserved(page);
-		set_page_count(page, 0);
-		set_page_refcounted(page);
-		__free_page(page);
-	} else {
-		int loop;
+	unsigned int nr_pages = 1 << order;
+	unsigned int loop;
 
-		prefetchw(page);
-		for (loop = 0; loop < (1 << order); loop++) {
-			struct page *p = &page[loop];
+	prefetchw(page);
+	for (loop = 0; loop < nr_pages; loop++) {
+		struct page *p = &page[loop];
 
-			if (loop + 1 < (1 << order))
-				prefetchw(p + 1);
-			__ClearPageReserved(p);
-			set_page_count(p, 0);
+		if (loop + 1 < nr_pages)
+			prefetchw(p + 1);
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
@@ -755,7 +747,6 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	}
 
-		set_page_refcounted(page);
-		__free_pages(page, order);
-	}
+	set_page_refcounted(page);
+	__free_pages(page, order);
 }