mm: add a basic debugging framework for memory initialisation

Boot initialisation is very complex, with a significant number of
architecture-specific routines, hooks and constraints on code ordering.  While
much of the initialisation is architecture-independent, it trusts the data
received from the architecture layer.  This is a mistake, and it has resulted
in a number of difficult-to-diagnose bugs.

This patchset adds some validation and tracing to memory initialisation.  It
also introduces a few basic defensive measures.  The validation code can be
explicitly disabled for embedded systems.
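As a rough sketch of how that switch might look in Kconfig (a hypothetical
reconstruction: the actual entry lives in one of the changed files not
excerpted below, and only the CONFIG_DEBUG_MEMORY_INIT symbol is confirmed by
the Makefile hunk):

# Hypothetical sketch of the Kconfig entry, assuming the EMBEDDED
# idiom of this era: selectable only on embedded configs, on by
# default everywhere else.
config DEBUG_MEMORY_INIT
	bool "Debug memory initialisation" if EMBEDDED
	default !EMBEDDED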

This patch:

Add additional debugging and verification code for memory initialisation.

Once enabled, the verification checks are always run; when required,
additional debugging information can be output via the mminit_loglevel=
command-line parameter.
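For example, since a message is printed only when its level is strictly below
mminit_loglevel (and MMINIT_TRACE, the highest level, is 2), booting with
mminit_loglevel=4 on the kernel command line enables warning, verification and
trace output alike, while the default of 0 prints nothing at all.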

The verification code is placed in a new file mm/mm_init.c.  Ideally other mm
initialisation code will be moved here over time.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Mel Gorman
Date: 2008-07-23 21:26:49 -07:00
Committer: Linus Torvalds
Commit: 6b74ab97bc (parent: 9483a578df)
6 changed files with 79 additions and 9 deletions

mm/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
 obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_SLAB) += slab.o
+obj-$(CONFIG_DEBUG_MEMORY_INIT) += mm_init.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o

mm/internal.h

@@ -59,4 +59,31 @@ static inline unsigned long page_order(struct page *page)
 #define __paginginit __init
 #endif
 
+/* Memory initialisation debug and verification */
+enum mminit_level {
+	MMINIT_WARNING,
+	MMINIT_VERIFY,
+	MMINIT_TRACE
+};
+
+#ifdef CONFIG_DEBUG_MEMORY_INIT
+
+extern int mminit_loglevel;
+
+#define mminit_dprintk(level, prefix, fmt, arg...) \
+do { \
+	if (level < mminit_loglevel) { \
+		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
+		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
+	} \
+} while (0)
+
+#else
+
+static inline void mminit_dprintk(enum mminit_level level,
+		const char *prefix, const char *fmt, ...)
+{
+}
+
+#endif /* CONFIG_DEBUG_MEMORY_INIT */
 #endif
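To show how a caller is expected to use the macro, here is a minimal sketch of
a hypothetical call site (the helper name, the "zone_setup" prefix and the
message are invented for illustration; only mminit_dprintk, MMINIT_TRACE and
mminit_loglevel come from this patch):

/* Hypothetical call site, for illustration only. */
static void __init report_zone_span(int zone_idx,
		unsigned long start_pfn, unsigned long end_pfn)
{
	/*
	 * MMINIT_TRACE is 2 and the macro tests level < mminit_loglevel,
	 * so this prints only when the kernel was booted with
	 * mminit_loglevel=3 or higher, emitting at KERN_DEBUG e.g.:
	 *   mminit::zone_setup zone 1 spans PFNs 4096-8192
	 */
	mminit_dprintk(MMINIT_TRACE, "zone_setup",
			"zone %d spans PFNs %lu-%lu\n",
			zone_idx, start_pfn, end_pfn);
}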

mm/mm_init.c (new file, 18 lines)

@@ -0,0 +1,18 @@
+/*
+ * mm_init.c - Memory initialisation verification and debugging
+ *
+ * Copyright 2008 IBM Corporation, 2008
+ * Author Mel Gorman <mel@csn.ul.ie>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+int __meminitdata mminit_loglevel;
+
+static __init int set_mminit_loglevel(char *str)
+{
+	get_option(&str, &mminit_loglevel);
+	return 0;
+}
+early_param("mminit_loglevel", set_mminit_loglevel);

mm/page_alloc.c

@@ -2975,7 +2975,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
-	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+		"Entering push_node_boundaries(%u, %lu, %lu)\n",
 		nid, start_pfn, end_pfn);
 
 	/* Initialise the boundary for this node if necessary */
@@ -2993,7 +2994,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
 		unsigned long *start_pfn, unsigned long *end_pfn)
 {
-	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+		"Entering account_node_boundary(%u, %lu, %lu)\n",
 		nid, *start_pfn, *end_pfn);
 
 	/* Return if boundary information has not been provided */
@@ -3368,8 +3370,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			printk(KERN_DEBUG
-				" %s zone: %lu pages used for memmap\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3379,7 +3381,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages reserved\n",
 				zone_names[0], dma_reserve);
 		}
 
@@ -3520,10 +3523,11 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+	mminit_dprintk(MMINIT_TRACE, "memory_register",
+			"Entering add_active_range(%d, %#lx, %#lx) "
+			"%d entries of %d used\n",
+			nid, start_pfn, end_pfn,
+			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {