kcore: use registered physmem information

For /proc/kcore, each arch registers its memory ranges via kclist_add().
Usually,

	- range of physical memory
	- range of vmalloc area
	- text, etc...

are registered, but the "range of physical memory" entry has some problems.  It
is not updated at memory hotplug, and it tends to include unnecessary memory
holes.  Now, /proc/iomem (kernel/resource.c) already contains the required
physical memory range information, and it is properly updated at memory
hotplug.  So it is better to avoid duplicating that information in arch code
and to rebuild the kclist entries for physical memory from /proc/iomem.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

@@ -17,10 +17,14 @@
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <asm/sections.h>
#define CORE_STR "CORE"
@@ -31,17 +35,6 @@
static struct proc_dir_entry *proc_root_kcore;

static int open_kcore(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);

static const struct file_operations proc_kcore_operations = {
        .read = read_kcore,
        .open = open_kcore,
};
#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
@@ -61,6 +54,7 @@ struct memelfnote
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;
void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
@@ -99,6 +93,126 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
        return size + *elf_buflen;
}

static void free_kclist_ents(struct list_head *head)
{
        struct kcore_list *tmp, *pos;

        list_for_each_entry_safe(pos, tmp, head, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}

/*
 * Replace all KCORE_RAM information with passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
        struct kcore_list *tmp, *pos;
        LIST_HEAD(garbage);

        write_lock(&kclist_lock);
        if (kcore_need_update) {
                list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                        if (pos->type == KCORE_RAM)
                                list_move(&pos->list, &garbage);
                }
                list_splice_tail(list, &kclist_head);
        } else
                list_splice(list, &garbage);
        kcore_need_update = 0;
        write_unlock(&kclist_lock);
        free_kclist_ents(&garbage);
}
#ifdef CONFIG_HIGHMEM
/*
 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
 * because memory hole is not as big as !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
 */
static int kcore_update_ram(void)
{
        LIST_HEAD(head);
        struct kcore_list *ent;
        int ret = 0;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, &head);
        __kcore_update_ram(&head);
        return ret;
}

#else /* !CONFIG_HIGHMEM */

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
        ent->size = nr_pages << PAGE_SHIFT;

        /* Sanity check: Can happen in 32bit arch...maybe */
        if (ent->addr < (unsigned long) __va(0))
                goto free_out;

        /* cut not-mapped area. ....from ppc-32 code. */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /* cut when vmalloc() area is higher than direct-map area */
        if (VMALLOC_START > (unsigned long)__va(0)) {
                if (ent->addr > VMALLOC_START)
                        goto free_out;
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }
        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);
        return 0;
free_out:
        kfree(ent);
        return 1;
}
static int kcore_update_ram(void)
{
        int nid, ret;
        unsigned long end_pfn;
        LIST_HEAD(head);

        /* Not initialized....update now */
        /* find out "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_HIGH_MEMORY) {
                unsigned long node_end;
                node_end = NODE_DATA(nid)->node_start_pfn +
                        NODE_DATA(nid)->node_spanned_pages;
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
        if (ret) {
                free_kclist_ents(&head);
                return -ENOMEM;
        }
        __kcore_update_ram(&head);
        return ret;
}
#endif /* CONFIG_HIGHMEM */
/*****************************************************************************/
/*
@@ -373,6 +487,39 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
        return acc;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
                kcore_update_ram();
        return 0;
}

static const struct file_operations proc_kcore_operations = {
        .read = read_kcore,
        .open = open_kcore,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                write_lock(&kclist_lock);
                kcore_need_update = 1;
                write_unlock(&kclist_lock);
        }
        return NOTIFY_OK;
}
#endif
static struct kcore_list kcore_vmalloc;
#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
@@ -393,10 +540,18 @@ static void __init proc_kcore_text_init(void)
static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        hotplug_memory_notifier(kcore_callback, 0);
        /* Other special area, area-for-module etc is arch specific. */
        return 0;
}
module_init(proc_kcore_init);