Merge branches 'upstream/xenfs' and 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen
* 'upstream/xenfs' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
  xen/privcmd: make privcmd visible in domU
  xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
  privcmd: MMAPBATCH: Fix error handling/reporting
  xenbus: export xen_store_interface for xenfs
  xen/privcmd: make sure vma is ours before doing anything to it
  xen/privcmd: print SIGBUS faults
  xen/xenfs: set_page_dirty is supposed to return true if it dirties
  xen/privcmd: create address space to allow writable mmaps
  xen: add privcmd driver
  xen: add variable hypercall caller
  xen: add xen_set_domain_pte()
  xen: add /proc/xen/xsd_{kva,port} to xenfs

* 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen: (29 commits)
  xen: include xen/xen.h for definition of xen_initial_domain()
  xen: use host E820 map for dom0
  xen: correctly rebuild mfn list list after migration.
  xen: improvements to VIRQ_DEBUG output
  xen: set up IRQ before binding virq to evtchn
  xen: ensure that all event channels start off bound to VCPU 0
  xen/hvc: only notify if we actually sent something
  xen: don't add extra_pages for RAM after mem_end
  xen: add support for PAT
  xen: make sure xen_max_p2m_pfn is up to date
  xen: limit extra memory to a certain ratio of base
  xen: add extra pages for E820 RAM regions, even if beyond mem_end
  xen: make sure xen_extra_mem_start is beyond all non-RAM e820
  xen: implement "extra" memory to reserve space for pages not present at boot
  xen: Use host-provided E820 map
  xen: don't map missing memory
  xen: defer building p2m mfn structures until kernel is mapped
  xen: add return value to set_phys_to_machine()
  xen: convert p2m to a 3 level tree
  xen: make install_p2mtop_page() static
  ...

Fix up trivial conflict in arch/x86/xen/mmu.c, and fix the use of
'reserve_early()' - in the new memblock world order it is now
'memblock_x86_reserve_range()' instead.  Pointed out by Jeremy.
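The 'reserve_early()' fixup Jeremy pointed out is just an API rename from the memblock conversion: the xen branch was written against the old x86 early-reservation interface, so the merge resolution switches the new "XEN EXTRA" reservation (in xen_add_extra_mem(), below) over to the memblock call. Roughly:

        /* as written on the xen branch (pre-memblock API): */
        reserve_early(extra_start, extra_start + size, "XEN EXTRA");

        /* as merged, in the new memblock world order: */
        memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");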
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -18,8 +18,10 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/callback.h>
+#include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/memory.h>
 #include <xen/features.h>
@@ -34,6 +36,39 @@ extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);
 
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+	u64 size = (u64)pages * PAGE_SIZE;
+	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+	if (!pages)
+		return;
+
+	e820_add_region(extra_start, size, E820_RAM);
+	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+	xen_extra_mem_size += size;
+
+	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 					      phys_addr_t end_addr)
 {
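The EXTRA_MEM_RATIO comment above is easy to sanity-check with concrete numbers. A back-of-the-envelope sketch, not kernel code: the 4 KiB page size is x86 fact, but the 64-byte struct page and the 512 MiB base allocation are assumed round figures.

        #include <stdio.h>

        /* Back-of-the-envelope check of EXTRA_MEM_RATIO; sizes are assumptions. */
        int main(void)
        {
                unsigned long long page_size   = 4096;          /* 4 KiB page */
                unsigned long long sizeof_page = 64;            /* assumed sizeof(struct page) */
                unsigned long long base  = 512ULL << 20;        /* assumed 512 MiB base */
                unsigned long long extra = 10 * base;           /* EXTRA_MEM_RATIO = 10 */

                /* page structures for the extra area must live in base memory */
                unsigned long long overhead = (extra / page_size) * sizeof_page;

                /* prints "overhead: 80 MiB of 512 MiB base" - about 16% */
                printf("overhead: %llu MiB of %llu MiB base\n",
                       overhead >> 20, base >> 20);
                return 0;
        }

At a ratio of page_size/sizeof_page (64 with these sizes) the page structures alone would consume the entire base allocation, which is exactly the failure mode the comment warns about; a 10x cap keeps that overhead around 16% of base.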
@@ -105,16 +140,65 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
-
 char * __init xen_memory_setup(void)
 {
+	static struct e820entry map[E820MAX] __initdata;
+
 	unsigned long max_pfn = xen_start_info->nr_pages;
+	unsigned long long mem_end;
+	int rc;
+	struct xen_memory_map memmap;
+	unsigned long extra_pages = 0;
+	unsigned long extra_limit;
+	int i;
+	int op;
 
 	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+	mem_end = PFN_PHYS(max_pfn);
+
+	memmap.nr_entries = E820MAX;
+	set_xen_guest_handle(memmap.buffer, map);
+
+	op = xen_initial_domain() ?
+		XENMEM_machine_memory_map :
+		XENMEM_memory_map;
+	rc = HYPERVISOR_memory_op(op, &memmap);
+	if (rc == -ENOSYS) {
+		memmap.nr_entries = 1;
+		map[0].addr = 0ULL;
+		map[0].size = mem_end;
+		/* 8MB slack (to balance backend allocations). */
+		map[0].size += 8ULL << 20;
+		map[0].type = E820_RAM;
+		rc = 0;
+	}
+	BUG_ON(rc);
 
 	e820.nr_map = 0;
+	xen_extra_mem_start = mem_end;
+	for (i = 0; i < memmap.nr_entries; i++) {
+		unsigned long long end = map[i].addr + map[i].size;
 
-	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+		if (map[i].type == E820_RAM) {
+			if (map[i].addr < mem_end && end > mem_end) {
+				/* Truncate region to max_mem. */
+				u64 delta = end - mem_end;
+
+				map[i].size -= delta;
+				extra_pages += PFN_DOWN(delta);
+
+				end = mem_end;
+			}
+		}
+
+		if (end > xen_extra_mem_start)
+			xen_extra_mem_start = end;
+
+		/* If region is non-RAM or below mem_end, add what remains */
+		if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
+		    map[i].size > 0)
+			e820_add_region(map[i].addr, map[i].size, map[i].type);
+	}
 
 	/*
 	 * Even though this is normal, usable memory under Xen, reserve
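To see what the truncation branch in that loop does, take hypothetical numbers: the host map reports a single 3 GiB RAM region while the domain's initial allocation (mem_end) is 1 GiB. A stripped-down user-space rendering of the same arithmetic follows; the region sizes are made up, and PFN_DOWN is open-coded as division by 4096.

        #include <stdio.h>

        /* Stripped-down model of the truncation in xen_memory_setup();
         * the 3 GiB region and 1 GiB allocation are illustrative only. */
        int main(void)
        {
                unsigned long long addr = 0, size = 3ULL << 30;  /* host RAM region */
                unsigned long long mem_end = 1ULL << 30;         /* PFN_PHYS(max_pfn) */
                unsigned long long end = addr + size;
                unsigned long long extra_pages = 0;

                if (addr < mem_end && end > mem_end) {
                        unsigned long long delta = end - mem_end; /* 2 GiB overhang */
                        size -= delta;                  /* register only 1 GiB now */
                        extra_pages += delta / 4096;    /* remember 524288 pages */
                        end = mem_end;
                }

                /* prints "e820 RAM: 1024 MiB, extra_pages: 524288" */
                printf("e820 RAM: %llu MiB, extra_pages: %llu\n",
                       size >> 20, extra_pages);
                return 0;
        }

The region goes into the domain's e820 truncated to 1 GiB, but the 2 GiB tail is not discarded: it is counted in extra_pages so that address space for it can be reserved later through xen_add_extra_mem().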
@@ -136,7 +220,29 @@ char * __init xen_memory_setup(void)
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
-	xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+	/*
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+	 * factor the base size.  On non-highmem systems, the base
+	 * size is the full initial memory allocation; on highmem it
+	 * is limited to the max size of lowmem, so that it doesn't
+	 * get completely filled.
+	 *
+	 * In principle there could be a problem in lowmem systems if
+	 * the initial memory is also very large with respect to
+	 * lowmem, but we won't try to deal with that here.
+	 */
+	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+			  max_pfn + extra_pages);
+
+	if (extra_limit >= max_pfn)
+		extra_pages = extra_limit - max_pfn;
+	else
+		extra_pages = 0;
+
+	if (!xen_initial_domain())
+		xen_add_extra_mem(extra_pages);
 
 	return "Xen";
 }
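Carrying the same hypothetical numbers into the clamp at the end: max_pfn = 262144 (1 GiB of 4 KiB pages), extra_pages = 524288 (the 2 GiB overhang), and PFN_DOWN(MAXMEM) = 229376, assuming the ~896 MiB lowmem limit of a 32-bit x86 build. A worked instance of the formula, not kernel code:

        #include <stdio.h>

        /* Worked instance of the extra_limit clamp; all numbers illustrative. */
        int main(void)
        {
                unsigned long long max_pfn     = 262144;  /* 1 GiB allocation */
                unsigned long long extra_pages = 524288;  /* 2 GiB beyond mem_end */
                unsigned long long maxmem_pfn  = 229376;  /* assumed PFN_DOWN(MAXMEM) */
                unsigned long long ratio = 10;            /* EXTRA_MEM_RATIO */

                unsigned long long base  = max_pfn < maxmem_pfn ? max_pfn : maxmem_pfn;
                unsigned long long limit = ratio * base;  /* 10 * 229376 = 2293760 */
                if (max_pfn + extra_pages < limit)        /* 786432 < 2293760 */
                        limit = max_pfn + extra_pages;

                /* prints "extra_pages after clamp: 524288" - the cap doesn't bite */
                extra_pages = limit >= max_pfn ? limit - max_pfn : 0;
                printf("extra_pages after clamp: %llu\n", extra_pages);
                return 0;
        }

With a much larger host map the clamp would hold extra_pages to 10 * 229376 - 262144 = 2031616 pages instead. Note also that dom0 skips xen_add_extra_mem() entirely, per the xen_initial_domain() check above.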