
[xen-unstable] [LINUX][X86_64] Fix initial memory mapping code.
# HG changeset patch
# User kaf24@firebug.cl.cam.ac.uk
# Node ID 2fd7f4fb7d1453e4ff418c06961b0bd9fcb71129
# Parent 61e2ea81bd65f5df4b0e977ee9a457884b54597f
[LINUX][X86_64] Fix initial memory mapping code.

The temporary mappings needed to set up the 1:1 mappings must be torn down after use; otherwise they may trigger the
WARN_ON() in vmap_pte_range(). That happens if the chunk allocated to hold the kernel and initial page tables gets
close to or exceeds 128MB, or if a sufficiently high mem= argument causes the static allocations to grow beyond
128MB; in either case the temporary mappings extend into the modules area.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
---
linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c | 34 +++++++++++++++++--------
1 files changed, 24 insertions(+), 10 deletions(-)
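
For reference, the WARN_ON() named in the changelog lives in vmap_pte_range(), which populates kernel page tables
for vmap()/vmalloc() ranges and warns if a PTE it is about to write is already present. Roughly, in a 2.6.16-era
mm/vmalloc.c (the exact shape here is an assumption, shown only for orientation):

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			  unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte = pte_alloc_kernel(pmd, addr);

	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;

		/* Fires if a stale early mapping still occupies this slot. */
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

A leftover temporary mapping in the modules area makes !pte_none(*pte) true the first time that range is mapped,
hence the warning the patch avoids.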

diff -r 61e2ea81bd65 -r 2fd7f4fb7d14 linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c Thu Jun 01 18:32:04 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c Thu Jun 01 19:07:40 2006 +0100
@@ -56,6 +56,8 @@ struct dma_mapping_ops* dma_ops;
struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

+int after_bootmem;
+
extern unsigned long *contiguous_bitmap;

static unsigned long dma_reserve __initdata;
@@ -74,7 +76,7 @@ extern unsigned long start_pfn;
	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
	__START_KERNEL_map)))

-static void early_make_page_readonly(void *va, unsigned int feature)
+static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
	unsigned long addr, _va = (unsigned long)va;
	pte_t pte, *ptep;
@@ -82,6 +84,11 @@ static void early_make_page_readonly(voi

	if (xen_feature(feature))
		return;
+
+	if (after_bootmem) {
+		make_page_readonly(va, feature);
+		return;
+	}

	addr = (unsigned long) page[pgd_index(_va)];
	addr_to_page(addr, page);
@@ -198,10 +205,6 @@ void show_mem(void)
	printk(KERN_INFO "%lu pages swap cached\n",cached);
}

-/* References to section boundaries */
-
-int after_bootmem;
-
static void *spp_getpage(void)
{
	void *ptr;
@@ -448,9 +451,9 @@ phys_pmd_init(pmd_t *pmd, unsigned long
		pte = alloc_static_page(&pte_phys);
		pte_save = pte;
		for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
-			if ((address >= end) ||
-			    ((address >> PAGE_SHIFT) >=
-			     xen_start_info->nr_pages)) {
+			if (address >= (after_bootmem
+					? end
+					: xen_start_info->nr_pages << PAGE_SHIFT)) {
				__set_pte(pte, __pte(0));
				continue;
			}
@@ -550,7 +553,7 @@ void __init xen_init_pt(void)
		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
}

-void __init extend_init_mapping(unsigned long tables_space)
+static void __init extend_init_mapping(unsigned long tables_space)
{
	unsigned long va = __START_KERNEL_map;
	unsigned long phys, addr, *pte_page;
@@ -666,7 +669,18 @@ void __meminit init_memory_mapping(unsig
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
	}

-	BUG_ON(!after_bootmem && start_pfn != table_end);
+	if (!after_bootmem) {
+		BUG_ON(start_pfn != table_end);
+
+		/* Destroy the temporary mappings created above. */
+		start = __START_KERNEL_map + (table_start << PAGE_SHIFT);
+		end = start + tables_space;
+		for (; start < end; start += PAGE_SIZE) {
+			/* Should also clear out and reclaim any page table
+			   pages no longer needed... */
+			WARN_ON(HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0));
+		}
+	}

	__flush_tlb_all();
}
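
As background on the 128MB figure in the changelog: on x86_64 of this vintage the modules area starts 128MB above
__START_KERNEL_map, so mappings placed at __START_KERNEL_map + (table_start << PAGE_SHIFT) that grow past that
boundary spill into the range vmap_pte_range() later populates. A stand-alone sketch of the arithmetic, using
assumed 2.6.16-era values from include/asm-x86_64/pgtable.h (not taken from this patch):

#include <stdio.h>

/* Assumed 2.6.16-era x86_64 layout constants, not from this patch. */
#define KERNEL_MAP_START	0xffffffff80000000UL	/* __START_KERNEL_map */
#define MODULES_START		0xffffffff88000000UL	/* MODULES_VADDR */

int main(void)
{
	/* Room for the kernel image and early page tables before the
	 * modules area begins. */
	printf("%lu MB\n", (MODULES_START - KERNEL_MAP_START) >> 20);	/* prints "128 MB" */
	return 0;
}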

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog