Add l?e_create_page() macros. map_pages_to_xen() now takes a pfn range
ChangeSet 1.1463, 2005/05/19 14:24:18+01:00, kaf24@firebug.cl.cam.ac.uk

Add l?e_create_page() macros. map_pages_to_xen() now takes a pfn range
rather than a byte range. Fix x86/64 RAM mapping to discard partial
frames.
Signed-off-by: Keir Fraser <keir@xensource.com>

 arch/x86/domain.c      |    8 ++----
 arch/x86/mm.c          |   61 +++++++++++++++++++++++++++++--------------------
 arch/x86/setup.c       |   24 ++++++++++---------
 arch/x86/shadow.c      |    2 -
 arch/x86/x86_32/mm.c   |    8 ------
 arch/x86/x86_64/mm.c   |   16 +++---------
 include/asm-x86/page.h |   13 +++++++---
 7 files changed, 67 insertions(+), 65 deletions(-)
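
For reference, the calling-convention change looks like this to callers (a
minimal sketch distilled from the diff below; vaddr, paddr and nbytes are
illustrative names, not identifiers from the patch):

    /* Before: physical range passed as a byte address and a byte length. */
    map_pages_to_xen(vaddr, paddr, nbytes, PAGE_HYPERVISOR);

    /* After: the same range passed as a start pfn and a frame count. */
    map_pages_to_xen(vaddr, paddr >> PAGE_SHIFT, nbytes >> PAGE_SHIFT,
                     PAGE_HYPERVISOR);

    /* The new macros build a PTE directly from a 'struct pfn_info *',
     * so l1e_create_page(pg, flags) replaces
     * l1e_create_pfn(page_to_pfn(pg), flags). */

    /* "Discard partial frames": a RAM region [addr, addr+size) is now
     * rounded inward to whole frames before being mapped:
     *   start = (addr + PAGE_SIZE - 1) >> PAGE_SHIFT;   (round up)
     *   end   = (addr + size) >> PAGE_SHIFT;            (round down)
     */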


diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-05-19 10:03:11 -04:00
+++ b/xen/arch/x86/domain.c 2005-05-19 10:03:11 -04:00
@@ -263,9 +263,8 @@
                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
         ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
         ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-            l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
-                           PAGE_HYPERVISOR);
-
+            l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+
         ed->arch.guest_vtable = __linear_l2_table;
         ed->arch.shadow_vtable = __shadow_linear_l2_table;

@@ -302,8 +301,7 @@
     ed->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
     ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
-        l1e_create_pfn(page_to_pfn(virt_to_page(gdt_table)),
-                       PAGE_HYPERVISOR);
+        l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
 }

 #ifdef CONFIG_VMX
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-05-19 10:03:11 -04:00
+++ b/xen/arch/x86/mm.c 2005-05-19 10:03:11 -04:00
@@ -161,7 +161,10 @@
         if ( p == 0 )
             panic("Not enough memory for frame table\n");
         map_pages_to_xen(
-            FRAMETABLE_VIRT_START + i, p, 4UL << 20, PAGE_HYPERVISOR);
+            FRAMETABLE_VIRT_START + i,
+            p >> PAGE_SHIFT,
+            4UL << (20-PAGE_SHIFT),
+            PAGE_HYPERVISOR);
     }

     memset(frame_table, 0, frame_table_size);
@@ -2833,31 +2836,30 @@
     free_xenheap_page((unsigned long)d->arch.ptwr[PTWR_PT_INACTIVE].page);
 }

-/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
 int map_pages_to_xen(
-    unsigned long v,
-    unsigned long p,
-    unsigned long s,
+    unsigned long virt,
+    unsigned long pfn,
+    unsigned long nr_pfns,
     unsigned long flags)
 {
     l2_pgentry_t *pl2e, ol2e;
-    l1_pgentry_t *pl1e;
+    l1_pgentry_t *pl1e, ol1e;
     unsigned int i;

     unsigned int map_small_pages = !!(flags & MAP_SMALL_PAGES);
     flags &= ~MAP_SMALL_PAGES;

-    while ( s != 0 )
+    while ( nr_pfns != 0 )
     {
-        pl2e = virt_to_xen_l2e(v);
+        pl2e = virt_to_xen_l2e(virt);

-        if ( (((v|p) & ((1 << L2_PAGETABLE_SHIFT) - 1)) == 0) &&
-             (s >= (1 << L2_PAGETABLE_SHIFT)) &&
+        if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
+             (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
              !map_small_pages )
         {
             /* Super-page mapping. */
             ol2e = *pl2e;
-            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
+            *pl2e = l2e_create_pfn(pfn, flags|_PAGE_PSE);

             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
@@ -2866,9 +2868,9 @@
                     free_xen_pagetable(l2e_get_page(*pl2e));
             }

-            v += 1 << L2_PAGETABLE_SHIFT;
-            p += 1 << L2_PAGETABLE_SHIFT;
-            s -= 1 << L2_PAGETABLE_SHIFT;
+            virt += 1UL << L2_PAGETABLE_SHIFT;
+            pfn += 1UL << PAGETABLE_ORDER;
+            nr_pfns -= 1UL << PAGETABLE_ORDER;
         }
         else
         {
@@ -2890,26 +2892,36 @@
                 local_flush_tlb_pge();
             }

-            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
-            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
-                local_flush_tlb_one(v);
-            *pl1e = l1e_create_phys(p, flags);
-
-            v += 1 << L1_PAGETABLE_SHIFT;
-            p += 1 << L1_PAGETABLE_SHIFT;
-            s -= 1 << L1_PAGETABLE_SHIFT;
+            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
+            ol1e = *pl1e;
+            *pl1e = l1e_create_pfn(pfn, flags);
+            if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
+                local_flush_tlb_one(virt);
+
+            virt += 1UL << L1_PAGETABLE_SHIFT;
+            pfn += 1UL;
+            nr_pfns -= 1UL;
         }
     }

     return 0;
 }

+void __set_fixmap(
+    enum fixed_addresses idx, unsigned long p, unsigned long flags)
+{
+    if ( unlikely(idx >= __end_of_fixed_addresses) )
+        BUG();
+    map_pages_to_xen(fix_to_virt(idx), p >> PAGE_SHIFT, 1, flags);
+}
+
 #ifdef MEMORY_GUARD

 void memguard_init(void)
 {
     map_pages_to_xen(
-        PAGE_OFFSET, 0, xenheap_phys_end, __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
+        PAGE_OFFSET, 0, xenheap_phys_end >> PAGE_SHIFT,
+        __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
 }

 static void __memguard_change_range(void *p, unsigned long l, int guard)
@@ -2927,7 +2939,8 @@
     if ( guard )
         flags &= ~_PAGE_PRESENT;

-    map_pages_to_xen((unsigned long)(_p), __pa(_p), _l, flags);
+    map_pages_to_xen(
+        _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
 }

 void memguard_guard_range(void *p, unsigned long l)
diff -Nru a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c 2005-05-19 10:03:11 -04:00
+++ b/xen/arch/x86/setup.c 2005-05-19 10:03:11 -04:00
@@ -399,7 +399,7 @@
     /* Map default GDT into their final position in the idle page table. */
     map_pages_to_xen(
         GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE,
-        virt_to_phys(gdt_table), PAGE_SIZE, PAGE_HYPERVISOR);
+        virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);

     /* Process CPU type information. */
     identify_cpu(&boot_cpu_data);
@@ -580,17 +580,19 @@
          * due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
          */
         {
-            unsigned long start = (unsigned long)e820.map[i].addr;
-            unsigned long size = (unsigned long)e820.map[i].size;
-            size = (size + (start & ~PAGE_MASK) + PAGE_SIZE - 1) & PAGE_MASK;
-            if ( (start &= PAGE_MASK) < (64UL << 20) )
-            {
-                if ( (signed long)(size -= (64UL << 20) - start) <= 0 )
-                    continue;
-                start = 64UL << 20;
-            }
+            /* Calculate page-frame range, discarding partial frames. */
+            unsigned long start, end;
+            start = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
+            end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
+            /* Clip the range to above 64MB. */
+            if ( end < (64UL << (20-PAGE_SHIFT)) )
+                continue;
+            if ( start < (64UL << (20-PAGE_SHIFT)) )
+                start = 64UL << (20-PAGE_SHIFT);
+            /* Request the mapping. */
             map_pages_to_xen(
-                PAGE_OFFSET + start, start, size, PAGE_HYPERVISOR);
+                PAGE_OFFSET + (start << PAGE_SHIFT),
+                start, end-start, PAGE_HYPERVISOR);
         }
 #endif
     }
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c 2005-05-19 10:03:13 -04:00
+++ b/xen/arch/x86/shadow.c 2005-05-19 10:03:13 -04:00
@@ -789,7 +789,7 @@
             memset(l1, 0, PAGE_SIZE);
             unmap_domain_mem_with_cache(l1, l1cache);

-            l2e = l2e_create_pfn(page_to_pfn(l1page), __PAGE_HYPERVISOR);
+            l2e = l2e_create_page(l1page, __PAGE_HYPERVISOR);
             l2[l2_table_offset(va)] = l2e;
         }
         unmap_domain_mem_with_cache(l2, l2cache);
diff -Nru a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c 2005-05-19 10:03:11 -04:00
+++ b/xen/arch/x86/x86_32/mm.c 2005-05-19 10:03:11 -04:00
@@ -54,14 +54,6 @@
     return &idle_pg_table[l2_table_offset(v)];
 }

-void __set_fixmap(
-    enum fixed_addresses idx, unsigned long p, unsigned long flags)
-{
-    if ( unlikely(idx >= __end_of_fixed_addresses) )
-        BUG();
-    map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
-}
-
 void __init paging_init(void)
 {
     void *ioremap_pt;
diff -Nru a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c 2005-05-19 10:03:11 -04:00
+++ b/xen/arch/x86/x86_64/mm.c 2005-05-19 10:03:11 -04:00
@@ -72,17 +72,9 @@
     return pl2e;
 }

-void __set_fixmap(
-    enum fixed_addresses idx, unsigned long p, unsigned long flags)
-{
-    if ( unlikely(idx >= __end_of_fixed_addresses) )
-        BUG();
-    map_pages_to_xen(fix_to_virt(idx), p, PAGE_SIZE, flags);
-}
-
 void __init paging_init(void)
 {
-    unsigned long i, p;
+    unsigned long i;
     l3_pgentry_t *l3rw, *l3ro;
     struct pfn_info *pg;

@@ -96,10 +88,10 @@
             NULL, L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
         if ( pg == NULL )
             panic("Not enough memory for m2p table\n");
-        p = page_to_phys(pg);
         map_pages_to_xen(

_______________________________________________
Xen-changelog mailing list
Xen-changelog@lists.xensource.com
http://lists.xensource.com/xen-changelog