Mailing List Archive

[PATCH 06/14] arm: allow access to the xenheap and the boot pages.
arm: allow access to the xenheap and the boot pages.

This patch collects machine page frames and creates the frame table, allowing access to the xenheap and the boot pages.

xen/arch/arm/xen/mm.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++
xen/arch/arm/xen/setup.c | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
xen/common/page_alloc.c | 5 +++
xen/include/asm-arm/mm.h | 28 +++++++++++++++++++++
xen/include/asm-arm/mmu.h | 10 +++++++
xen/include/asm-arm/platform.h | 62 ++++++++++++++++++++++++++++++++++++++++++++++

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>

diff -r 4d61f02fde37 xen/arch/arm/xen/mm.c
--- a/xen/arch/arm/xen/mm.c Fri Feb 03 17:47:32 2012 +0900
+++ b/xen/arch/arm/xen/mm.c Mon Feb 06 11:16:37 2012 +0900
@@ -192,3 +192,68 @@ int page_is_ram_type(unsigned long mfn,

return -EINVAL;
}
+
+#define PTS_PER_PAGE 4
+
+/*
+ * Four 1KB L2 (coarse) page tables fit in a single 4KB page.
+ */
+static inline void wire_page_tables(l1e_t *l1e, unsigned long tables)
+{
+ l1e = (l1e_t *)((unsigned long)l1e & ~(PTS_PER_PAGE * sizeof(l1e_t) - 1));
+
+ *(l1e + 0) = MK_L1E(tables + 0, L1E_GUEST_TABLE); pte_sync(l1e + 0);
+ *(l1e + 1) = MK_L1E(tables + 1024, L1E_GUEST_TABLE); pte_sync(l1e + 1);
+ *(l1e + 2) = MK_L1E(tables + 2048, L1E_GUEST_TABLE); pte_sync(l1e + 2);
+ *(l1e + 3) = MK_L1E(tables + 3072, L1E_GUEST_TABLE); pte_sync(l1e + 3);
+}
+
+unsigned long alloc_page_tables(l1e_t *l1e)
+{
+ unsigned long page;
+
+ page = alloc_clean_pages(1);
+ if (!page) {
+ return 0;
+ }
+
+// cache_clean_range(page, page + PAGE_SIZE, 0);
+
+ wire_page_tables(l1e, page);
+
+ return page;
+}
+
+
+int alloc_page_map(unsigned long virt, unsigned long phys, unsigned int size, unsigned int flags)
+{
+ l1e_t *l1e;
+ unsigned long vaddr = round_down(virt, PAGE_SIZE);
+ unsigned long last = virt + size;
+
+ l1e = l1_linear_offset_xen(vaddr);
+
+ do {
+ l2e_t *l2e;
+ unsigned long end = (vaddr + (SECTION_SIZE * PTS_PER_PAGE)) & (SECTION_MASK);
+ end = (end < last) ? end : last;
+
+ if (!l1e_val(*l1e)) {
+ if (!alloc_page_tables(l1e)) {
+ return -ENOMEM;
+ }
+ }
+
+ l2e = l2_linear_offset(l1e, vaddr);
+ do {
+ *l2e = MK_L2E(phys, flags);
+ pte_sync(l2e);
+
+ phys += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ } while(l2e++, vaddr < end);
+ } while(l1e += 4, vaddr < last);
+
+ return 0;
+}
+
diff -r 4d61f02fde37 xen/arch/arm/xen/setup.c
--- a/xen/arch/arm/xen/setup.c Fri Feb 03 17:47:32 2012 +0900
+++ b/xen/arch/arm/xen/setup.c Mon Feb 06 11:16:37 2012 +0900
@@ -31,6 +31,7 @@
#include <public/version.h>
#include <public/sched.h>
#include <asm/mmu.h>
+#include <asm/platform.h>

struct domain _dom_xen = {
.refcnt = ATOMIC_INIT(1),
@@ -74,6 +75,118 @@ void arch_get_xen_caps(xen_capabilities_
{
}

+static unsigned long lookup_xen_phys_start(void)
+{
+ l1e_t *l1e;
+
+ l1e = l1_linear_offset_xen(XEN_VIRT_START);
+
+ return l1e_val(*l1e) & SECTION_MASK;
+}
+
+static unsigned long lookup_xen_phys_end(void)
+{
+ l1e_t *l1e;
+
+ l1e = l1_linear_offset_xen(XEN_VIRT_START);
+
+ while(l1e_val(*(l1e + 1)) != 0)
+ l1e++;
+
+ return (l1e_val(*l1e) & SECTION_MASK) + SECTION_SIZE;
+}
+
+static unsigned int boot_page_collector(unsigned long start, unsigned long size)
+{
+ unsigned long end = start + size;
+
+ init_boot_pages(start, end);
+
+ start = start >> PAGE_SHIFT;
+ end = end >> PAGE_SHIFT;
+
+ min_page = min(start, min_page);
+ max_page = max(end, max_page);
+
+ return size >> PAGE_SHIFT;
+}
+
+#define FRAME_TABLE_BASE 0xFC000000UL
+
+/*
+ * The virtual address of the frame table should be aligned to a 4MB boundary.
+ */
+struct page_info *alloc_frame_table(unsigned int sz)
+{
+ unsigned long start;
+
+ start = alloc_pages(sz >> PAGE_SHIFT);
+ if (!start) {
+ return NULL;
+ }
+
+ if(alloc_page_map(FRAME_TABLE_BASE, start, sz, L2E_GUEST_PAGE) < 0) {
+ return NULL;
+ }
+
+ return (struct page_info *)FRAME_TABLE_BASE;
+}
+
+static void frame_table_setup(unsigned int nr_boot_pages)
+{
+ int i;
+ unsigned int size;
+
+ size = round_up(nr_boot_pages * sizeof(struct page_info), PAGE_SIZE);
+
+ /* The location of the frame_table could be changed in near future.
+ * So, decision making of the virtual address is always performed in
+ * alloc_frame_table()
+ */
+
+ frame_table = alloc_frame_table(size);
+ if (frame_table == NULL) {
+ panic("Nomem\n");
+ }
+
+ memset(frame_table, 0, size);
+}
+
+unsigned int prepare_page_frames(void)
+{
+ struct memory_map *ent;
+ unsigned long start, end;
+ unsigned int pages = 0;
+
+ /* For virt_to_maddr() macro. */
+ xen_phys_start = lookup_xen_phys_start();
+ xen_phys_end = lookup_xen_phys_end();
+
+ /* For populating bootmem_region_list */
+ start = round_down(virt_to_maddr(&_end), PAGE_SIZE);
+ end = start + PAGE_SIZE;
+
+ init_boot_pages(start, end);
+
+ /* For early xenheap allocation */
+ xenheap_phys_start = end;
+ xenheap_phys_end = xen_phys_end;
+
+ iterate_memory_map(ent) {
+ if (ent->type == MEMORY_TYPE_RAM) {
+ pages += boot_page_collector(ent->base, ent->size);
+ }
+ }
+
+ reserve_boot_pages(xen_phys_start, xen_phys_end);
+
+ frame_table_setup(pages);
+
+ init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
+
+ return pages;
+}
+
static void idle_domain_init(void)
{
struct vcpu *v;
@@ -92,6 +205,8 @@ asmlinkage void start_xen(void)

smp_prepare_boot_cpu();

+ prepare_page_frames();
+
softirq_init();

tasklet_subsys_init();
diff -r 4d61f02fde37 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Fri Feb 03 17:47:32 2012 +0900
+++ b/xen/common/page_alloc.c Mon Feb 06 11:16:37 2012 +0900
@@ -146,6 +146,11 @@ static void __init bootmem_region_zap(un
}
}

+void __init reserve_boot_pages(paddr_t ps, paddr_t pe)
+{
+ bootmem_region_zap(ps >> PAGE_SHIFT, pe >> PAGE_SHIFT);
+}
+
void __init init_boot_pages(paddr_t ps, paddr_t pe)
{
unsigned long bad_spfn, bad_epfn;
diff -r 4d61f02fde37 xen/include/asm-arm/mm.h
--- a/xen/include/asm-arm/mm.h Fri Feb 03 17:47:32 2012 +0900
+++ b/xen/include/asm-arm/mm.h Mon Feb 06 11:16:37 2012 +0900
@@ -108,6 +108,13 @@

#define write_ptbase(v) cpu_switch_ttb((v)->arch.ctx.ttbr0)

+#if 0
+#undef page_list_entry
+struct page_list_entry
+{
+ unsigned long next, prev;
+};
+#endif
struct page_info
{
struct page_list_entry list;
@@ -187,6 +194,7 @@ extern unsigned long min_page, max_page;
extern struct domain *dom_xen, *dom_io, *dom_cow;
extern struct page_info *frame_table;

+extern l1e_t *xen_translation_table;
void memguard_guard_stack(void *p);

void share_xen_page_with_guest(struct page_info *page, struct domain *d, int readonly);
@@ -214,6 +222,11 @@ long arch_memory_op(int op, XEN_GUEST_HA

int map_pages_to_xen(unsigned long virt, unsigned long mfn, int nr, unsigned long flags);

+unsigned long alloc_page_tables(l1e_t *l1e);
+
+int alloc_page_map(unsigned long virt, unsigned long phys, unsigned int size, unsigned int flags);
+
+
static inline void put_page_and_type(struct page_info *page)
{
put_page_type(page);
@@ -234,4 +247,19 @@ static inline int get_page_and_type(stru
return rc;
}

+/*
+ * TBD : Page owner setting.
+ */
+#define alloc_pages(nr) \
+ (alloc_boot_pages(nr, PAGE_SIZE) << PAGE_SHIFT)
+
+#define alloc_clean_pages(nr) \
+({ \
+ unsigned long page; \
+ page = alloc_pages(nr); \
+ if (page) { \
+ memset((void *)page, 0, nr << PAGE_SHIFT); \
+ } \
+ page; \
+})
#endif /* __ARM_MM_H__ */
diff -r 4d61f02fde37 xen/include/asm-arm/mmu.h
--- a/xen/include/asm-arm/mmu.h Fri Feb 03 17:47:32 2012 +0900
+++ b/xen/include/asm-arm/mmu.h Mon Feb 06 11:16:37 2012 +0900
@@ -140,6 +140,16 @@
#define l1_linear_offset_xen(va) \
(l1_linear_offset((xen_translation_table), va))

+#define pte_sync(ptr) \
+do { \
+ __asm__ __volatile__( \
+ "mcr p15, 0, %0, c7, c10, 1 @ clean D entry \n" \
+ : \
+ : "r"(ptr), "r"(0) \
+ : "memory"); \
+}while(0)
+
+
typedef struct { unsigned long l2e; } l2e_t;
typedef struct { unsigned long l1e; } l1e_t;

diff -r 4d61f02fde37 xen/include/asm-arm/platform.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/platform.h Mon Feb 06 11:16:37 2012 +0900
@@ -0,0 +1,62 @@
+/*
+ * platform.h
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ * JaeMin Ryu <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ARM_PLATFORM_H__
+#define __ARM_PLATFORM_H__
+
+#include <xen/list.h>
+
+#define MEMORY_TYPE_RAM (0)
+#define MEMORY_TYPE_ROM (1)
+#define MEMORY_TYPE_DEV (2)
+#define MEMORY_TYPE_MASK (0xF)
+
+#ifdef __ASSEMBLY__
+#define DECLARE_PLATFORM_OP(gop, nop) \
+ .set gop, nop ;\
+ .global gop ;
+#else
+#define DECLARE_PLATFORM_OP(gop, nop) \
+ typeof (nop) gop \
+ __attribute__((weak, alias(#nop)))
+
+
+#define DECLARE_MEMORY_MAP(_n) \
+struct memory_map __attribute__ ((__section__(".init.memtable"))) _n ## _memmap[]
+
+#define MEMMAP_ENTRY(b, s, t, f) {b, s, t, (b & ~(0x100000 - 1)) | f}
+
+struct memory_map {
+ unsigned long base;
+ unsigned int size;
+ unsigned int type;
+ unsigned int flags;
+};
+
+#define iterate_memory_map(entry) \
+ for (entry = &_smemtable; entry < &_ememtable; entry++)
+
+#define memory_map_type(entry) (entry->type & MEMORY_TYPE_MASK)
+
+extern struct memory_map _smemtable, _ememtable;
+
+#endif
+#endif /* __ARM_PLATFORM_H__ */
+